diff --git a/hack/.golint_failures b/hack/.golint_failures index cde5383551..95a0b7fb99 100644 --- a/hack/.golint_failures +++ b/hack/.golint_failures @@ -393,6 +393,9 @@ pkg/volume/secret pkg/volume/storageos pkg/volume/testing pkg/volume/util +pkg/volume/util/fs +pkg/volume/util/recyclerclient +pkg/volume/util/volumepathhandler pkg/volume/vsphere_volume plugin/pkg/admission/antiaffinity plugin/pkg/admission/eventratelimit/apis/eventratelimit diff --git a/pkg/cloudprovider/providers/aws/aws.go b/pkg/cloudprovider/providers/aws/aws.go index 6eb9f3dc02..67458a9b97 100644 --- a/pkg/cloudprovider/providers/aws/aws.go +++ b/pkg/cloudprovider/providers/aws/aws.go @@ -2150,17 +2150,17 @@ func (c *Cloud) CreateDisk(volumeOptions *VolumeOptions) (KubernetesVolumeID, er var createAZ string if !volumeOptions.ZonePresent && !volumeOptions.ZonesPresent { - createAZ = volume.ChooseZoneForVolume(allZones, volumeOptions.PVCName) + createAZ = volumeutil.ChooseZoneForVolume(allZones, volumeOptions.PVCName) } if !volumeOptions.ZonePresent && volumeOptions.ZonesPresent { if adminSetOfZones, err := volumeutil.ZonesToSet(volumeOptions.AvailabilityZones); err != nil { return "", err } else { - createAZ = volume.ChooseZoneForVolume(adminSetOfZones, volumeOptions.PVCName) + createAZ = volumeutil.ChooseZoneForVolume(adminSetOfZones, volumeOptions.PVCName) } } if volumeOptions.ZonePresent && !volumeOptions.ZonesPresent { - if err := volume.ValidateZone(volumeOptions.AvailabilityZone); err != nil { + if err := volumeutil.ValidateZone(volumeOptions.AvailabilityZone); err != nil { return "", err } createAZ = volumeOptions.AvailabilityZone @@ -2476,7 +2476,7 @@ func (c *Cloud) ResizeDisk( } requestBytes := newSize.Value() // AWS resizes in chunks of GiB (not GB) - requestGiB := volume.RoundUpSize(requestBytes, 1024*1024*1024) + requestGiB := volumeutil.RoundUpSize(requestBytes, 1024*1024*1024) newSizeQuant := resource.MustParse(fmt.Sprintf("%dGi", requestGiB)) // If disk already if of greater 
or equal size than requested we return diff --git a/pkg/cloudprovider/providers/gce/gce_disks.go b/pkg/cloudprovider/providers/gce/gce_disks.go index 7f60df4ee1..faa813109e 100644 --- a/pkg/cloudprovider/providers/gce/gce_disks.go +++ b/pkg/cloudprovider/providers/gce/gce_disks.go @@ -771,7 +771,7 @@ func (gce *GCECloud) ResizeDisk(diskToResize string, oldSize resource.Quantity, requestBytes := newSize.Value() // GCE resizes in chunks of GBs (not GiB) - requestGB := volume.RoundUpSize(requestBytes, 1000*1000*1000) + requestGB := volumeutil.RoundUpSize(requestBytes, 1000*1000*1000) newSizeQuant := resource.MustParse(fmt.Sprintf("%dG", requestGB)) // If disk is already of size equal or greater than requested size, we simply return diff --git a/pkg/cloudprovider/providers/openstack/openstack_volumes.go b/pkg/cloudprovider/providers/openstack/openstack_volumes.go index 8b7277afe1..37c71c36fe 100644 --- a/pkg/cloudprovider/providers/openstack/openstack_volumes.go +++ b/pkg/cloudprovider/providers/openstack/openstack_volumes.go @@ -412,7 +412,7 @@ func (os *OpenStack) ExpandVolume(volumeID string, oldSize resource.Quantity, ne volSizeBytes := newSize.Value() // Cinder works with gigabytes, convert to GiB with rounding up - volSizeGB := int(k8s_volume.RoundUpSize(volSizeBytes, 1024*1024*1024)) + volSizeGB := int(volumeutil.RoundUpSize(volSizeBytes, 1024*1024*1024)) newSizeQuant := resource.MustParse(fmt.Sprintf("%dGi", volSizeGB)) // if volume size equals to or greater than the newSize, return nil diff --git a/pkg/controller/volume/attachdetach/BUILD b/pkg/controller/volume/attachdetach/BUILD index d0d8ccfca1..4d5908494e 100644 --- a/pkg/controller/volume/attachdetach/BUILD +++ b/pkg/controller/volume/attachdetach/BUILD @@ -23,7 +23,7 @@ go_library( "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", "//pkg/volume/util/operationexecutor:go_default_library", - "//pkg/volume/util/volumehelper:go_default_library", + 
"//pkg/volume/util/volumepathhandler:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", diff --git a/pkg/controller/volume/attachdetach/attach_detach_controller.go b/pkg/controller/volume/attachdetach/attach_detach_controller.go index edbf1ca850..b719ffaaa3 100644 --- a/pkg/controller/volume/attachdetach/attach_detach_controller.go +++ b/pkg/controller/volume/attachdetach/attach_detach_controller.go @@ -47,7 +47,7 @@ import ( "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" "k8s.io/kubernetes/pkg/volume/util/operationexecutor" - "k8s.io/kubernetes/pkg/volume/util/volumehelper" + "k8s.io/kubernetes/pkg/volume/util/volumepathhandler" ) // TimerConfig contains configuration of internal attach/detach timers and @@ -137,7 +137,7 @@ func NewAttachDetachController( eventBroadcaster.StartLogging(glog.Infof) eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")}) recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "attachdetach-controller"}) - blkutil := volumeutil.NewBlockVolumePathHandler() + blkutil := volumepathhandler.NewBlockVolumePathHandler() adc.desiredStateOfWorld = cache.NewDesiredStateOfWorld(&adc.volumePluginMgr) adc.actualStateOfWorld = cache.NewActualStateOfWorld(&adc.volumePluginMgr) @@ -361,7 +361,7 @@ func (adc *attachDetachController) populateDesiredStateOfWorld() error { err) continue } - volumeName, err := volumehelper.GetUniqueVolumeNameFromSpec(plugin, volumeSpec) + volumeName, err := volumeutil.GetUniqueVolumeNameFromSpec(plugin, volumeSpec) if err != nil { glog.Errorf( "Failed to find unique name for volume %q, pod %q/%q: %v", @@ -587,10 +587,10 @@ func (adc *attachDetachController) GetExec(pluginName string) mount.Exec { } func (adc *attachDetachController) addNodeToDswp(node *v1.Node, 
nodeName types.NodeName) { - if _, exists := node.Annotations[volumehelper.ControllerManagedAttachAnnotation]; exists { + if _, exists := node.Annotations[volumeutil.ControllerManagedAttachAnnotation]; exists { keepTerminatedPodVolumes := false - if t, ok := node.Annotations[volumehelper.KeepTerminatedPodVolumesAnnotation]; ok { + if t, ok := node.Annotations[volumeutil.KeepTerminatedPodVolumesAnnotation]; ok { keepTerminatedPodVolumes = (t == "true") } diff --git a/pkg/controller/volume/attachdetach/cache/BUILD b/pkg/controller/volume/attachdetach/cache/BUILD index 794c85e4a6..76f3b46b59 100644 --- a/pkg/controller/volume/attachdetach/cache/BUILD +++ b/pkg/controller/volume/attachdetach/cache/BUILD @@ -15,9 +15,9 @@ go_library( importpath = "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache", deps = [ "//pkg/volume:go_default_library", + "//pkg/volume/util:go_default_library", "//pkg/volume/util/operationexecutor:go_default_library", "//pkg/volume/util/types:go_default_library", - "//pkg/volume/util/volumehelper:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", diff --git a/pkg/controller/volume/attachdetach/cache/actual_state_of_world.go b/pkg/controller/volume/attachdetach/cache/actual_state_of_world.go index 59fba9ca6b..985b29962d 100644 --- a/pkg/controller/volume/attachdetach/cache/actual_state_of_world.go +++ b/pkg/controller/volume/attachdetach/cache/actual_state_of_world.go @@ -31,8 +31,8 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/kubernetes/pkg/volume" + "k8s.io/kubernetes/pkg/volume/util" "k8s.io/kubernetes/pkg/volume/util/operationexecutor" - "k8s.io/kubernetes/pkg/volume/util/volumehelper" ) // ActualStateOfWorld defines a set of thread-safe operations supported on @@ -275,7 +275,7 @@ func (asw *actualStateOfWorld) AddVolumeNode( err) } - volumeName, err = 
volumehelper.GetUniqueVolumeNameFromSpec( + volumeName, err = util.GetUniqueVolumeNameFromSpec( attachableVolumePlugin, volumeSpec) if err != nil { return "", fmt.Errorf( diff --git a/pkg/controller/volume/attachdetach/cache/desired_state_of_world.go b/pkg/controller/volume/attachdetach/cache/desired_state_of_world.go index 8e9fdd7804..520d2ca38b 100644 --- a/pkg/controller/volume/attachdetach/cache/desired_state_of_world.go +++ b/pkg/controller/volume/attachdetach/cache/desired_state_of_world.go @@ -28,9 +28,9 @@ import ( "k8s.io/api/core/v1" k8stypes "k8s.io/apimachinery/pkg/types" "k8s.io/kubernetes/pkg/volume" + "k8s.io/kubernetes/pkg/volume/util" "k8s.io/kubernetes/pkg/volume/util/operationexecutor" "k8s.io/kubernetes/pkg/volume/util/types" - "k8s.io/kubernetes/pkg/volume/util/volumehelper" ) // DesiredStateOfWorld defines a set of thread-safe operations supported on @@ -231,7 +231,7 @@ func (dsw *desiredStateOfWorld) AddPod( err) } - volumeName, err := volumehelper.GetUniqueVolumeNameFromSpec( + volumeName, err := util.GetUniqueVolumeNameFromSpec( attachableVolumePlugin, volumeSpec) if err != nil { return "", fmt.Errorf( diff --git a/pkg/controller/volume/attachdetach/populator/BUILD b/pkg/controller/volume/attachdetach/populator/BUILD index 0f88c9bf36..1ed2bce09e 100644 --- a/pkg/controller/volume/attachdetach/populator/BUILD +++ b/pkg/controller/volume/attachdetach/populator/BUILD @@ -14,7 +14,7 @@ go_library( "//pkg/controller/volume/attachdetach/cache:go_default_library", "//pkg/controller/volume/attachdetach/util:go_default_library", "//pkg/volume:go_default_library", - "//pkg/volume/util/volumehelper:go_default_library", + "//pkg/volume/util:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", @@ -46,7 +46,7 @@ go_test( "//pkg/controller:go_default_library", 
"//pkg/controller/volume/attachdetach/cache:go_default_library", "//pkg/volume/testing:go_default_library", - "//pkg/volume/util/volumehelper:go_default_library", + "//pkg/volume/util:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", diff --git a/pkg/controller/volume/attachdetach/populator/desired_state_of_world_populator.go b/pkg/controller/volume/attachdetach/populator/desired_state_of_world_populator.go index 4de8adc645..4065e25a80 100644 --- a/pkg/controller/volume/attachdetach/populator/desired_state_of_world_populator.go +++ b/pkg/controller/volume/attachdetach/populator/desired_state_of_world_populator.go @@ -33,7 +33,7 @@ import ( "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache" "k8s.io/kubernetes/pkg/controller/volume/attachdetach/util" "k8s.io/kubernetes/pkg/volume" - "k8s.io/kubernetes/pkg/volume/util/volumehelper" + volutil "k8s.io/kubernetes/pkg/volume/util" ) // DesiredStateOfWorldPopulator periodically verifies that the pods in the @@ -133,7 +133,7 @@ func (dswp *desiredStateOfWorldPopulator) findAndRemoveDeletedPods() { true /* default volume action */) if volumeActionFlag { - informerPodUID := volumehelper.GetUniquePodName(informerPod) + informerPodUID := volutil.GetUniquePodName(informerPod) // Check whether the unique identifier of the pod from dsw matches the one retrieved from pod informer if informerPodUID == dswPodUID { glog.V(10).Infof("Verified pod %q (UID %q) from dsw exists in pod informer.", dswPodKey, dswPodUID) @@ -158,7 +158,7 @@ func (dswp *desiredStateOfWorldPopulator) findAndAddActivePods() { dswp.timeOfLastListPods = time.Now() for _, pod := range pods { - if volumehelper.IsPodTerminated(pod, pod.Status) { + if volutil.IsPodTerminated(pod, pod.Status) { // Do not add volumes for terminated pods continue } diff --git 
a/pkg/controller/volume/attachdetach/populator/desired_state_of_world_populator_test.go b/pkg/controller/volume/attachdetach/populator/desired_state_of_world_populator_test.go index 56f9f8c5f2..9e07d29c60 100644 --- a/pkg/controller/volume/attachdetach/populator/desired_state_of_world_populator_test.go +++ b/pkg/controller/volume/attachdetach/populator/desired_state_of_world_populator_test.go @@ -28,7 +28,7 @@ import ( "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache" volumetesting "k8s.io/kubernetes/pkg/volume/testing" - "k8s.io/kubernetes/pkg/volume/util/volumehelper" + "k8s.io/kubernetes/pkg/volume/util" ) func TestFindAndAddActivePods_FindAndRemoveDeletedPods(t *testing.T) { @@ -66,7 +66,7 @@ func TestFindAndAddActivePods_FindAndRemoveDeletedPods(t *testing.T) { fakePodInformer.Informer().GetStore().Add(pod) - podName := volumehelper.GetUniquePodName(pod) + podName := util.GetUniquePodName(pod) generatedVolumeName := "fake-plugin/" + pod.Spec.Volumes[0].Name diff --git a/pkg/controller/volume/attachdetach/testing/BUILD b/pkg/controller/volume/attachdetach/testing/BUILD index 67e6ccae8a..968d3cf533 100644 --- a/pkg/controller/volume/attachdetach/testing/BUILD +++ b/pkg/controller/volume/attachdetach/testing/BUILD @@ -11,7 +11,7 @@ go_library( importpath = "k8s.io/kubernetes/pkg/controller/volume/attachdetach/testing", deps = [ "//pkg/volume:go_default_library", - "//pkg/volume/util/volumehelper:go_default_library", + "//pkg/volume/util:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/pkg/controller/volume/attachdetach/testing/testvolumespec.go b/pkg/controller/volume/attachdetach/testing/testvolumespec.go index a20f450742..06aad75f75 100644 --- a/pkg/controller/volume/attachdetach/testing/testvolumespec.go +++ 
b/pkg/controller/volume/attachdetach/testing/testvolumespec.go @@ -30,7 +30,7 @@ import ( "k8s.io/client-go/kubernetes/fake" core "k8s.io/client-go/testing" "k8s.io/kubernetes/pkg/volume" - "k8s.io/kubernetes/pkg/volume/util/volumehelper" + "k8s.io/kubernetes/pkg/volume/util" ) const TestPluginName = "kubernetes.io/testPlugin" @@ -142,7 +142,7 @@ func CreateTestClient() *fake.Clientset { "name": nodeName, }, Annotations: map[string]string{ - volumehelper.ControllerManagedAttachAnnotation: "true", + util.ControllerManagedAttachAnnotation: "true", }, }, Status: v1.NodeStatus{ diff --git a/pkg/controller/volume/attachdetach/util/BUILD b/pkg/controller/volume/attachdetach/util/BUILD index 06848fecca..f74cdd3f12 100644 --- a/pkg/controller/volume/attachdetach/util/BUILD +++ b/pkg/controller/volume/attachdetach/util/BUILD @@ -12,7 +12,7 @@ go_library( deps = [ "//pkg/controller/volume/attachdetach/cache:go_default_library", "//pkg/volume:go_default_library", - "//pkg/volume/util/volumehelper:go_default_library", + "//pkg/volume/util:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", diff --git a/pkg/controller/volume/attachdetach/util/util.go b/pkg/controller/volume/attachdetach/util/util.go index 370486b361..6f3dfd0a78 100644 --- a/pkg/controller/volume/attachdetach/util/util.go +++ b/pkg/controller/volume/attachdetach/util/util.go @@ -25,7 +25,7 @@ import ( corelisters "k8s.io/client-go/listers/core/v1" "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache" "k8s.io/kubernetes/pkg/volume" - "k8s.io/kubernetes/pkg/volume/util/volumehelper" + "k8s.io/kubernetes/pkg/volume/util" ) // CreateVolumeSpec creates and returns a mutatable volume.Spec object for the @@ -150,7 +150,7 @@ func DetermineVolumeAction(pod *v1.Pod, desiredStateOfWorld cache.DesiredStateOf nodeName := types.NodeName(pod.Spec.NodeName) keepTerminatedPodVolume := 
desiredStateOfWorld.GetKeepTerminatedPodVolumesForNode(nodeName) - if volumehelper.IsPodTerminated(pod, pod.Status) { + if util.IsPodTerminated(pod, pod.Status) { // if pod is terminate we let kubelet policy dictate if volume // should be detached or not return keepTerminatedPodVolume @@ -216,7 +216,7 @@ func ProcessPodVolumes(pod *v1.Pod, addVolumes bool, desiredStateOfWorld cache.D continue } - uniquePodName := volumehelper.GetUniquePodName(pod) + uniquePodName := util.GetUniquePodName(pod) if addVolumes { // Add volume to desired state of world _, err := desiredStateOfWorld.AddPod( @@ -232,7 +232,7 @@ func ProcessPodVolumes(pod *v1.Pod, addVolumes bool, desiredStateOfWorld cache.D } else { // Remove volume from desired state of world - uniqueVolumeName, err := volumehelper.GetUniqueVolumeNameFromSpec( + uniqueVolumeName, err := util.GetUniqueVolumeNameFromSpec( attachableVolumePlugin, volumeSpec) if err != nil { glog.V(10).Infof( diff --git a/pkg/controller/volume/expand/BUILD b/pkg/controller/volume/expand/BUILD index cb69f9eb50..5a067da6a4 100644 --- a/pkg/controller/volume/expand/BUILD +++ b/pkg/controller/volume/expand/BUILD @@ -23,6 +23,7 @@ go_library( "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", "//pkg/volume/util/operationexecutor:go_default_library", + "//pkg/volume/util/volumepathhandler:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/pkg/controller/volume/expand/expand_controller.go b/pkg/controller/volume/expand/expand_controller.go index 7be32384ad..e41785f34c 100644 --- a/pkg/controller/volume/expand/expand_controller.go +++ b/pkg/controller/volume/expand/expand_controller.go @@ -42,8 +42,8 @@ import ( "k8s.io/kubernetes/pkg/util/io" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" - "k8s.io/kubernetes/pkg/volume/util" 
"k8s.io/kubernetes/pkg/volume/util/operationexecutor" + "k8s.io/kubernetes/pkg/volume/util/volumepathhandler" ) const ( @@ -118,7 +118,7 @@ func NewExpandController( eventBroadcaster.StartLogging(glog.Infof) eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")}) recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "volume_expand"}) - blkutil := util.NewBlockVolumePathHandler() + blkutil := volumepathhandler.NewBlockVolumePathHandler() expc.opExecutor = operationexecutor.NewOperationExecutor(operationexecutor.NewOperationGenerator( kubeClient, diff --git a/pkg/controller/volume/persistentvolume/BUILD b/pkg/controller/volume/persistentvolume/BUILD index 9f38d9acc1..5367ef78fa 100644 --- a/pkg/controller/volume/persistentvolume/BUILD +++ b/pkg/controller/volume/persistentvolume/BUILD @@ -32,7 +32,7 @@ go_library( "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", - "//pkg/volume/util/volumehelper:go_default_library", + "//pkg/volume/util/recyclerclient:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/storage/v1:go_default_library", @@ -80,6 +80,8 @@ go_test( "//pkg/apis/core:go_default_library", "//pkg/controller:go_default_library", "//pkg/volume:go_default_library", + "//pkg/volume/util:go_default_library", + "//pkg/volume/util/recyclerclient:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/storage/v1:go_default_library", diff --git a/pkg/controller/volume/persistentvolume/framework_test.go b/pkg/controller/volume/persistentvolume/framework_test.go index 441309e6b5..5197f053c9 100644 --- a/pkg/controller/volume/persistentvolume/framework_test.go +++ b/pkg/controller/volume/persistentvolume/framework_test.go @@ 
-49,6 +49,7 @@ import ( "k8s.io/kubernetes/pkg/api/testapi" "k8s.io/kubernetes/pkg/controller" vol "k8s.io/kubernetes/pkg/volume" + "k8s.io/kubernetes/pkg/volume/util/recyclerclient" ) // This is a unit test framework for persistent volume controller. @@ -1262,7 +1263,7 @@ func (plugin *mockVolumePlugin) GetMetrics() (*vol.Metrics, error) { // Recycler interfaces -func (plugin *mockVolumePlugin) Recycle(pvName string, spec *vol.Spec, eventRecorder vol.RecycleEventRecorder) error { +func (plugin *mockVolumePlugin) Recycle(pvName string, spec *vol.Spec, eventRecorder recyclerclient.RecycleEventRecorder) error { if len(plugin.recycleCalls) == 0 { return fmt.Errorf("Mock plugin error: no recycleCalls configured") } diff --git a/pkg/controller/volume/persistentvolume/index.go b/pkg/controller/volume/persistentvolume/index.go index 9168d59093..a23cd6faa0 100644 --- a/pkg/controller/volume/persistentvolume/index.go +++ b/pkg/controller/volume/persistentvolume/index.go @@ -28,7 +28,6 @@ import ( "k8s.io/client-go/tools/cache" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" "k8s.io/kubernetes/pkg/features" - "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" ) @@ -321,7 +320,7 @@ func (pvIndex *persistentVolumeOrderedIndex) allPossibleMatchingAccessModes(requ keys := pvIndex.store.ListIndexFuncValues("accessmodes") for _, key := range keys { indexedModes := v1helper.GetAccessModesFromString(key) - if volume.AccessModesContainedInAll(indexedModes, requestedModes) { + if volumeutil.AccessModesContainedInAll(indexedModes, requestedModes) { matchedModes = append(matchedModes, indexedModes) } } diff --git a/pkg/controller/volume/persistentvolume/index_test.go b/pkg/controller/volume/persistentvolume/index_test.go index 80e770bd3b..caace043be 100644 --- a/pkg/controller/volume/persistentvolume/index_test.go +++ b/pkg/controller/volume/persistentvolume/index_test.go @@ -27,7 +27,7 @@ import ( "k8s.io/client-go/kubernetes/scheme" ref 
"k8s.io/client-go/tools/reference" "k8s.io/kubernetes/pkg/api/testapi" - "k8s.io/kubernetes/pkg/volume" + "k8s.io/kubernetes/pkg/volume/util" ) func makePVC(size string, modfn func(*v1.PersistentVolumeClaim)) *v1.PersistentVolumeClaim { @@ -304,7 +304,7 @@ func TestAllPossibleAccessModes(t *testing.T) { t.Errorf("Expected 3 arrays of modes that match RWO, but got %v", len(possibleModes)) } for _, m := range possibleModes { - if !volume.AccessModesContains(m, v1.ReadWriteOnce) { + if !util.AccessModesContains(m, v1.ReadWriteOnce) { t.Errorf("AccessModes does not contain %s", v1.ReadWriteOnce) } } @@ -313,7 +313,7 @@ func TestAllPossibleAccessModes(t *testing.T) { if len(possibleModes) != 1 { t.Errorf("Expected 1 array of modes that match RWX, but got %v", len(possibleModes)) } - if !volume.AccessModesContains(possibleModes[0], v1.ReadWriteMany) { + if !util.AccessModesContains(possibleModes[0], v1.ReadWriteMany) { t.Errorf("AccessModes does not contain %s", v1.ReadWriteOnce) } diff --git a/pkg/controller/volume/persistentvolume/pv_controller.go b/pkg/controller/volume/persistentvolume/pv_controller.go index 5879abfc76..c245fb15bb 100644 --- a/pkg/controller/volume/persistentvolume/pv_controller.go +++ b/pkg/controller/volume/persistentvolume/pv_controller.go @@ -45,7 +45,7 @@ import ( "k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff" vol "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" - "k8s.io/kubernetes/pkg/volume/util/volumehelper" + "k8s.io/kubernetes/pkg/volume/util/recyclerclient" "github.com/golang/glog" ) @@ -1262,7 +1262,7 @@ func (ctrl *PersistentVolumeController) isVolumeUsed(pv *v1.PersistentVolume) ([ return nil, false, fmt.Errorf("error listing pods: %s", err) } for _, pod := range pods { - if volumehelper.IsPodTerminated(pod, pod.Status) { + if util.IsPodTerminated(pod, pod.Status) { continue } for i := range pod.Spec.Volumes { @@ -1550,7 +1550,7 @@ func (ctrl *PersistentVolumeController) scheduleOperation(operationName 
string, // newRecyclerEventRecorder returns a RecycleEventRecorder that sends all events // to given volume. -func (ctrl *PersistentVolumeController) newRecyclerEventRecorder(volume *v1.PersistentVolume) vol.RecycleEventRecorder { +func (ctrl *PersistentVolumeController) newRecyclerEventRecorder(volume *v1.PersistentVolume) recyclerclient.RecycleEventRecorder { return func(eventtype, message string) { ctrl.eventRecorder.Eventf(volume, eventtype, events.RecyclerPod, "Recycler pod: %s", message) } diff --git a/pkg/controller/volume/pvcprotection/BUILD b/pkg/controller/volume/pvcprotection/BUILD index 76a0f1561d..7794f30168 100644 --- a/pkg/controller/volume/pvcprotection/BUILD +++ b/pkg/controller/volume/pvcprotection/BUILD @@ -10,7 +10,6 @@ go_library( "//pkg/util/metrics:go_default_library", "//pkg/util/slice:go_default_library", "//pkg/volume/util:go_default_library", - "//pkg/volume/util/volumehelper:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", diff --git a/pkg/controller/volume/pvcprotection/pvc_protection_controller.go b/pkg/controller/volume/pvcprotection/pvc_protection_controller.go index 9abd7cf92b..7c1ae1f562 100644 --- a/pkg/controller/volume/pvcprotection/pvc_protection_controller.go +++ b/pkg/controller/volume/pvcprotection/pvc_protection_controller.go @@ -35,7 +35,6 @@ import ( "k8s.io/kubernetes/pkg/util/metrics" "k8s.io/kubernetes/pkg/util/slice" volumeutil "k8s.io/kubernetes/pkg/volume/util" - "k8s.io/kubernetes/pkg/volume/util/volumehelper" ) // Controller is controller that removes PVCProtectionFinalizer @@ -214,7 +213,7 @@ func (c *Controller) isBeingUsed(pvc *v1.PersistentVolumeClaim) (bool, error) { glog.V(4).Infof("Skipping unscheduled pod %s when checking PVC %s/%s", pod.Name, pvc.Namespace, pvc.Name) continue } - if volumehelper.IsPodTerminated(pod, pod.Status) { + if volumeutil.IsPodTerminated(pod, 
pod.Status) { // This pod is being unmounted/detached or is already // unmounted/detached. It does not block the PVC from deletion. continue @@ -270,7 +269,7 @@ func (c *Controller) podAddedDeletedUpdated(obj interface{}, deleted bool) { } // Filter out pods that can't help us to remove a finalizer on PVC - if !deleted && !volumehelper.IsPodTerminated(pod, pod.Status) && pod.Spec.NodeName != "" { + if !deleted && !volumeutil.IsPodTerminated(pod, pod.Status) && pod.Spec.NodeName != "" { return } diff --git a/pkg/kubelet/BUILD b/pkg/kubelet/BUILD index 51244136aa..f0230fbb11 100644 --- a/pkg/kubelet/BUILD +++ b/pkg/kubelet/BUILD @@ -105,7 +105,7 @@ go_library( "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", "//pkg/volume/util/types:go_default_library", - "//pkg/volume/util/volumehelper:go_default_library", + "//pkg/volume/util/volumepathhandler:go_default_library", "//pkg/volume/validation:go_default_library", "//third_party/forked/golang/expansion:go_default_library", "//vendor/github.com/golang/glog:go_default_library", @@ -207,7 +207,7 @@ go_test( "//pkg/volume:go_default_library", "//pkg/volume/host_path:go_default_library", "//pkg/volume/testing:go_default_library", - "//pkg/volume/util/volumehelper:go_default_library", + "//pkg/volume/util:go_default_library", "//vendor/github.com/google/cadvisor/info/v1:go_default_library", "//vendor/github.com/google/cadvisor/info/v2:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", diff --git a/pkg/kubelet/kubelet_node_status.go b/pkg/kubelet/kubelet_node_status.go index 8a8e42c919..1acf1565dc 100644 --- a/pkg/kubelet/kubelet_node_status.go +++ b/pkg/kubelet/kubelet_node_status.go @@ -46,7 +46,7 @@ import ( "k8s.io/kubernetes/pkg/scheduler/algorithm" nodeutil "k8s.io/kubernetes/pkg/util/node" "k8s.io/kubernetes/pkg/version" - "k8s.io/kubernetes/pkg/volume/util/volumehelper" + volutil "k8s.io/kubernetes/pkg/volume/util" ) const ( @@ -190,8 +190,8 @@ func (kl 
*Kubelet) updateDefaultLabels(initialNode, existingNode *v1.Node) bool // whether the existing node must be updated. func (kl *Kubelet) reconcileCMADAnnotationWithExistingNode(node, existingNode *v1.Node) bool { var ( - existingCMAAnnotation = existingNode.Annotations[volumehelper.ControllerManagedAttachAnnotation] - newCMAAnnotation, newSet = node.Annotations[volumehelper.ControllerManagedAttachAnnotation] + existingCMAAnnotation = existingNode.Annotations[volutil.ControllerManagedAttachAnnotation] + newCMAAnnotation, newSet = node.Annotations[volutil.ControllerManagedAttachAnnotation] ) if newCMAAnnotation == existingCMAAnnotation { @@ -203,13 +203,13 @@ func (kl *Kubelet) reconcileCMADAnnotationWithExistingNode(node, existingNode *v // the correct value of the annotation. if !newSet { glog.Info("Controller attach-detach setting changed to false; updating existing Node") - delete(existingNode.Annotations, volumehelper.ControllerManagedAttachAnnotation) + delete(existingNode.Annotations, volutil.ControllerManagedAttachAnnotation) } else { glog.Info("Controller attach-detach setting changed to true; updating existing Node") if existingNode.Annotations == nil { existingNode.Annotations = make(map[string]string) } - existingNode.Annotations[volumehelper.ControllerManagedAttachAnnotation] = newCMAAnnotation + existingNode.Annotations[volutil.ControllerManagedAttachAnnotation] = newCMAAnnotation } return true @@ -270,7 +270,7 @@ func (kl *Kubelet) initialNode() (*v1.Node, error) { } glog.Infof("Setting node annotation to enable volume controller attach/detach") - node.Annotations[volumehelper.ControllerManagedAttachAnnotation] = "true" + node.Annotations[volutil.ControllerManagedAttachAnnotation] = "true" } else { glog.Infof("Controller attach/detach is disabled for this node; Kubelet will attach and detach volumes") } @@ -280,7 +280,7 @@ func (kl *Kubelet) initialNode() (*v1.Node, error) { node.Annotations = make(map[string]string) } glog.Infof("Setting node 
annotation to keep pod volumes of terminated pods attached to the node") - node.Annotations[volumehelper.KeepTerminatedPodVolumesAnnotation] = "true" + node.Annotations[volutil.KeepTerminatedPodVolumesAnnotation] = "true" } // @question: should this be place after the call to the cloud provider? which also applies labels diff --git a/pkg/kubelet/kubelet_node_status_test.go b/pkg/kubelet/kubelet_node_status_test.go index 0968b79932..3eba4c4dd7 100644 --- a/pkg/kubelet/kubelet_node_status_test.go +++ b/pkg/kubelet/kubelet_node_status_test.go @@ -53,7 +53,7 @@ import ( kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/util/sliceutils" "k8s.io/kubernetes/pkg/version" - "k8s.io/kubernetes/pkg/volume/util/volumehelper" + "k8s.io/kubernetes/pkg/volume/util" ) const ( @@ -937,7 +937,7 @@ func TestTryRegisterWithApiServer(t *testing.T) { if cmad { node.Annotations = make(map[string]string) - node.Annotations[volumehelper.ControllerManagedAttachAnnotation] = "true" + node.Annotations[util.ControllerManagedAttachAnnotation] = "true" } return node @@ -1089,7 +1089,7 @@ func TestTryRegisterWithApiServer(t *testing.T) { require.NoError(t, err) } - actualCMAD, _ := strconv.ParseBool(savedNode.Annotations[volumehelper.ControllerManagedAttachAnnotation]) + actualCMAD, _ := strconv.ParseBool(savedNode.Annotations[util.ControllerManagedAttachAnnotation]) assert.Equal(t, tc.savedNodeCMAD, actualCMAD, "test [%s]", tc.name) } } diff --git a/pkg/kubelet/kubelet_pods.go b/pkg/kubelet/kubelet_pods.go index bc8dccdfcd..0bad08d61e 100644 --- a/pkg/kubelet/kubelet_pods.go +++ b/pkg/kubelet/kubelet_pods.go @@ -61,9 +61,8 @@ import ( kubetypes "k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/pkg/kubelet/util/format" utilfile "k8s.io/kubernetes/pkg/util/file" - "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" - "k8s.io/kubernetes/pkg/volume/util/volumehelper" + "k8s.io/kubernetes/pkg/volume/util/volumepathhandler" 
volumevalidation "k8s.io/kubernetes/pkg/volume/validation" "k8s.io/kubernetes/third_party/forked/golang/expansion" ) @@ -129,7 +128,7 @@ func makeAbsolutePath(goos, path string) string { // makeBlockVolumes maps the raw block devices specified in the path of the container // Experimental -func (kl *Kubelet) makeBlockVolumes(pod *v1.Pod, container *v1.Container, podVolumes kubecontainer.VolumeMap, blkutil volumeutil.BlockVolumePathHandler) ([]kubecontainer.DeviceInfo, error) { +func (kl *Kubelet) makeBlockVolumes(pod *v1.Pod, container *v1.Container, podVolumes kubecontainer.VolumeMap, blkutil volumepathhandler.BlockVolumePathHandler) ([]kubecontainer.DeviceInfo, error) { var devices []kubecontainer.DeviceInfo for _, device := range container.VolumeDevices { // check path is absolute @@ -188,7 +187,7 @@ func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, h vol.SELinuxLabeled = true relabelVolume = true } - hostPath, err := volume.GetPath(vol.Mounter) + hostPath, err := volumeutil.GetPath(vol.Mounter) if err != nil { return nil, err } @@ -451,7 +450,7 @@ func (kl *Kubelet) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Contai return nil, err } opts.Hostname = hostname - podName := volumehelper.GetUniquePodName(pod) + podName := volumeutil.GetUniquePodName(pod) volumes := kl.volumeManager.GetMountedVolumesForPod(podName) opts.PortMappings = kubecontainer.MakePortMappings(container) @@ -464,7 +463,7 @@ func (kl *Kubelet) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Contai // TODO: remove feature gate check after no longer needed if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { - blkutil := volumeutil.NewBlockVolumePathHandler() + blkutil := volumepathhandler.NewBlockVolumePathHandler() blkVolumes, err := kl.makeBlockVolumes(pod, container, volumes, blkutil) if err != nil { return nil, err diff --git a/pkg/kubelet/kubelet_test.go b/pkg/kubelet/kubelet_test.go index ad59b34d39..535d8598ca 100644 --- 
a/pkg/kubelet/kubelet_test.go +++ b/pkg/kubelet/kubelet_test.go @@ -73,7 +73,7 @@ import ( "k8s.io/kubernetes/pkg/volume" _ "k8s.io/kubernetes/pkg/volume/host_path" volumetest "k8s.io/kubernetes/pkg/volume/testing" - "k8s.io/kubernetes/pkg/volume/util/volumehelper" + "k8s.io/kubernetes/pkg/volume/util" ) func init() { @@ -2134,7 +2134,7 @@ func waitForVolumeUnmount( func() (bool, error) { // Verify volumes detached podVolumes = volumeManager.GetMountedVolumesForPod( - volumehelper.GetUniquePodName(pod)) + util.GetUniquePodName(pod)) if len(podVolumes) != 0 { return false, nil diff --git a/pkg/kubelet/kubelet_volumes_test.go b/pkg/kubelet/kubelet_volumes_test.go index 60f1e70287..dc2c89a660 100644 --- a/pkg/kubelet/kubelet_volumes_test.go +++ b/pkg/kubelet/kubelet_volumes_test.go @@ -28,7 +28,7 @@ import ( core "k8s.io/client-go/testing" "k8s.io/kubernetes/pkg/volume" volumetest "k8s.io/kubernetes/pkg/volume/testing" - "k8s.io/kubernetes/pkg/volume/util/volumehelper" + "k8s.io/kubernetes/pkg/volume/util" ) func TestListVolumesForPod(t *testing.T) { @@ -64,7 +64,7 @@ func TestListVolumesForPod(t *testing.T) { err := kubelet.volumeManager.WaitForAttachAndMount(pod) assert.NoError(t, err) - podName := volumehelper.GetUniquePodName(pod) + podName := util.GetUniquePodName(pod) volumesToReturn, volumeExsit := kubelet.ListVolumesForPod(types.UID(podName)) assert.True(t, volumeExsit, "expected to find volumes for pod %q", podName) @@ -180,7 +180,7 @@ func TestVolumeAttachAndMountControllerDisabled(t *testing.T) { assert.NoError(t, err) podVolumes := kubelet.volumeManager.GetMountedVolumesForPod( - volumehelper.GetUniquePodName(pod)) + util.GetUniquePodName(pod)) expectedPodVolumes := []string{"vol1"} assert.Len(t, podVolumes, len(expectedPodVolumes), "Volumes for pod %+v", pod) @@ -227,7 +227,7 @@ func TestVolumeUnmountAndDetachControllerDisabled(t *testing.T) { assert.NoError(t, err) podVolumes := kubelet.volumeManager.GetMountedVolumesForPod( - 
volumehelper.GetUniquePodName(pod)) + util.GetUniquePodName(pod)) expectedPodVolumes := []string{"vol1"} assert.Len(t, podVolumes, len(expectedPodVolumes), "Volumes for pod %+v", pod) @@ -252,7 +252,7 @@ func TestVolumeUnmountAndDetachControllerDisabled(t *testing.T) { // Verify volumes unmounted podVolumes = kubelet.volumeManager.GetMountedVolumesForPod( - volumehelper.GetUniquePodName(pod)) + util.GetUniquePodName(pod)) assert.Len(t, podVolumes, 0, "Expected volumes to be unmounted and detached. But some volumes are still mounted: %#v", podVolumes) @@ -317,7 +317,7 @@ func TestVolumeAttachAndMountControllerEnabled(t *testing.T) { assert.NoError(t, kubelet.volumeManager.WaitForAttachAndMount(pod)) podVolumes := kubelet.volumeManager.GetMountedVolumesForPod( - volumehelper.GetUniquePodName(pod)) + util.GetUniquePodName(pod)) expectedPodVolumes := []string{"vol1"} assert.Len(t, podVolumes, len(expectedPodVolumes), "Volumes for pod %+v", pod) @@ -386,7 +386,7 @@ func TestVolumeUnmountAndDetachControllerEnabled(t *testing.T) { assert.NoError(t, kubelet.volumeManager.WaitForAttachAndMount(pod)) podVolumes := kubelet.volumeManager.GetMountedVolumesForPod( - volumehelper.GetUniquePodName(pod)) + util.GetUniquePodName(pod)) expectedPodVolumes := []string{"vol1"} assert.Len(t, podVolumes, len(expectedPodVolumes), "Volumes for pod %+v", pod) @@ -410,7 +410,7 @@ func TestVolumeUnmountAndDetachControllerEnabled(t *testing.T) { // Verify volumes unmounted podVolumes = kubelet.volumeManager.GetMountedVolumesForPod( - volumehelper.GetUniquePodName(pod)) + util.GetUniquePodName(pod)) assert.Len(t, podVolumes, 0, "Expected volumes to be unmounted and detached. 
But some volumes are still mounted: %#v", podVolumes) diff --git a/pkg/kubelet/volume_host.go b/pkg/kubelet/volume_host.go index de71e3c4b0..cf1fc77deb 100644 --- a/pkg/kubelet/volume_host.go +++ b/pkg/kubelet/volume_host.go @@ -36,6 +36,7 @@ import ( "k8s.io/kubernetes/pkg/util/io" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" + "k8s.io/kubernetes/pkg/volume/util" ) // NewInitializedVolumePluginMgr returns a new instance of @@ -94,7 +95,7 @@ func (kvh *kubeletVolumeHost) GetVolumeDevicePluginDir(pluginName string) string func (kvh *kubeletVolumeHost) GetPodVolumeDir(podUID types.UID, pluginName string, volumeName string) string { dir := kvh.kubelet.getPodVolumeDir(podUID, pluginName, volumeName) if runtime.GOOS == "windows" { - dir = volume.GetWindowsPath(dir) + dir = util.GetWindowsPath(dir) } return dir } diff --git a/pkg/kubelet/volumemanager/BUILD b/pkg/kubelet/volumemanager/BUILD index d0135589fa..3f9d35d462 100644 --- a/pkg/kubelet/volumemanager/BUILD +++ b/pkg/kubelet/volumemanager/BUILD @@ -24,7 +24,7 @@ go_library( "//pkg/volume/util:go_default_library", "//pkg/volume/util/operationexecutor:go_default_library", "//pkg/volume/util/types:go_default_library", - "//pkg/volume/util/volumehelper:go_default_library", + "//pkg/volume/util/volumepathhandler:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", @@ -52,8 +52,8 @@ go_test( "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/testing:go_default_library", + "//pkg/volume/util:go_default_library", "//pkg/volume/util/types:go_default_library", - "//pkg/volume/util/volumehelper:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", diff --git 
a/pkg/kubelet/volumemanager/cache/BUILD b/pkg/kubelet/volumemanager/cache/BUILD index b4cf73fffc..bac59a12bd 100644 --- a/pkg/kubelet/volumemanager/cache/BUILD +++ b/pkg/kubelet/volumemanager/cache/BUILD @@ -15,9 +15,9 @@ go_library( importpath = "k8s.io/kubernetes/pkg/kubelet/volumemanager/cache", deps = [ "//pkg/volume:go_default_library", + "//pkg/volume/util:go_default_library", "//pkg/volume/util/operationexecutor:go_default_library", "//pkg/volume/util/types:go_default_library", - "//pkg/volume/util/volumehelper:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", @@ -34,8 +34,8 @@ go_test( deps = [ "//pkg/volume:go_default_library", "//pkg/volume/testing:go_default_library", + "//pkg/volume/util:go_default_library", "//pkg/volume/util/types:go_default_library", - "//pkg/volume/util/volumehelper:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", ], diff --git a/pkg/kubelet/volumemanager/cache/actual_state_of_world.go b/pkg/kubelet/volumemanager/cache/actual_state_of_world.go index 9243ef3329..140c434fe6 100644 --- a/pkg/kubelet/volumemanager/cache/actual_state_of_world.go +++ b/pkg/kubelet/volumemanager/cache/actual_state_of_world.go @@ -29,9 +29,9 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/kubernetes/pkg/volume" + "k8s.io/kubernetes/pkg/volume/util" "k8s.io/kubernetes/pkg/volume/util/operationexecutor" volumetypes "k8s.io/kubernetes/pkg/volume/util/types" - "k8s.io/kubernetes/pkg/volume/util/volumehelper" ) // ActualStateOfWorld defines a set of thread-safe operations for the kubelet @@ -358,7 +358,7 @@ func (asw *actualStateOfWorld) addVolume( } if len(volumeName) == 0 { - volumeName, err = volumehelper.GetUniqueVolumeNameFromSpec(volumePlugin, volumeSpec) + volumeName, err = 
util.GetUniqueVolumeNameFromSpec(volumePlugin, volumeSpec) if err != nil { return fmt.Errorf( "failed to GetUniqueVolumeNameFromSpec for volumeSpec %q using volume plugin %q err=%v", diff --git a/pkg/kubelet/volumemanager/cache/actual_state_of_world_test.go b/pkg/kubelet/volumemanager/cache/actual_state_of_world_test.go index 6a7f561a13..634a3328c8 100644 --- a/pkg/kubelet/volumemanager/cache/actual_state_of_world_test.go +++ b/pkg/kubelet/volumemanager/cache/actual_state_of_world_test.go @@ -23,8 +23,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/volume" volumetesting "k8s.io/kubernetes/pkg/volume/testing" + "k8s.io/kubernetes/pkg/volume/util" volumetypes "k8s.io/kubernetes/pkg/volume/util/types" - "k8s.io/kubernetes/pkg/volume/util/volumehelper" ) var emptyVolumeName = v1.UniqueVolumeName("") @@ -56,7 +56,7 @@ func Test_MarkVolumeAsAttached_Positive_NewVolume(t *testing.T) { } volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]} devicePath := "fake/device/path" - generatedVolumeName, _ := volumehelper.GetUniqueVolumeNameFromSpec(plugin, volumeSpec) + generatedVolumeName, _ := util.GetUniqueVolumeNameFromSpec(plugin, volumeSpec) // Act err := asw.MarkVolumeAsAttached(emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath) @@ -143,7 +143,7 @@ func Test_MarkVolumeAsAttached_Positive_ExistingVolume(t *testing.T) { }, } volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]} - generatedVolumeName, _ := volumehelper.GetUniqueVolumeNameFromSpec(plugin, volumeSpec) + generatedVolumeName, _ := util.GetUniqueVolumeNameFromSpec(plugin, volumeSpec) err := asw.MarkVolumeAsAttached(emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath) if err != nil { @@ -191,13 +191,13 @@ func Test_AddPodToVolume_Positive_ExistingVolumeNewNode(t *testing.T) { }, } volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]} - generatedVolumeName, err := volumehelper.GetUniqueVolumeNameFromSpec(plugin, volumeSpec) + generatedVolumeName, err := 
util.GetUniqueVolumeNameFromSpec(plugin, volumeSpec) err = asw.MarkVolumeAsAttached(emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath) if err != nil { t.Fatalf("MarkVolumeAsAttached failed. Expected: Actual: <%v>", err) } - podName := volumehelper.GetUniquePodName(pod) + podName := util.GetUniquePodName(pod) mounter, err := plugin.NewMounter(volumeSpec, pod, volume.VolumeOptions{}) if err != nil { @@ -255,14 +255,14 @@ func Test_AddPodToVolume_Positive_ExistingVolumeExistingNode(t *testing.T) { } volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]} - generatedVolumeName, err := volumehelper.GetUniqueVolumeNameFromSpec( + generatedVolumeName, err := util.GetUniqueVolumeNameFromSpec( plugin, volumeSpec) err = asw.MarkVolumeAsAttached(emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath) if err != nil { t.Fatalf("MarkVolumeAsAttached failed. Expected: Actual: <%v>", err) } - podName := volumehelper.GetUniquePodName(pod) + podName := util.GetUniquePodName(pod) mounter, err := plugin.NewMounter(volumeSpec, pod, volume.VolumeOptions{}) if err != nil { @@ -339,10 +339,10 @@ func Test_AddPodToVolume_Negative_VolumeDoesntExist(t *testing.T) { err) } - volumeName, err := volumehelper.GetUniqueVolumeNameFromSpec( + volumeName, err := util.GetUniqueVolumeNameFromSpec( plugin, volumeSpec) - podName := volumehelper.GetUniquePodName(pod) + podName := util.GetUniquePodName(pod) mounter, err := plugin.NewMounter(volumeSpec, pod, volume.VolumeOptions{}) if err != nil { @@ -404,7 +404,7 @@ func Test_MarkDeviceAsMounted_Positive_NewVolume(t *testing.T) { volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]} devicePath := "fake/device/path" deviceMountPath := "fake/device/mount/path" - generatedVolumeName, err := volumehelper.GetUniqueVolumeNameFromSpec(plugin, volumeSpec) + generatedVolumeName, err := util.GetUniqueVolumeNameFromSpec(plugin, volumeSpec) err = asw.MarkVolumeAsAttached(emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath) if err != nil { diff 
--git a/pkg/kubelet/volumemanager/cache/desired_state_of_world.go b/pkg/kubelet/volumemanager/cache/desired_state_of_world.go index 812c885939..a61752d8c6 100644 --- a/pkg/kubelet/volumemanager/cache/desired_state_of_world.go +++ b/pkg/kubelet/volumemanager/cache/desired_state_of_world.go @@ -26,9 +26,9 @@ import ( "k8s.io/api/core/v1" "k8s.io/kubernetes/pkg/volume" + "k8s.io/kubernetes/pkg/volume/util" "k8s.io/kubernetes/pkg/volume/util/operationexecutor" "k8s.io/kubernetes/pkg/volume/util/types" - "k8s.io/kubernetes/pkg/volume/util/volumehelper" ) // DesiredStateOfWorld defines a set of thread-safe operations for the kubelet @@ -206,7 +206,7 @@ func (dsw *desiredStateOfWorld) AddPodToVolume( // For attachable volumes, use the unique volume name as reported by // the plugin. volumeName, err = - volumehelper.GetUniqueVolumeNameFromSpec(volumePlugin, volumeSpec) + util.GetUniqueVolumeNameFromSpec(volumePlugin, volumeSpec) if err != nil { return "", fmt.Errorf( "failed to GetUniqueVolumeNameFromSpec for volumeSpec %q using volume plugin %q err=%v", @@ -217,7 +217,7 @@ func (dsw *desiredStateOfWorld) AddPodToVolume( } else { // For non-attachable volumes, generate a unique name based on the pod // namespace and name and the name of the volume within the pod. 
- volumeName = volumehelper.GetUniqueVolumeNameForNonAttachableVolume(podName, volumePlugin, volumeSpec) + volumeName = util.GetUniqueVolumeNameForNonAttachableVolume(podName, volumePlugin, volumeSpec) } volumeObj, volumeExists := dsw.volumesToMount[volumeName] diff --git a/pkg/kubelet/volumemanager/cache/desired_state_of_world_test.go b/pkg/kubelet/volumemanager/cache/desired_state_of_world_test.go index 849a607fec..42707169d9 100644 --- a/pkg/kubelet/volumemanager/cache/desired_state_of_world_test.go +++ b/pkg/kubelet/volumemanager/cache/desired_state_of_world_test.go @@ -23,8 +23,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/volume" volumetesting "k8s.io/kubernetes/pkg/volume/testing" + "k8s.io/kubernetes/pkg/volume/util" volumetypes "k8s.io/kubernetes/pkg/volume/util/types" - "k8s.io/kubernetes/pkg/volume/util/volumehelper" ) // Calls AddPodToVolume() to add new pod to new volume @@ -54,7 +54,7 @@ func Test_AddPodToVolume_Positive_NewPodNewVolume(t *testing.T) { } volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]} - podName := volumehelper.GetUniquePodName(pod) + podName := util.GetUniquePodName(pod) // Act generatedVolumeName, err := dsw.AddPodToVolume( @@ -99,7 +99,7 @@ func Test_AddPodToVolume_Positive_ExistingPodExistingVolume(t *testing.T) { } volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]} - podName := volumehelper.GetUniquePodName(pod) + podName := util.GetUniquePodName(pod) // Act generatedVolumeName, err := dsw.AddPodToVolume( @@ -144,7 +144,7 @@ func Test_DeletePodFromVolume_Positive_PodExistsVolumeExists(t *testing.T) { } volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]} - podName := volumehelper.GetUniquePodName(pod) + podName := util.GetUniquePodName(pod) generatedVolumeName, err := dsw.AddPodToVolume( podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */) if err != nil { @@ -197,7 +197,7 @@ func Test_MarkVolumesReportedInUse_Positive_NewPodNewVolume(t *testing.T) { } 
volume1Spec := &volume.Spec{Volume: &pod1.Spec.Volumes[0]} - pod1Name := volumehelper.GetUniquePodName(pod1) + pod1Name := util.GetUniquePodName(pod1) pod2 := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -219,7 +219,7 @@ func Test_MarkVolumesReportedInUse_Positive_NewPodNewVolume(t *testing.T) { } volume2Spec := &volume.Spec{Volume: &pod2.Spec.Volumes[0]} - pod2Name := volumehelper.GetUniquePodName(pod2) + pod2Name := util.GetUniquePodName(pod2) pod3 := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -241,7 +241,7 @@ func Test_MarkVolumesReportedInUse_Positive_NewPodNewVolume(t *testing.T) { } volume3Spec := &volume.Spec{Volume: &pod3.Spec.Volumes[0]} - pod3Name := volumehelper.GetUniquePodName(pod3) + pod3Name := util.GetUniquePodName(pod3) generatedVolume1Name, err := dsw.AddPodToVolume( pod1Name, pod1, volume1Spec, volume1Spec.Name(), "" /* volumeGidValue */) diff --git a/pkg/kubelet/volumemanager/populator/BUILD b/pkg/kubelet/volumemanager/populator/BUILD index 4cd3412d8c..c44312cb7f 100644 --- a/pkg/kubelet/volumemanager/populator/BUILD +++ b/pkg/kubelet/volumemanager/populator/BUILD @@ -19,8 +19,8 @@ go_library( "//pkg/kubelet/util/format:go_default_library", "//pkg/kubelet/volumemanager/cache:go_default_library", "//pkg/volume:go_default_library", + "//pkg/volume/util:go_default_library", "//pkg/volume/util/types:go_default_library", - "//pkg/volume/util/volumehelper:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -58,8 +58,8 @@ go_test( "//pkg/kubelet/status/testing:go_default_library", "//pkg/kubelet/volumemanager/cache:go_default_library", "//pkg/volume/testing:go_default_library", + "//pkg/volume/util:go_default_library", "//pkg/volume/util/types:go_default_library", - "//pkg/volume/util/volumehelper:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", 
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go b/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go index 4f3967afbe..a00d605c08 100644 --- a/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go +++ b/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go @@ -41,8 +41,8 @@ import ( "k8s.io/kubernetes/pkg/kubelet/util/format" "k8s.io/kubernetes/pkg/kubelet/volumemanager/cache" "k8s.io/kubernetes/pkg/volume" + "k8s.io/kubernetes/pkg/volume/util" volumetypes "k8s.io/kubernetes/pkg/volume/util/types" - "k8s.io/kubernetes/pkg/volume/util/volumehelper" ) // DesiredStateOfWorldPopulator periodically loops through the list of active @@ -176,7 +176,7 @@ func (dswp *desiredStateOfWorldPopulator) isPodTerminated(pod *v1.Pod) bool { if !found { podStatus = pod.Status } - return volumehelper.IsPodTerminated(pod, podStatus) + return util.IsPodTerminated(pod, podStatus) } // Iterate through all pods and add to desired state of world if they don't @@ -260,7 +260,7 @@ func (dswp *desiredStateOfWorldPopulator) processPodVolumes(pod *v1.Pod) { return } - uniquePodName := volumehelper.GetUniquePodName(pod) + uniquePodName := util.GetUniquePodName(pod) if dswp.podPreviouslyProcessed(uniquePodName) { return } @@ -393,7 +393,7 @@ func (dswp *desiredStateOfWorldPopulator) createVolumeSpec( // TODO: remove feature gate check after no longer needed if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { - volumeMode, err := volumehelper.GetVolumeMode(volumeSpec) + volumeMode, err := util.GetVolumeMode(volumeSpec) if err != nil { return nil, "", err } @@ -525,7 +525,7 @@ func (dswp *desiredStateOfWorldPopulator) makeVolumeMap(containers []v1.Containe } func getPVVolumeGidAnnotationValue(pv *v1.PersistentVolume) string { - if volumeGid, ok := 
pv.Annotations[volumehelper.VolumeGidAnnotationKey]; ok { + if volumeGid, ok := pv.Annotations[util.VolumeGidAnnotationKey]; ok { return volumeGid } diff --git a/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator_test.go b/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator_test.go index 992116c4ad..f2fcf19723 100644 --- a/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator_test.go +++ b/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator_test.go @@ -35,8 +35,8 @@ import ( statustest "k8s.io/kubernetes/pkg/kubelet/status/testing" "k8s.io/kubernetes/pkg/kubelet/volumemanager/cache" volumetesting "k8s.io/kubernetes/pkg/volume/testing" + "k8s.io/kubernetes/pkg/volume/util" "k8s.io/kubernetes/pkg/volume/util/types" - "k8s.io/kubernetes/pkg/volume/util/volumehelper" ) func TestFindAndAddNewPods_FindAndRemoveDeletedPods(t *testing.T) { @@ -74,7 +74,7 @@ func TestFindAndAddNewPods_FindAndRemoveDeletedPods(t *testing.T) { fakePodManager.AddPod(pod) - podName := volumehelper.GetUniquePodName(pod) + podName := util.GetUniquePodName(pod) generatedVolumeName := "fake-plugin/" + pod.Spec.Volumes[0].Name @@ -184,7 +184,7 @@ func TestFindAndAddNewPods_FindAndRemoveDeletedPods_Valid_Block_VolumeDevices(t fakePodManager.AddPod(pod) - podName := volumehelper.GetUniquePodName(pod) + podName := util.GetUniquePodName(pod) generatedVolumeName := "fake-plugin/" + pod.Spec.Volumes[0].Name diff --git a/pkg/kubelet/volumemanager/reconciler/BUILD b/pkg/kubelet/volumemanager/reconciler/BUILD index b5c657a489..b2a41d0006 100644 --- a/pkg/kubelet/volumemanager/reconciler/BUILD +++ b/pkg/kubelet/volumemanager/reconciler/BUILD @@ -19,10 +19,10 @@ go_library( "//pkg/util/mount:go_default_library", "//pkg/util/strings:go_default_library", "//pkg/volume:go_default_library", + "//pkg/volume/util:go_default_library", "//pkg/volume/util/nestedpendingoperations:go_default_library", 
"//pkg/volume/util/operationexecutor:go_default_library", "//pkg/volume/util/types:go_default_library", - "//pkg/volume/util/volumehelper:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -42,8 +42,8 @@ go_test( "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/testing:go_default_library", + "//pkg/volume/util:go_default_library", "//pkg/volume/util/operationexecutor:go_default_library", - "//pkg/volume/util/volumehelper:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", diff --git a/pkg/kubelet/volumemanager/reconciler/reconciler.go b/pkg/kubelet/volumemanager/reconciler/reconciler.go index 9e65b30d39..dfde00baf4 100644 --- a/pkg/kubelet/volumemanager/reconciler/reconciler.go +++ b/pkg/kubelet/volumemanager/reconciler/reconciler.go @@ -41,10 +41,10 @@ import ( "k8s.io/kubernetes/pkg/util/mount" utilstrings "k8s.io/kubernetes/pkg/util/strings" volumepkg "k8s.io/kubernetes/pkg/volume" + "k8s.io/kubernetes/pkg/volume/util" "k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations" "k8s.io/kubernetes/pkg/volume/util/operationexecutor" volumetypes "k8s.io/kubernetes/pkg/volume/util/types" - "k8s.io/kubernetes/pkg/volume/util/volumehelper" ) // Reconciler runs a periodic loop to reconcile the desired state of the world @@ -445,12 +445,12 @@ func (rc *reconciler) reconstructVolume(volume podVolume) (*reconstructedVolume, var uniqueVolumeName v1.UniqueVolumeName if attachablePlugin != nil { - uniqueVolumeName, err = volumehelper.GetUniqueVolumeNameFromSpec(plugin, volumeSpec) + uniqueVolumeName, err = util.GetUniqueVolumeNameFromSpec(plugin, volumeSpec) if err != nil { return nil, err } } else { - uniqueVolumeName = 
volumehelper.GetUniqueVolumeNameForNonAttachableVolume(volume.podName, plugin, volumeSpec) + uniqueVolumeName = util.GetUniqueVolumeNameForNonAttachableVolume(volume.podName, plugin, volumeSpec) } // Check existence of mount point for filesystem volume or symbolic link for block volume isExist, checkErr := rc.operationExecutor.CheckVolumeExistenceOperation(volumeSpec, volume.mountPath, volumeSpec.Name(), rc.mounter, uniqueVolumeName, volume.podName, pod.UID, attachablePlugin) diff --git a/pkg/kubelet/volumemanager/reconciler/reconciler_test.go b/pkg/kubelet/volumemanager/reconciler/reconciler_test.go index 22b49d0248..0c6a20af6e 100644 --- a/pkg/kubelet/volumemanager/reconciler/reconciler_test.go +++ b/pkg/kubelet/volumemanager/reconciler/reconciler_test.go @@ -36,8 +36,8 @@ import ( "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" volumetesting "k8s.io/kubernetes/pkg/volume/testing" + "k8s.io/kubernetes/pkg/volume/util" "k8s.io/kubernetes/pkg/volume/util/operationexecutor" - "k8s.io/kubernetes/pkg/volume/util/volumehelper" ) const ( @@ -149,7 +149,7 @@ func Test_Run_Positive_VolumeAttachAndMount(t *testing.T) { } volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]} - podName := volumehelper.GetUniquePodName(pod) + podName := util.GetUniquePodName(pod) generatedVolumeName, err := dsw.AddPodToVolume( podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */) @@ -227,7 +227,7 @@ func Test_Run_Positive_VolumeMountControllerAttachEnabled(t *testing.T) { } volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]} - podName := volumehelper.GetUniquePodName(pod) + podName := util.GetUniquePodName(pod) generatedVolumeName, err := dsw.AddPodToVolume( podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */) dsw.MarkVolumesReportedInUse([]v1.UniqueVolumeName{generatedVolumeName}) @@ -306,7 +306,7 @@ func Test_Run_Positive_VolumeAttachMountUnmountDetach(t *testing.T) { } volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]} - 
podName := volumehelper.GetUniquePodName(pod) + podName := util.GetUniquePodName(pod) generatedVolumeName, err := dsw.AddPodToVolume( podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */) @@ -396,7 +396,7 @@ func Test_Run_Positive_VolumeUnmountControllerAttachEnabled(t *testing.T) { } volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]} - podName := volumehelper.GetUniquePodName(pod) + podName := util.GetUniquePodName(pod) generatedVolumeName, err := dsw.AddPodToVolume( podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */) @@ -491,7 +491,7 @@ func Test_Run_Positive_VolumeAttachAndMap(t *testing.T) { volumeSpec := &volume.Spec{ PersistentVolume: gcepv, } - podName := volumehelper.GetUniquePodName(pod) + podName := util.GetUniquePodName(pod) generatedVolumeName, err := dsw.AddPodToVolume( podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */) @@ -582,7 +582,7 @@ func Test_Run_Positive_BlockVolumeMapControllerAttachEnabled(t *testing.T) { volumeSpec := &volume.Spec{ PersistentVolume: gcepv, } - podName := volumehelper.GetUniquePodName(pod) + podName := util.GetUniquePodName(pod) generatedVolumeName, err := dsw.AddPodToVolume( podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */) dsw.MarkVolumesReportedInUse([]v1.UniqueVolumeName{generatedVolumeName}) @@ -674,7 +674,7 @@ func Test_Run_Positive_BlockVolumeAttachMapUnmapDetach(t *testing.T) { volumeSpec := &volume.Spec{ PersistentVolume: gcepv, } - podName := volumehelper.GetUniquePodName(pod) + podName := util.GetUniquePodName(pod) generatedVolumeName, err := dsw.AddPodToVolume( podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */) @@ -776,7 +776,7 @@ func Test_Run_Positive_VolumeUnmapControllerAttachEnabled(t *testing.T) { volumeSpec := &volume.Spec{ PersistentVolume: gcepv, } - podName := volumehelper.GetUniquePodName(pod) + podName := util.GetUniquePodName(pod) generatedVolumeName, err := dsw.AddPodToVolume( podName, pod, volumeSpec, 
volumeSpec.Name(), "" /* volumeGidValue */) diff --git a/pkg/kubelet/volumemanager/volume_manager.go b/pkg/kubelet/volumemanager/volume_manager.go index d8a1079858..8668972ba0 100644 --- a/pkg/kubelet/volumemanager/volume_manager.go +++ b/pkg/kubelet/volumemanager/volume_manager.go @@ -43,7 +43,7 @@ import ( "k8s.io/kubernetes/pkg/volume/util" "k8s.io/kubernetes/pkg/volume/util/operationexecutor" "k8s.io/kubernetes/pkg/volume/util/types" - "k8s.io/kubernetes/pkg/volume/util/volumehelper" + "k8s.io/kubernetes/pkg/volume/util/volumepathhandler" ) const ( @@ -169,7 +169,7 @@ func NewVolumeManager( volumePluginMgr, recorder, checkNodeCapabilitiesBeforeMount, - util.NewBlockVolumePathHandler())), + volumepathhandler.NewBlockVolumePathHandler())), } vm.desiredStateOfWorldPopulator = populator.NewDesiredStateOfWorldPopulator( @@ -264,7 +264,7 @@ func (vm *volumeManager) GetMountedVolumesForPod(podName types.UniquePodName) co } func (vm *volumeManager) GetExtraSupplementalGroupsForPod(pod *v1.Pod) []int64 { - podName := volumehelper.GetUniquePodName(pod) + podName := util.GetUniquePodName(pod) supplementalGroups := sets.NewString() for _, mountedVolume := range vm.actualStateOfWorld.GetMountedVolumesForPod(podName) { @@ -340,7 +340,7 @@ func (vm *volumeManager) WaitForAttachAndMount(pod *v1.Pod) error { } glog.V(3).Infof("Waiting for volumes to attach and mount for pod %q", format.Pod(pod)) - uniquePodName := volumehelper.GetUniquePodName(pod) + uniquePodName := util.GetUniquePodName(pod) // Some pods expect to have Setup called over and over again to update. // Remount plugins for which this is true. 
(Atomically updating volumes, diff --git a/pkg/kubelet/volumemanager/volume_manager_test.go b/pkg/kubelet/volumemanager/volume_manager_test.go index 8e8bf368b7..4f15e80d9c 100644 --- a/pkg/kubelet/volumemanager/volume_manager_test.go +++ b/pkg/kubelet/volumemanager/volume_manager_test.go @@ -41,8 +41,8 @@ import ( "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" volumetest "k8s.io/kubernetes/pkg/volume/testing" + "k8s.io/kubernetes/pkg/volume/util" "k8s.io/kubernetes/pkg/volume/util/types" - "k8s.io/kubernetes/pkg/volume/util/volumehelper" ) const ( @@ -168,7 +168,7 @@ func TestGetExtraSupplementalGroupsForPod(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "pvA", Annotations: map[string]string{ - volumehelper.VolumeGidAnnotationKey: tc.gidAnnotation, + util.VolumeGidAnnotationKey: tc.gidAnnotation, }, }, Spec: v1.PersistentVolumeSpec{ diff --git a/pkg/volume/BUILD b/pkg/volume/BUILD index 72079f5740..ffcd7bc4de 100644 --- a/pkg/volume/BUILD +++ b/pkg/volume/BUILD @@ -1,10 +1,4 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", @@ -16,7 +10,6 @@ go_library( "metrics_nil.go", "metrics_statfs.go", "plugins.go", - "util.go", "volume.go", ] + select({ "@io_bazel_rules_go//go/platform:android": [ @@ -55,22 +48,20 @@ go_library( "//conditions:default": [], }), importpath = "k8s.io/kubernetes/pkg/volume", + visibility = ["//visibility:public"], deps = [ "//pkg/cloudprovider:go_default_library", "//pkg/util/io:go_default_library", "//pkg/util/mount:go_default_library", - "//pkg/volume/util:go_default_library", + "//pkg/volume/util/fs:go_default_library", + "//pkg/volume/util/recyclerclient:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", - 
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", ], ) @@ -80,19 +71,12 @@ go_test( srcs = [ "metrics_nil_test.go", "plugins_test.go", - "util_test.go", ], embed = [":go_default_library"], deps = [ - "//pkg/apis/core:go_default_library", - "//pkg/util/slice:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", ], ) @@ -162,4 +146,5 @@ filegroup( "//pkg/volume/vsphere_volume:all-srcs", ], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/pkg/volume/aws_ebs/BUILD b/pkg/volume/aws_ebs/BUILD index fe930d302f..5354b9e31b 100644 --- a/pkg/volume/aws_ebs/BUILD +++ b/pkg/volume/aws_ebs/BUILD @@ -23,7 +23,7 @@ go_library( "//pkg/util/strings:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", - "//pkg/volume/util/volumehelper:go_default_library", + "//pkg/volume/util/volumepathhandler:go_default_library", "//vendor/github.com/golang/glog:go_default_library", 
"//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", diff --git a/pkg/volume/aws_ebs/attacher.go b/pkg/volume/aws_ebs/attacher.go index 2c58f22a69..059431e166 100644 --- a/pkg/volume/aws_ebs/attacher.go +++ b/pkg/volume/aws_ebs/attacher.go @@ -30,7 +30,6 @@ import ( "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" - "k8s.io/kubernetes/pkg/volume/util/volumehelper" ) type awsElasticBlockStoreAttacher struct { @@ -219,8 +218,8 @@ func (attacher *awsElasticBlockStoreAttacher) MountDevice(spec *volume.Spec, dev options = append(options, "ro") } if notMnt { - diskMounter := volumehelper.NewSafeFormatAndMountFromHost(awsElasticBlockStorePluginName, attacher.host) - mountOptions := volume.MountOptionFromSpec(spec, options...) + diskMounter := volumeutil.NewSafeFormatAndMountFromHost(awsElasticBlockStorePluginName, attacher.host) + mountOptions := volumeutil.MountOptionFromSpec(spec, options...) err = diskMounter.FormatAndMount(devicePath, deviceMountPath, volumeSource.FSType, mountOptions) if err != nil { os.Remove(deviceMountPath) diff --git a/pkg/volume/aws_ebs/aws_ebs.go b/pkg/volume/aws_ebs/aws_ebs.go index 9a0091928e..869d06773c 100644 --- a/pkg/volume/aws_ebs/aws_ebs.go +++ b/pkg/volume/aws_ebs/aws_ebs.go @@ -34,7 +34,6 @@ import ( kstrings "k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" - "k8s.io/kubernetes/pkg/volume/util/volumehelper" ) // This is the primary entrypoint for volume plugins. 
@@ -134,7 +133,7 @@ func (plugin *awsElasticBlockStorePlugin) newMounterInternal(spec *volume.Spec, }, fsType: fsType, readOnly: readOnly, - diskMounter: volumehelper.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host)}, nil + diskMounter: util.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host)}, nil } func (plugin *awsElasticBlockStorePlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) { @@ -456,7 +455,7 @@ type awsElasticBlockStoreProvisioner struct { var _ volume.Provisioner = &awsElasticBlockStoreProvisioner{} func (c *awsElasticBlockStoreProvisioner) Provision() (*v1.PersistentVolume, error) { - if !volume.AccessModesContainedInAll(c.plugin.GetAccessModes(), c.options.PVC.Spec.AccessModes) { + if !util.AccessModesContainedInAll(c.plugin.GetAccessModes(), c.options.PVC.Spec.AccessModes) { return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", c.options.PVC.Spec.AccessModes, c.plugin.GetAccessModes()) } @@ -475,7 +474,7 @@ func (c *awsElasticBlockStoreProvisioner) Provision() (*v1.PersistentVolume, err Name: c.options.PVName, Labels: map[string]string{}, Annotations: map[string]string{ - volumehelper.VolumeDynamicallyCreatedByKey: "aws-ebs-dynamic-provisioner", + util.VolumeDynamicallyCreatedByKey: "aws-ebs-dynamic-provisioner", }, }, Spec: v1.PersistentVolumeSpec{ diff --git a/pkg/volume/aws_ebs/aws_ebs_block.go b/pkg/volume/aws_ebs/aws_ebs_block.go index 5f55358dc6..d104bafa5e 100644 --- a/pkg/volume/aws_ebs/aws_ebs_block.go +++ b/pkg/volume/aws_ebs/aws_ebs_block.go @@ -30,7 +30,7 @@ import ( "k8s.io/kubernetes/pkg/util/mount" kstrings "k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/volume" - "k8s.io/kubernetes/pkg/volume/util" + "k8s.io/kubernetes/pkg/volume/util/volumepathhandler" ) var _ volume.VolumePlugin = &awsElasticBlockStorePlugin{} @@ -41,7 +41,7 @@ var _ volume.ProvisionableVolumePlugin = &awsElasticBlockStorePlugin{} func (plugin 
*awsElasticBlockStorePlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeName, mapPath string) (*volume.Spec, error) { pluginDir := plugin.host.GetVolumeDevicePluginDir(awsElasticBlockStorePluginName) - blkutil := util.NewBlockVolumePathHandler() + blkutil := volumepathhandler.NewBlockVolumePathHandler() globalMapPathUUID, err := blkutil.FindGlobalMapPathUUIDFromPod(pluginDir, mapPath, podUID) if err != nil { return nil, err diff --git a/pkg/volume/aws_ebs/aws_util.go b/pkg/volume/aws_ebs/aws_util.go index 932617d4e9..94f9dd30ec 100644 --- a/pkg/volume/aws_ebs/aws_util.go +++ b/pkg/volume/aws_ebs/aws_util.go @@ -80,12 +80,12 @@ func (util *AWSDiskUtil) CreateVolume(c *awsElasticBlockStoreProvisioner) (aws.K } else { tags = *c.options.CloudTags } - tags["Name"] = volume.GenerateVolumeName(c.options.ClusterName, c.options.PVName, 255) // AWS tags can have 255 characters + tags["Name"] = volumeutil.GenerateVolumeName(c.options.ClusterName, c.options.PVName, 255) // AWS tags can have 255 characters capacity := c.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] requestBytes := capacity.Value() // AWS works with gigabytes, convert to GiB with rounding up - requestGB := int(volume.RoundUpSize(requestBytes, 1024*1024*1024)) + requestGB := int(volumeutil.RoundUpSize(requestBytes, 1024*1024*1024)) volumeOptions := &aws.VolumeOptions{ CapacityGB: requestGB, Tags: tags, diff --git a/pkg/volume/azure_dd/BUILD b/pkg/volume/azure_dd/BUILD index ae188fecf5..163c75e252 100644 --- a/pkg/volume/azure_dd/BUILD +++ b/pkg/volume/azure_dd/BUILD @@ -60,7 +60,6 @@ go_library( "//pkg/util/strings:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", - "//pkg/volume/util/volumehelper:go_default_library", "//vendor/github.com/Azure/azure-sdk-for-go/arm/compute:go_default_library", "//vendor/github.com/Azure/azure-sdk-for-go/arm/storage:go_default_library", "//vendor/github.com/golang/glog:go_default_library", diff --git 
a/pkg/volume/azure_dd/attacher.go b/pkg/volume/azure_dd/attacher.go index 397c2055d8..c422ffd5da 100644 --- a/pkg/volume/azure_dd/attacher.go +++ b/pkg/volume/azure_dd/attacher.go @@ -36,8 +36,7 @@ import ( "k8s.io/kubernetes/pkg/util/keymutex" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" - volumeutil "k8s.io/kubernetes/pkg/volume/util" - "k8s.io/kubernetes/pkg/volume/util/volumehelper" + "k8s.io/kubernetes/pkg/volume/util" ) type azureDiskDetacher struct { @@ -249,8 +248,8 @@ func (attacher *azureDiskAttacher) MountDevice(spec *volume.Spec, devicePath str options := []string{} if notMnt { - diskMounter := volumehelper.NewSafeFormatAndMountFromHost(azureDataDiskPluginName, attacher.plugin.host) - mountOptions := volume.MountOptionFromSpec(spec, options...) + diskMounter := util.NewSafeFormatAndMountFromHost(azureDataDiskPluginName, attacher.plugin.host) + mountOptions := util.MountOptionFromSpec(spec, options...) err = diskMounter.FormatAndMount(devicePath, deviceMountPath, *volumeSource.FSType, mountOptions) if err != nil { if cleanErr := os.Remove(deviceMountPath); cleanErr != nil { @@ -295,7 +294,7 @@ func (d *azureDiskDetacher) Detach(diskURI string, nodeName types.NodeName) erro // UnmountDevice unmounts the volume on the node func (detacher *azureDiskDetacher) UnmountDevice(deviceMountPath string) error { - err := volumeutil.UnmountPath(deviceMountPath, detacher.plugin.host.GetMounter(detacher.plugin.GetPluginName())) + err := util.UnmountPath(deviceMountPath, detacher.plugin.host.GetMounter(detacher.plugin.GetPluginName())) if err == nil { glog.V(4).Infof("azureDisk - Device %s was unmounted", deviceMountPath) } else { diff --git a/pkg/volume/azure_dd/azure_mounter.go b/pkg/volume/azure_dd/azure_mounter.go index da703975cb..514a6dcb10 100644 --- a/pkg/volume/azure_dd/azure_mounter.go +++ b/pkg/volume/azure_dd/azure_mounter.go @@ -116,7 +116,7 @@ func (m *azureDiskMounter) SetUpAt(dir string, fsGroup *int64) error { } if 
m.options.MountOptions != nil { - options = volume.JoinMountOptions(m.options.MountOptions, options) + options = util.JoinMountOptions(m.options.MountOptions, options) } glog.V(4).Infof("azureDisk - Attempting to mount %s on %s", diskName, dir) diff --git a/pkg/volume/azure_dd/azure_provision.go b/pkg/volume/azure_dd/azure_provision.go index 5f11743d52..72b7f33625 100644 --- a/pkg/volume/azure_dd/azure_provision.go +++ b/pkg/volume/azure_dd/azure_provision.go @@ -24,6 +24,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/volume" + "k8s.io/kubernetes/pkg/volume/util" ) type azureDiskProvisioner struct { @@ -65,7 +66,7 @@ func (d *azureDiskDeleter) Delete() error { } func (p *azureDiskProvisioner) Provision() (*v1.PersistentVolume, error) { - if !volume.AccessModesContainedInAll(p.plugin.GetAccessModes(), p.options.PVC.Spec.AccessModes) { + if !util.AccessModesContainedInAll(p.plugin.GetAccessModes(), p.options.PVC.Spec.AccessModes) { return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", p.options.PVC.Spec.AccessModes, p.plugin.GetAccessModes()) } supportedModes := p.plugin.GetAccessModes() @@ -93,10 +94,10 @@ func (p *azureDiskProvisioner) Provision() (*v1.PersistentVolume, error) { err error ) // maxLength = 79 - (4 for ".vhd") = 75 - name := volume.GenerateVolumeName(p.options.ClusterName, p.options.PVName, 75) + name := util.GenerateVolumeName(p.options.ClusterName, p.options.PVName, 75) capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] requestBytes := capacity.Value() - requestGB := int(volume.RoundUpSize(requestBytes, 1024*1024*1024)) + requestGB := int(util.RoundUpSize(requestBytes, 1024*1024*1024)) for k, v := range p.options.Parameters { switch strings.ToLower(k) { diff --git a/pkg/volume/azure_file/BUILD b/pkg/volume/azure_file/BUILD index dfb444765d..712b69f02b 100644 --- a/pkg/volume/azure_file/BUILD +++ 
b/pkg/volume/azure_file/BUILD @@ -22,7 +22,6 @@ go_library( "//pkg/util/strings:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", - "//pkg/volume/util/volumehelper:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", diff --git a/pkg/volume/azure_file/azure_file.go b/pkg/volume/azure_file/azure_file.go index 7b925495a6..c0e29d9161 100644 --- a/pkg/volume/azure_file/azure_file.go +++ b/pkg/volume/azure_file/azure_file.go @@ -31,7 +31,7 @@ import ( "k8s.io/kubernetes/pkg/util/mount" kstrings "k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/volume" - "k8s.io/kubernetes/pkg/volume/util" + volutil "k8s.io/kubernetes/pkg/volume/util" ) // ProbeVolumePlugins is the primary endpoint for volume plugins @@ -122,7 +122,7 @@ func (plugin *azureFilePlugin) newMounterInternal(spec *volume.Spec, pod *v1.Pod secretName: secretName, shareName: share, readOnly: readOnly, - mountOptions: volume.MountOptionFromSpec(spec), + mountOptions: volutil.MountOptionFromSpec(spec), }, nil } @@ -168,7 +168,7 @@ func (plugin *azureFilePlugin) ExpandVolumeDevice( return oldSize, err } - if err := azure.ResizeFileShare(accountName, accountKey, shareName, int(volume.RoundUpToGiB(newSize))); err != nil { + if err := azure.ResizeFileShare(accountName, accountKey, shareName, int(volutil.RoundUpToGiB(newSize))); err != nil { return oldSize, err } @@ -262,7 +262,7 @@ func (b *azureFileMounter) SetUpAt(dir string, fsGroup *int64) error { if b.readOnly { options = append(options, "ro") } - mountOptions = volume.JoinMountOptions(b.mountOptions, options) + mountOptions = volutil.JoinMountOptions(b.mountOptions, options) mountOptions = appendDefaultMountOptions(mountOptions, fsGroup) } @@ -306,7 +306,7 @@ func (c *azureFileUnmounter) TearDown() error { } func (c *azureFileUnmounter) TearDownAt(dir string) error { - 
return util.UnmountPath(dir, c.mounter) + return volutil.UnmountPath(dir, c.mounter) } func getVolumeSource(spec *volume.Spec) (string, bool, error) { diff --git a/pkg/volume/azure_file/azure_provision.go b/pkg/volume/azure_file/azure_provision.go index 6386d5354e..dc155334ff 100644 --- a/pkg/volume/azure_file/azure_provision.go +++ b/pkg/volume/azure_file/azure_provision.go @@ -28,7 +28,7 @@ import ( "k8s.io/kubernetes/pkg/cloudprovider/providers/azure" utilstrings "k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/volume" - "k8s.io/kubernetes/pkg/volume/util/volumehelper" + "k8s.io/kubernetes/pkg/volume/util" ) var _ volume.DeletableVolumePlugin = &azureFilePlugin{} @@ -132,18 +132,18 @@ type azureFileProvisioner struct { var _ volume.Provisioner = &azureFileProvisioner{} func (a *azureFileProvisioner) Provision() (*v1.PersistentVolume, error) { - if !volume.AccessModesContainedInAll(a.plugin.GetAccessModes(), a.options.PVC.Spec.AccessModes) { + if !util.AccessModesContainedInAll(a.plugin.GetAccessModes(), a.options.PVC.Spec.AccessModes) { return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", a.options.PVC.Spec.AccessModes, a.plugin.GetAccessModes()) } var sku, location, account string // File share name has a length limit of 63, and it cannot contain two consecutive '-'s. - name := volume.GenerateVolumeName(a.options.ClusterName, a.options.PVName, 63) + name := util.GenerateVolumeName(a.options.ClusterName, a.options.PVName, 63) name = strings.Replace(name, "--", "-", -1) capacity := a.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] requestBytes := capacity.Value() - requestGiB := int(volume.RoundUpSize(requestBytes, 1024*1024*1024)) + requestGiB := int(util.RoundUpSize(requestBytes, 1024*1024*1024)) secretNamespace := a.options.PVC.Namespace // Apply ProvisionerParameters (case-insensitive). We leave validation of // the values to the cloud provider. 
@@ -182,7 +182,7 @@ func (a *azureFileProvisioner) Provision() (*v1.PersistentVolume, error) { Name: a.options.PVName, Labels: map[string]string{}, Annotations: map[string]string{ - volumehelper.VolumeDynamicallyCreatedByKey: "azure-file-dynamic-provisioner", + util.VolumeDynamicallyCreatedByKey: "azure-file-dynamic-provisioner", }, }, Spec: v1.PersistentVolumeSpec{ diff --git a/pkg/volume/cephfs/cephfs.go b/pkg/volume/cephfs/cephfs.go index 2d97708c84..426efcc2c8 100644 --- a/pkg/volume/cephfs/cephfs.go +++ b/pkg/volume/cephfs/cephfs.go @@ -148,7 +148,7 @@ func (plugin *cephfsPlugin) newMounterInternal(spec *volume.Spec, podUID types.U readonly: readOnly, mounter: mounter, plugin: plugin, - mountOptions: volume.MountOptionFromSpec(spec), + mountOptions: util.MountOptionFromSpec(spec), }, }, nil } @@ -323,7 +323,7 @@ func (cephfsVolume *cephfs) execMount(mountpoint string) error { } src += hosts[i] + ":" + cephfsVolume.path - mountOptions := volume.JoinMountOptions(cephfsVolume.mountOptions, opt) + mountOptions := util.JoinMountOptions(cephfsVolume.mountOptions, opt) if err := cephfsVolume.mounter.Mount(src, mountpoint, "ceph", mountOptions); err != nil { return fmt.Errorf("CephFS: mount failed: %v", err) } diff --git a/pkg/volume/cinder/BUILD b/pkg/volume/cinder/BUILD index d092b5bf1a..9bd619cc44 100644 --- a/pkg/volume/cinder/BUILD +++ b/pkg/volume/cinder/BUILD @@ -24,7 +24,6 @@ go_library( "//pkg/util/strings:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", - "//pkg/volume/util/volumehelper:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", diff --git a/pkg/volume/cinder/attacher.go b/pkg/volume/cinder/attacher.go index bc15a168fb..bfe66d5660 100644 --- a/pkg/volume/cinder/attacher.go +++ b/pkg/volume/cinder/attacher.go @@ -31,7 +31,6 @@ import ( 
"k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" - "k8s.io/kubernetes/pkg/volume/util/volumehelper" ) type cinderDiskAttacher struct { @@ -286,8 +285,8 @@ func (attacher *cinderDiskAttacher) MountDevice(spec *volume.Spec, devicePath st options = append(options, "ro") } if notMnt { - diskMounter := volumehelper.NewSafeFormatAndMountFromHost(cinderVolumePluginName, attacher.host) - mountOptions := volume.MountOptionFromSpec(spec, options...) + diskMounter := volumeutil.NewSafeFormatAndMountFromHost(cinderVolumePluginName, attacher.host) + mountOptions := volumeutil.MountOptionFromSpec(spec, options...) err = diskMounter.FormatAndMount(devicePath, deviceMountPath, volumeSource.FSType, mountOptions) if err != nil { os.Remove(deviceMountPath) diff --git a/pkg/volume/cinder/cinder.go b/pkg/volume/cinder/cinder.go index fa3fd95b03..1ee17ba2fd 100644 --- a/pkg/volume/cinder/cinder.go +++ b/pkg/volume/cinder/cinder.go @@ -34,7 +34,6 @@ import ( kstrings "k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" - "k8s.io/kubernetes/pkg/volume/util/volumehelper" ) const ( @@ -145,7 +144,7 @@ func (plugin *cinderPlugin) newMounterInternal(spec *volume.Spec, podUID types.U }, fsType: fsType, readOnly: readOnly, - blockDeviceMounter: volumehelper.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host)}, nil + blockDeviceMounter: util.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host)}, nil } func (plugin *cinderPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) { @@ -501,7 +500,7 @@ type cinderVolumeProvisioner struct { var _ volume.Provisioner = &cinderVolumeProvisioner{} func (c *cinderVolumeProvisioner) Provision() (*v1.PersistentVolume, error) { - if !volume.AccessModesContainedInAll(c.plugin.GetAccessModes(), c.options.PVC.Spec.AccessModes) { + if !util.AccessModesContainedInAll(c.plugin.GetAccessModes(), 
c.options.PVC.Spec.AccessModes) { return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", c.options.PVC.Spec.AccessModes, c.plugin.GetAccessModes()) } @@ -515,7 +514,7 @@ func (c *cinderVolumeProvisioner) Provision() (*v1.PersistentVolume, error) { Name: c.options.PVName, Labels: labels, Annotations: map[string]string{ - volumehelper.VolumeDynamicallyCreatedByKey: "cinder-dynamic-provisioner", + util.VolumeDynamicallyCreatedByKey: "cinder-dynamic-provisioner", }, }, Spec: v1.PersistentVolumeSpec{ diff --git a/pkg/volume/cinder/cinder_util.go b/pkg/volume/cinder/cinder_util.go index 323690fa8c..8a5e25d8da 100644 --- a/pkg/volume/cinder/cinder_util.go +++ b/pkg/volume/cinder/cinder_util.go @@ -32,6 +32,7 @@ import ( clientset "k8s.io/client-go/kubernetes" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/pkg/volume" + volutil "k8s.io/kubernetes/pkg/volume/util" "k8s.io/utils/exec" ) @@ -170,8 +171,8 @@ func (util *DiskUtil) CreateVolume(c *cinderVolumeProvisioner) (volumeID string, capacity := c.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] volSizeBytes := capacity.Value() // Cinder works with gigabytes, convert to GiB with rounding up - volSizeGB := int(volume.RoundUpSize(volSizeBytes, 1024*1024*1024)) - name := volume.GenerateVolumeName(c.options.ClusterName, c.options.PVName, 255) // Cinder volume name can have up to 255 characters + volSizeGB := int(volutil.RoundUpSize(volSizeBytes, 1024*1024*1024)) + name := volutil.GenerateVolumeName(c.options.ClusterName, c.options.PVName, 255) // Cinder volume name can have up to 255 characters vtype := "" availability := "" // Apply ProvisionerParameters (case-insensitive). 
We leave validation of @@ -203,7 +204,7 @@ func (util *DiskUtil) CreateVolume(c *cinderVolumeProvisioner) (volumeID string, // if we did not get any zones, lets leave it blank and gophercloud will // use zone "nova" as default if len(zones) > 0 { - availability = volume.ChooseZoneForVolume(zones, c.options.PVC.Name) + availability = volutil.ChooseZoneForVolume(zones, c.options.PVC.Name) } } diff --git a/pkg/volume/configmap/configmap.go b/pkg/volume/configmap/configmap.go index 476b2bff99..cbc1fc1ac8 100644 --- a/pkg/volume/configmap/configmap.go +++ b/pkg/volume/configmap/configmap.go @@ -313,7 +313,7 @@ func (c *configMapVolumeUnmounter) TearDown() error { } func (c *configMapVolumeUnmounter) TearDownAt(dir string) error { - return volume.UnmountViaEmptyDir(dir, c.plugin.host, c.volName, wrappedVolumeSpec(), c.podUID) + return volumeutil.UnmountViaEmptyDir(dir, c.plugin.host, c.volName, wrappedVolumeSpec(), c.podUID) } func getVolumeSource(spec *volume.Spec) (*v1.ConfigMapVolumeSource, bool) { diff --git a/pkg/volume/downwardapi/downwardapi.go b/pkg/volume/downwardapi/downwardapi.go index 0b920b0505..4121f9b51a 100644 --- a/pkg/volume/downwardapi/downwardapi.go +++ b/pkg/volume/downwardapi/downwardapi.go @@ -283,7 +283,7 @@ func (c *downwardAPIVolumeUnmounter) TearDown() error { } func (c *downwardAPIVolumeUnmounter) TearDownAt(dir string) error { - return volume.UnmountViaEmptyDir(dir, c.plugin.host, c.volName, wrappedVolumeSpec(), c.podUID) + return volumeutil.UnmountViaEmptyDir(dir, c.plugin.host, c.volName, wrappedVolumeSpec(), c.podUID) } func (b *downwardAPIVolumeMounter) getMetaDir() string { diff --git a/pkg/volume/fc/BUILD b/pkg/volume/fc/BUILD index 665c400825..4d3374ae9d 100644 --- a/pkg/volume/fc/BUILD +++ b/pkg/volume/fc/BUILD @@ -22,7 +22,7 @@ go_library( "//pkg/util/strings:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", - "//pkg/volume/util/volumehelper:go_default_library", + 
"//pkg/volume/util/volumepathhandler:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/pkg/volume/fc/attacher.go b/pkg/volume/fc/attacher.go index ff034c58e6..77549e9b5b 100644 --- a/pkg/volume/fc/attacher.go +++ b/pkg/volume/fc/attacher.go @@ -31,7 +31,6 @@ import ( "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" - "k8s.io/kubernetes/pkg/volume/util/volumehelper" ) type fcAttacher struct { @@ -113,7 +112,7 @@ func (attacher *fcAttacher) MountDevice(spec *volume.Spec, devicePath string, de } if notMnt { diskMounter := &mount.SafeFormatAndMount{Interface: mounter, Exec: attacher.host.GetExec(fcPluginName)} - mountOptions := volume.MountOptionFromSpec(spec, options...) + mountOptions := volumeutil.MountOptionFromSpec(spec, options...) err = diskMounter.FormatAndMount(devicePath, deviceMountPath, volumeSource.FSType, mountOptions) if err != nil { os.Remove(deviceMountPath) @@ -189,7 +188,7 @@ func volumeSpecToMounter(spec *volume.Spec, host volume.VolumeHost) (*fcDiskMoun } // TODO: remove feature gate check after no longer needed if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { - volumeMode, err := volumehelper.GetVolumeMode(spec) + volumeMode, err := volumeutil.GetVolumeMode(spec) if err != nil { return nil, err } @@ -199,7 +198,7 @@ func volumeSpecToMounter(spec *volume.Spec, host volume.VolumeHost) (*fcDiskMoun fsType: fc.FSType, volumeMode: volumeMode, readOnly: readOnly, - mounter: volumehelper.NewSafeFormatAndMountFromHost(fcPluginName, host), + mounter: volumeutil.NewSafeFormatAndMountFromHost(fcPluginName, host), deviceUtil: volumeutil.NewDeviceHandler(volumeutil.NewIOHandler()), }, nil } @@ -207,7 +206,7 @@ func volumeSpecToMounter(spec *volume.Spec, host volume.VolumeHost) (*fcDiskMoun fcDisk: fcDisk, fsType: fc.FSType, 
readOnly: readOnly, - mounter: volumehelper.NewSafeFormatAndMountFromHost(fcPluginName, host), + mounter: volumeutil.NewSafeFormatAndMountFromHost(fcPluginName, host), deviceUtil: volumeutil.NewDeviceHandler(volumeutil.NewIOHandler()), }, nil } diff --git a/pkg/volume/fc/fc.go b/pkg/volume/fc/fc.go index ebd3e4d125..fb7a570155 100644 --- a/pkg/volume/fc/fc.go +++ b/pkg/volume/fc/fc.go @@ -32,7 +32,7 @@ import ( utilstrings "k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" - "k8s.io/kubernetes/pkg/volume/util/volumehelper" + "k8s.io/kubernetes/pkg/volume/util/volumepathhandler" ) // This is the primary entrypoint for volume plugins. @@ -133,7 +133,7 @@ func (plugin *fcPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, } // TODO: remove feature gate check after no longer needed if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { - volumeMode, err := volumehelper.GetVolumeMode(spec) + volumeMode, err := util.GetVolumeMode(spec) if err != nil { return nil, err } @@ -297,7 +297,7 @@ func (plugin *fcPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volu // globalMapPathUUID : plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath}/{pod uuid} func (plugin *fcPlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeName, mapPath string) (*volume.Spec, error) { pluginDir := plugin.host.GetVolumeDevicePluginDir(fcPluginName) - blkutil := util.NewBlockVolumePathHandler() + blkutil := volumepathhandler.NewBlockVolumePathHandler() globalMapPathUUID, err := blkutil.FindGlobalMapPathUUIDFromPod(pluginDir, mapPath, podUID) if err != nil { return nil, err diff --git a/pkg/volume/fc/fc_util.go b/pkg/volume/fc/fc_util.go index ba889551c7..3908275410 100644 --- a/pkg/volume/fc/fc_util.go +++ b/pkg/volume/fc/fc_util.go @@ -30,6 +30,7 @@ import ( "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/volume" volumeutil 
"k8s.io/kubernetes/pkg/volume/util" + "k8s.io/kubernetes/pkg/volume/util/volumepathhandler" ) type ioHandler interface { @@ -354,14 +355,14 @@ func (util *FCUtil) DetachBlockFCDisk(c fcDiskUnmapper, mapPath, devicePath stri // and remove loopback device then it will be remained on kubelet node. To avoid the problem, // local attach plugins needs to remove loopback device during TearDownDevice(). var devices []string - blkUtil := volumeutil.NewBlockVolumePathHandler() + blkUtil := volumepathhandler.NewBlockVolumePathHandler() dm := c.deviceUtil.FindMultipathDeviceForDevice(dstPath) if len(dm) != 0 { dstPath = dm } - loop, err := volumeutil.BlockVolumePathHandler.GetLoopDevice(blkUtil, dstPath) + loop, err := volumepathhandler.BlockVolumePathHandler.GetLoopDevice(blkUtil, dstPath) if err != nil { - if err.Error() != volumeutil.ErrDeviceNotFound { + if err.Error() != volumepathhandler.ErrDeviceNotFound { return fmt.Errorf("fc: failed to get loopback for destination path: %v, err: %v", dstPath, err) } glog.Warning("fc: loopback for destination path: %s not found", dstPath) @@ -389,7 +390,7 @@ func (util *FCUtil) DetachBlockFCDisk(c fcDiskUnmapper, mapPath, devicePath stri } if len(loop) != 0 { // The volume was successfully detached from node. We can safely remove the loopback. 
- err = volumeutil.BlockVolumePathHandler.RemoveLoopDevice(blkUtil, loop) + err = volumepathhandler.BlockVolumePathHandler.RemoveLoopDevice(blkUtil, loop) if err != nil { return fmt.Errorf("fc: failed to remove loopback :%v, err: %v", loop, err) } diff --git a/pkg/volume/flexvolume/plugin.go b/pkg/volume/flexvolume/plugin.go index e402e20e70..861ab4a3bb 100644 --- a/pkg/volume/flexvolume/plugin.go +++ b/pkg/volume/flexvolume/plugin.go @@ -30,6 +30,7 @@ import ( "k8s.io/kubernetes/pkg/util/mount" utilstrings "k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/volume" + "k8s.io/kubernetes/pkg/volume/util" "k8s.io/utils/exec" ) @@ -103,7 +104,7 @@ func (plugin *flexVolumePlugin) getExecutable() string { execName := parts[len(parts)-1] execPath := path.Join(plugin.execPath, execName) if runtime.GOOS == "windows" { - execPath = volume.GetWindowsPath(execPath) + execPath = util.GetWindowsPath(execPath) } return execPath } diff --git a/pkg/volume/flocker/BUILD b/pkg/volume/flocker/BUILD index 798e0539e5..b1fd9272fb 100644 --- a/pkg/volume/flocker/BUILD +++ b/pkg/volume/flocker/BUILD @@ -21,7 +21,6 @@ go_library( "//pkg/util/strings:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", - "//pkg/volume/util/volumehelper:go_default_library", "//vendor/github.com/clusterhq/flocker-go:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/pkg/volume/flocker/flocker_util.go b/pkg/volume/flocker/flocker_util.go index ab58bcef3f..3d9149cd34 100644 --- a/pkg/volume/flocker/flocker_util.go +++ b/pkg/volume/flocker/flocker_util.go @@ -22,7 +22,8 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/rand" - "k8s.io/kubernetes/pkg/volume" + + volutil "k8s.io/kubernetes/pkg/volume/util" flockerapi "github.com/clusterhq/flocker-go" "github.com/golang/glog" @@ -73,7 +74,7 @@ func (util *FlockerUtil) CreateVolume(c *flockerVolumeProvisioner) 
(datasetUUID capacity := c.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] requestBytes := capacity.Value() - volumeSizeGB = int(volume.RoundUpSize(requestBytes, 1024*1024*1024)) + volumeSizeGB = int(volutil.RoundUpSize(requestBytes, 1024*1024*1024)) createOptions := &flockerapi.CreateDatasetOptions{ MaximumSize: requestBytes, diff --git a/pkg/volume/flocker/flocker_volume.go b/pkg/volume/flocker/flocker_volume.go index bdbf311fc2..d7f245d15a 100644 --- a/pkg/volume/flocker/flocker_volume.go +++ b/pkg/volume/flocker/flocker_volume.go @@ -23,7 +23,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/volume" - "k8s.io/kubernetes/pkg/volume/util/volumehelper" + "k8s.io/kubernetes/pkg/volume/util" ) type volumeManager interface { @@ -55,7 +55,7 @@ type flockerVolumeProvisioner struct { var _ volume.Provisioner = &flockerVolumeProvisioner{} func (c *flockerVolumeProvisioner) Provision() (*v1.PersistentVolume, error) { - if !volume.AccessModesContainedInAll(c.plugin.GetAccessModes(), c.options.PVC.Spec.AccessModes) { + if !util.AccessModesContainedInAll(c.plugin.GetAccessModes(), c.options.PVC.Spec.AccessModes) { return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", c.options.PVC.Spec.AccessModes, c.plugin.GetAccessModes()) } @@ -77,7 +77,7 @@ func (c *flockerVolumeProvisioner) Provision() (*v1.PersistentVolume, error) { Name: c.options.PVName, Labels: map[string]string{}, Annotations: map[string]string{ - volumehelper.VolumeDynamicallyCreatedByKey: "flocker-dynamic-provisioner", + util.VolumeDynamicallyCreatedByKey: "flocker-dynamic-provisioner", }, }, Spec: v1.PersistentVolumeSpec{ diff --git a/pkg/volume/gce_pd/BUILD b/pkg/volume/gce_pd/BUILD index 93bb712970..70c6d666c9 100644 --- a/pkg/volume/gce_pd/BUILD +++ b/pkg/volume/gce_pd/BUILD @@ -24,7 +24,7 @@ go_library( "//pkg/util/strings:go_default_library", "//pkg/volume:go_default_library", 
"//pkg/volume/util:go_default_library", - "//pkg/volume/util/volumehelper:go_default_library", + "//pkg/volume/util/volumepathhandler:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", @@ -48,6 +48,7 @@ go_test( "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/testing:go_default_library", + "//pkg/volume/util:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", diff --git a/pkg/volume/gce_pd/attacher.go b/pkg/volume/gce_pd/attacher.go index fa6264ebe3..21d8545b08 100644 --- a/pkg/volume/gce_pd/attacher.go +++ b/pkg/volume/gce_pd/attacher.go @@ -32,7 +32,6 @@ import ( "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" - "k8s.io/kubernetes/pkg/volume/util/volumehelper" ) type gcePersistentDiskAttacher struct { @@ -209,8 +208,8 @@ func (attacher *gcePersistentDiskAttacher) MountDevice(spec *volume.Spec, device options = append(options, "ro") } if notMnt { - diskMounter := volumehelper.NewSafeFormatAndMountFromHost(gcePersistentDiskPluginName, attacher.host) - mountOptions := volume.MountOptionFromSpec(spec, options...) + diskMounter := volumeutil.NewSafeFormatAndMountFromHost(gcePersistentDiskPluginName, attacher.host) + mountOptions := volumeutil.MountOptionFromSpec(spec, options...) 
err = diskMounter.FormatAndMount(devicePath, deviceMountPath, volumeSource.FSType, mountOptions) if err != nil { os.Remove(deviceMountPath) diff --git a/pkg/volume/gce_pd/gce_pd.go b/pkg/volume/gce_pd/gce_pd.go index 2b94cb2973..8c78c6754a 100644 --- a/pkg/volume/gce_pd/gce_pd.go +++ b/pkg/volume/gce_pd/gce_pd.go @@ -31,7 +31,6 @@ import ( kstrings "k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" - "k8s.io/kubernetes/pkg/volume/util/volumehelper" ) // This is the primary entrypoint for volume plugins. @@ -398,7 +397,7 @@ type gcePersistentDiskProvisioner struct { var _ volume.Provisioner = &gcePersistentDiskProvisioner{} func (c *gcePersistentDiskProvisioner) Provision() (*v1.PersistentVolume, error) { - if !volume.AccessModesContainedInAll(c.plugin.GetAccessModes(), c.options.PVC.Spec.AccessModes) { + if !util.AccessModesContainedInAll(c.plugin.GetAccessModes(), c.options.PVC.Spec.AccessModes) { return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", c.options.PVC.Spec.AccessModes, c.plugin.GetAccessModes()) } @@ -416,7 +415,7 @@ func (c *gcePersistentDiskProvisioner) Provision() (*v1.PersistentVolume, error) Name: c.options.PVName, Labels: map[string]string{}, Annotations: map[string]string{ - volumehelper.VolumeDynamicallyCreatedByKey: "gce-pd-dynamic-provisioner", + util.VolumeDynamicallyCreatedByKey: "gce-pd-dynamic-provisioner", }, }, Spec: v1.PersistentVolumeSpec{ diff --git a/pkg/volume/gce_pd/gce_pd_block.go b/pkg/volume/gce_pd/gce_pd_block.go index f870adf975..f4398d13b5 100644 --- a/pkg/volume/gce_pd/gce_pd_block.go +++ b/pkg/volume/gce_pd/gce_pd_block.go @@ -28,7 +28,7 @@ import ( "k8s.io/kubernetes/pkg/util/mount" kstrings "k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/volume" - "k8s.io/kubernetes/pkg/volume/util" + "k8s.io/kubernetes/pkg/volume/util/volumepathhandler" ) var _ volume.VolumePlugin = &gcePersistentDiskPlugin{} @@ -40,7 +40,7 @@ var _ 
volume.ExpandableVolumePlugin = &gcePersistentDiskPlugin{} func (plugin *gcePersistentDiskPlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeName, mapPath string) (*volume.Spec, error) { pluginDir := plugin.host.GetVolumeDevicePluginDir(gcePersistentDiskPluginName) - blkutil := util.NewBlockVolumePathHandler() + blkutil := volumepathhandler.NewBlockVolumePathHandler() globalMapPathUUID, err := blkutil.FindGlobalMapPathUUIDFromPod(pluginDir, mapPath, podUID) if err != nil { return nil, err diff --git a/pkg/volume/gce_pd/gce_pd_test.go b/pkg/volume/gce_pd/gce_pd_test.go index 1c71d8b765..2f22a204f4 100644 --- a/pkg/volume/gce_pd/gce_pd_test.go +++ b/pkg/volume/gce_pd/gce_pd_test.go @@ -30,6 +30,7 @@ import ( "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" volumetest "k8s.io/kubernetes/pkg/volume/testing" + "k8s.io/kubernetes/pkg/volume/util" ) func TestCanSupport(t *testing.T) { @@ -176,7 +177,7 @@ func TestPlugin(t *testing.T) { } cap := persistentSpec.Spec.Capacity[v1.ResourceStorage] size := cap.Value() - if size != 100*volume.GB { + if size != 100*util.GB { t.Errorf("Provision() returned unexpected volume size: %v", size) } diff --git a/pkg/volume/gce_pd/gce_util.go b/pkg/volume/gce_pd/gce_util.go index 339112b2bf..db2678a706 100644 --- a/pkg/volume/gce_pd/gce_util.go +++ b/pkg/volume/gce_pd/gce_util.go @@ -82,10 +82,10 @@ func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner) (strin return "", 0, nil, "", err } - name := volume.GenerateVolumeName(c.options.ClusterName, c.options.PVName, 63) // GCE PD name can have up to 63 characters + name := volumeutil.GenerateVolumeName(c.options.ClusterName, c.options.PVName, 63) // GCE PD name can have up to 63 characters capacity := c.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] // GCE PDs are allocated in chunks of GBs (not GiBs) - requestGB := volume.RoundUpToGB(capacity) + requestGB := volumeutil.RoundUpToGB(capacity) // Apply Parameters. 
// Values for parameter "replication-type" are canonicalized to lower case. @@ -169,13 +169,13 @@ func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner) (strin } else if zonePresent && !zonesPresent { // 10 - "zone" specified // Use specified zone - if err := volume.ValidateZone(configuredZone); err != nil { + if err := volumeutil.ValidateZone(configuredZone); err != nil { return "", 0, nil, "", err } zones = make(sets.String) zones.Insert(configuredZone) } - zone := volume.ChooseZoneForVolume(zones, c.options.PVC.Name) + zone := volumeutil.ChooseZoneForVolume(zones, c.options.PVC.Name) if err := cloud.CreateDisk( name, @@ -237,7 +237,7 @@ func createRegionalPD( selectedReplicaZones = replicaZones } else { // Must randomly select zones - selectedReplicaZones = volume.ChooseZonesForVolume( + selectedReplicaZones = volumeutil.ChooseZonesForVolume( replicaZones, pvcName, maxRegionalPDZones) } diff --git a/pkg/volume/git_repo/git_repo.go b/pkg/volume/git_repo/git_repo.go index 779bdc5e93..e432befb3c 100644 --- a/pkg/volume/git_repo/git_repo.go +++ b/pkg/volume/git_repo/git_repo.go @@ -264,7 +264,7 @@ func (c *gitRepoVolumeUnmounter) TearDown() error { // TearDownAt simply deletes everything in the directory. 
func (c *gitRepoVolumeUnmounter) TearDownAt(dir string) error { - return volume.UnmountViaEmptyDir(dir, c.plugin.host, c.volName, wrappedVolumeSpec(), c.podUID) + return volumeutil.UnmountViaEmptyDir(dir, c.plugin.host, c.volName, wrappedVolumeSpec(), c.podUID) } func getVolumeSource(spec *volume.Spec) (*v1.GitRepoVolumeSource, bool) { diff --git a/pkg/volume/glusterfs/BUILD b/pkg/volume/glusterfs/BUILD index ab37127ff6..990ff54c27 100644 --- a/pkg/volume/glusterfs/BUILD +++ b/pkg/volume/glusterfs/BUILD @@ -21,7 +21,6 @@ go_library( "//pkg/util/strings:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", - "//pkg/volume/util/volumehelper:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/heketi/heketi/client/api/go-client:go_default_library", "//vendor/github.com/heketi/heketi/pkg/glusterfs/api:go_default_library", diff --git a/pkg/volume/glusterfs/glusterfs.go b/pkg/volume/glusterfs/glusterfs.go index f814741a68..d77eb5898b 100644 --- a/pkg/volume/glusterfs/glusterfs.go +++ b/pkg/volume/glusterfs/glusterfs.go @@ -43,7 +43,6 @@ import ( "k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/volume" volutil "k8s.io/kubernetes/pkg/volume/util" - "k8s.io/kubernetes/pkg/volume/util/volumehelper" ) // ProbeVolumePlugins is the primary entrypoint for volume plugins. 
@@ -178,7 +177,7 @@ func (plugin *glusterfsPlugin) newMounterInternal(spec *volume.Spec, ep *v1.Endp hosts: ep, path: source.Path, readOnly: readOnly, - mountOptions: volume.MountOptionFromSpec(spec), + mountOptions: volutil.MountOptionFromSpec(spec), }, nil } @@ -328,7 +327,7 @@ func (b *glusterfsMounter) setUpAtInternal(dir string) error { } options = append(options, "backup-volfile-servers="+dstrings.Join(addrlist[:], ":")) - mountOptions := volume.JoinMountOptions(b.mountOptions, options) + mountOptions := volutil.JoinMountOptions(b.mountOptions, options) // with `backup-volfile-servers` mount option in place, it is not required to // iterate over all the servers in the addrlist. A mount attempt with this option @@ -502,7 +501,7 @@ func (plugin *glusterfsPlugin) collectGids(className string, gidTable *MinMaxAll pvName := pv.ObjectMeta.Name - gidStr, ok := pv.Annotations[volumehelper.VolumeGidAnnotationKey] + gidStr, ok := pv.Annotations[volutil.VolumeGidAnnotationKey] if !ok { glog.Warningf("no GID found in pv %v", pvName) @@ -583,7 +582,7 @@ func (plugin *glusterfsPlugin) getGidTable(className string, min int, max int) ( } func (d *glusterfsVolumeDeleter) getGid() (int, bool, error) { - gidStr, ok := d.spec.Annotations[volumehelper.VolumeGidAnnotationKey] + gidStr, ok := d.spec.Annotations[volutil.VolumeGidAnnotationKey] if !ok { return 0, false, nil @@ -669,7 +668,7 @@ func (d *glusterfsVolumeDeleter) Delete() error { } func (p *glusterfsVolumeProvisioner) Provision() (*v1.PersistentVolume, error) { - if !volume.AccessModesContainedInAll(p.plugin.GetAccessModes(), p.options.PVC.Spec.AccessModes) { + if !volutil.AccessModesContainedInAll(p.plugin.GetAccessModes(), p.options.PVC.Spec.AccessModes) { return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", p.options.PVC.Spec.AccessModes, p.plugin.GetAccessModes()) } @@ -723,12 +722,12 @@ func (p *glusterfsVolumeProvisioner) Provision() (*v1.PersistentVolume, error) { gidStr := 
strconv.FormatInt(int64(gid), 10) pv.Annotations = map[string]string{ - volumehelper.VolumeGidAnnotationKey: gidStr, - volumehelper.VolumeDynamicallyCreatedByKey: heketiAnn, - glusterTypeAnn: "file", - "Description": glusterDescAnn, - v1.MountOptionAnnotation: "auto_unmount", - heketiVolIDAnn: volID, + volutil.VolumeGidAnnotationKey: gidStr, + volutil.VolumeDynamicallyCreatedByKey: heketiAnn, + glusterTypeAnn: "file", + "Description": glusterDescAnn, + v1.MountOptionAnnotation: "auto_unmount", + heketiVolIDAnn: volID, } pv.Spec.Capacity = v1.ResourceList{ @@ -743,8 +742,9 @@ func (p *glusterfsVolumeProvisioner) CreateVolume(gid int) (r *v1.GlusterfsVolum capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] // GlusterFS/heketi creates volumes in units of GiB. - sz := int(volume.RoundUpToGiB(capacity)) + sz := int(volutil.RoundUpToGiB(capacity)) glog.V(2).Infof("create volume of size %dGiB", sz) + if p.url == "" { glog.Errorf("REST server endpoint is empty") return nil, 0, "", fmt.Errorf("failed to create glusterfs REST client, REST URL is empty") @@ -1126,10 +1126,10 @@ func (plugin *glusterfsPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize res // Find out delta size expansionSize := (newSize.Value() - oldSize.Value()) - expansionSizeGiB := int(volume.RoundUpSize(expansionSize, volume.GIB)) + expansionSizeGiB := int(volutil.RoundUpSize(expansionSize, volutil.GIB)) // Find out requested Size - requestGiB := volume.RoundUpToGiB(newSize) + requestGiB := volutil.RoundUpToGiB(newSize) //Check the existing volume size currentVolumeInfo, err := cli.VolumeInfo(volumeID) diff --git a/pkg/volume/host_path/BUILD b/pkg/volume/host_path/BUILD index 8e1a36196e..b188bb3d94 100644 --- a/pkg/volume/host_path/BUILD +++ b/pkg/volume/host_path/BUILD @@ -16,7 +16,8 @@ go_library( deps = [ "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", - "//pkg/volume/util/volumehelper:go_default_library", + 
"//pkg/volume/util:go_default_library", + "//pkg/volume/util/recyclerclient:go_default_library", "//pkg/volume/validation:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/pkg/volume/host_path/host_path.go b/pkg/volume/host_path/host_path.go index 97e3fa5db5..4a3cb76b6a 100644 --- a/pkg/volume/host_path/host_path.go +++ b/pkg/volume/host_path/host_path.go @@ -27,7 +27,8 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" - "k8s.io/kubernetes/pkg/volume/util/volumehelper" + "k8s.io/kubernetes/pkg/volume/util" + "k8s.io/kubernetes/pkg/volume/util/recyclerclient" "k8s.io/kubernetes/pkg/volume/validation" ) @@ -129,13 +130,13 @@ func (plugin *hostPathPlugin) NewUnmounter(volName string, podUID types.UID) (vo // Recycle recycles/scrubs clean a HostPath volume. // Recycle blocks until the pod has completed or any error occurs. // HostPath recycling only works in single node clusters and is meant for testing purposes only. 
-func (plugin *hostPathPlugin) Recycle(pvName string, spec *volume.Spec, eventRecorder volume.RecycleEventRecorder) error { +func (plugin *hostPathPlugin) Recycle(pvName string, spec *volume.Spec, eventRecorder recyclerclient.RecycleEventRecorder) error { if spec.PersistentVolume == nil || spec.PersistentVolume.Spec.HostPath == nil { return fmt.Errorf("spec.PersistentVolumeSource.HostPath is nil") } pod := plugin.config.RecyclerPodTemplate - timeout := volume.CalculateTimeoutForVolume(plugin.config.RecyclerMinimumTimeout, plugin.config.RecyclerTimeoutIncrement, spec.PersistentVolume) + timeout := util.CalculateTimeoutForVolume(plugin.config.RecyclerMinimumTimeout, plugin.config.RecyclerTimeoutIncrement, spec.PersistentVolume) // overrides pod.Spec.ActiveDeadlineSeconds = &timeout pod.Spec.Volumes[0].VolumeSource = v1.VolumeSource{ @@ -143,7 +144,7 @@ func (plugin *hostPathPlugin) Recycle(pvName string, spec *volume.Spec, eventRec Path: spec.PersistentVolume.Spec.HostPath.Path, }, } - return volume.RecycleVolumeByWatchingPodUntilCompletion(pvName, pod, plugin.host.GetKubeClient(), eventRecorder) + return recyclerclient.RecycleVolumeByWatchingPodUntilCompletion(pvName, pod, plugin.host.GetKubeClient(), eventRecorder) } func (plugin *hostPathPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) { @@ -272,7 +273,7 @@ func (r *hostPathProvisioner) Provision() (*v1.PersistentVolume, error) { ObjectMeta: metav1.ObjectMeta{ Name: r.options.PVName, Annotations: map[string]string{ - volumehelper.VolumeDynamicallyCreatedByKey: "hostpath-dynamic-provisioner", + util.VolumeDynamicallyCreatedByKey: "hostpath-dynamic-provisioner", }, }, Spec: v1.PersistentVolumeSpec{ diff --git a/pkg/volume/iscsi/BUILD b/pkg/volume/iscsi/BUILD index 15abe4ff02..8e1f600de7 100644 --- a/pkg/volume/iscsi/BUILD +++ b/pkg/volume/iscsi/BUILD @@ -22,7 +22,7 @@ go_library( "//pkg/util/strings:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", - 
"//pkg/volume/util/volumehelper:go_default_library", + "//pkg/volume/util/volumepathhandler:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/pkg/volume/iscsi/attacher.go b/pkg/volume/iscsi/attacher.go index 2aab2ecc9d..0ca2f4d5fe 100644 --- a/pkg/volume/iscsi/attacher.go +++ b/pkg/volume/iscsi/attacher.go @@ -29,7 +29,6 @@ import ( "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" - "k8s.io/kubernetes/pkg/volume/util/volumehelper" ) type iscsiAttacher struct { @@ -113,7 +112,7 @@ func (attacher *iscsiAttacher) MountDevice(spec *volume.Spec, devicePath string, } if notMnt { diskMounter := &mount.SafeFormatAndMount{Interface: mounter, Exec: attacher.host.GetExec(iscsiPluginName)} - mountOptions := volume.MountOptionFromSpec(spec, options...) + mountOptions := volumeutil.MountOptionFromSpec(spec, options...) err = diskMounter.FormatAndMount(devicePath, deviceMountPath, fsType, mountOptions) if err != nil { os.Remove(deviceMountPath) @@ -184,7 +183,7 @@ func volumeSpecToMounter(spec *volume.Spec, host volume.VolumeHost, pod *v1.Pod) exec := host.GetExec(iscsiPluginName) // TODO: remove feature gate check after no longer needed if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { - volumeMode, err := volumehelper.GetVolumeMode(spec) + volumeMode, err := volumeutil.GetVolumeMode(spec) if err != nil { return nil, err } diff --git a/pkg/volume/iscsi/disk_manager.go b/pkg/volume/iscsi/disk_manager.go index 4d5e9f9fe7..aa1caeaf99 100644 --- a/pkg/volume/iscsi/disk_manager.go +++ b/pkg/volume/iscsi/disk_manager.go @@ -22,6 +22,7 @@ import ( "github.com/golang/glog" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" + "k8s.io/kubernetes/pkg/volume/util" ) // Abstract interface to disk operations. 
@@ -63,7 +64,7 @@ func diskSetUp(manager diskManager, b iscsiDiskMounter, volPath string, mounter b.iscsiDisk.Iface = b.iscsiDisk.Portals[0] + ":" + b.iscsiDisk.VolName } globalPDPath := manager.MakeGlobalPDName(*b.iscsiDisk) - mountOptions := volume.JoinMountOptions(b.mountOptions, options) + mountOptions := util.JoinMountOptions(b.mountOptions, options) err = mounter.Mount(globalPDPath, volPath, "", mountOptions) if err != nil { glog.Errorf("Failed to bind mount: source:%s, target:%s, err:%v", globalPDPath, volPath, err) diff --git a/pkg/volume/iscsi/iscsi.go b/pkg/volume/iscsi/iscsi.go index ac644af2e6..525386e193 100644 --- a/pkg/volume/iscsi/iscsi.go +++ b/pkg/volume/iscsi/iscsi.go @@ -31,6 +31,7 @@ import ( utilstrings "k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/volume" ioutil "k8s.io/kubernetes/pkg/volume/util" + "k8s.io/kubernetes/pkg/volume/util/volumepathhandler" ) // This is the primary entrypoint for volume plugins. @@ -118,7 +119,7 @@ func (plugin *iscsiPlugin) newMounterInternal(spec *volume.Spec, podUID types.UI mounter: &mount.SafeFormatAndMount{Interface: mounter, Exec: exec}, exec: exec, deviceUtil: ioutil.NewDeviceHandler(ioutil.NewIOHandler()), - mountOptions: volume.MountOptionFromSpec(spec), + mountOptions: ioutil.MountOptionFromSpec(spec), }, nil } @@ -235,7 +236,7 @@ func (plugin *iscsiPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*v func (plugin *iscsiPlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeName, mapPath string) (*volume.Spec, error) { pluginDir := plugin.host.GetVolumeDevicePluginDir(iscsiPluginName) - blkutil := ioutil.NewBlockVolumePathHandler() + blkutil := volumepathhandler.NewBlockVolumePathHandler() globalMapPathUUID, err := blkutil.FindGlobalMapPathUUIDFromPod(pluginDir, mapPath, podUID) if err != nil { return nil, err diff --git a/pkg/volume/iscsi/iscsi_util.go b/pkg/volume/iscsi/iscsi_util.go index 89b84789f3..ad6382f308 100644 --- a/pkg/volume/iscsi/iscsi_util.go +++ 
b/pkg/volume/iscsi/iscsi_util.go @@ -33,6 +33,7 @@ import ( "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" + "k8s.io/kubernetes/pkg/volume/util/volumepathhandler" ) var ( @@ -518,10 +519,10 @@ func (util *ISCSIUtil) DetachBlockISCSIDisk(c iscsiDiskUnmapper, mapPath string) // GenerateUnmapDeviceFunc() in operation_generator. As a result, these plugins fail to get // and remove loopback device then it will be remained on kubelet node. To avoid the problem, // local attach plugins needs to remove loopback device during TearDownDevice(). - blkUtil := volumeutil.NewBlockVolumePathHandler() - loop, err := volumeutil.BlockVolumePathHandler.GetLoopDevice(blkUtil, devicePath) + blkUtil := volumepathhandler.NewBlockVolumePathHandler() + loop, err := volumepathhandler.BlockVolumePathHandler.GetLoopDevice(blkUtil, devicePath) if err != nil { - if err.Error() != volumeutil.ErrDeviceNotFound { + if err.Error() != volumepathhandler.ErrDeviceNotFound { return fmt.Errorf("failed to get loopback for device: %v, err: %v", devicePath, err) } glog.Warning("iscsi: loopback for device: %s not found", device) @@ -533,7 +534,7 @@ func (util *ISCSIUtil) DetachBlockISCSIDisk(c iscsiDiskUnmapper, mapPath string) } if len(loop) != 0 { // The volume was successfully detached from node. We can safely remove the loopback. 
- err = volumeutil.BlockVolumePathHandler.RemoveLoopDevice(blkUtil, loop) + err = volumepathhandler.BlockVolumePathHandler.RemoveLoopDevice(blkUtil, loop) if err != nil { return fmt.Errorf("failed to remove loopback :%v, err: %v", loop, err) } diff --git a/pkg/volume/metrics_du.go b/pkg/volume/metrics_du.go index 19a29cbbc8..88a985d5ac 100644 --- a/pkg/volume/metrics_du.go +++ b/pkg/volume/metrics_du.go @@ -19,7 +19,7 @@ package volume import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/kubernetes/pkg/volume/util" + "k8s.io/kubernetes/pkg/volume/util/fs" ) var _ MetricsProvider = &metricsDu{} @@ -66,7 +66,7 @@ func (md *metricsDu) GetMetrics() (*Metrics, error) { // runDu executes the "du" command and writes the results to metrics.Used func (md *metricsDu) runDu(metrics *Metrics) error { - used, err := util.Du(md.path) + used, err := fs.Du(md.path) if err != nil { return err } @@ -76,7 +76,7 @@ func (md *metricsDu) runDu(metrics *Metrics) error { // runFind executes the "find" command and writes the results to metrics.InodesUsed func (md *metricsDu) runFind(metrics *Metrics) error { - inodesUsed, err := util.Find(md.path) + inodesUsed, err := fs.Find(md.path) if err != nil { return err } @@ -87,7 +87,7 @@ func (md *metricsDu) runFind(metrics *Metrics) error { // getFsInfo writes metrics.Capacity and metrics.Available from the filesystem // info func (md *metricsDu) getFsInfo(metrics *Metrics) error { - available, capacity, _, inodes, inodesFree, _, err := util.FsInfo(md.path) + available, capacity, _, inodes, inodesFree, _, err := fs.FsInfo(md.path) if err != nil { return NewFsInfoFailedError(err) } diff --git a/pkg/volume/metrics_statfs.go b/pkg/volume/metrics_statfs.go index ede4f6ef8f..66f99e30a7 100644 --- a/pkg/volume/metrics_statfs.go +++ b/pkg/volume/metrics_statfs.go @@ -19,7 +19,7 @@ package volume import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - 
"k8s.io/kubernetes/pkg/volume/util" + "k8s.io/kubernetes/pkg/volume/util/fs" ) var _ MetricsProvider = &metricsStatFS{} @@ -55,7 +55,7 @@ func (md *metricsStatFS) GetMetrics() (*Metrics, error) { // getFsInfo writes metrics.Capacity, metrics.Used and metrics.Available from the filesystem info func (md *metricsStatFS) getFsInfo(metrics *Metrics) error { - available, capacity, usage, inodes, inodesFree, inodesUsed, err := util.FsInfo(md.path) + available, capacity, usage, inodes, inodesFree, inodesUsed, err := fs.FsInfo(md.path) if err != nil { return NewFsInfoFailedError(err) } diff --git a/pkg/volume/nfs/BUILD b/pkg/volume/nfs/BUILD index dcd34a6b26..48ffc558bb 100644 --- a/pkg/volume/nfs/BUILD +++ b/pkg/volume/nfs/BUILD @@ -18,6 +18,7 @@ go_library( "//pkg/util/strings:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", + "//pkg/volume/util/recyclerclient:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/pkg/volume/nfs/nfs.go b/pkg/volume/nfs/nfs.go index 26db61d729..708ff7babd 100644 --- a/pkg/volume/nfs/nfs.go +++ b/pkg/volume/nfs/nfs.go @@ -29,6 +29,7 @@ import ( "k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" + "k8s.io/kubernetes/pkg/volume/util/recyclerclient" ) // This is the primary entrypoint for volume plugins. @@ -123,7 +124,7 @@ func (plugin *nfsPlugin) newMounterInternal(spec *volume.Spec, pod *v1.Pod, moun server: source.Server, exportPath: source.Path, readOnly: readOnly, - mountOptions: volume.MountOptionFromSpec(spec), + mountOptions: util.MountOptionFromSpec(spec), }, nil } @@ -142,13 +143,13 @@ func (plugin *nfsPlugin) newUnmounterInternal(volName string, podUID types.UID, // Recycle recycles/scrubs clean an NFS volume. // Recycle blocks until the pod has completed or any error occurs. 
-func (plugin *nfsPlugin) Recycle(pvName string, spec *volume.Spec, eventRecorder volume.RecycleEventRecorder) error { +func (plugin *nfsPlugin) Recycle(pvName string, spec *volume.Spec, eventRecorder recyclerclient.RecycleEventRecorder) error { if spec.PersistentVolume == nil || spec.PersistentVolume.Spec.NFS == nil { return fmt.Errorf("spec.PersistentVolumeSource.NFS is nil") } pod := plugin.config.RecyclerPodTemplate - timeout := volume.CalculateTimeoutForVolume(plugin.config.RecyclerMinimumTimeout, plugin.config.RecyclerTimeoutIncrement, spec.PersistentVolume) + timeout := util.CalculateTimeoutForVolume(plugin.config.RecyclerMinimumTimeout, plugin.config.RecyclerTimeoutIncrement, spec.PersistentVolume) // overrides pod.Spec.ActiveDeadlineSeconds = &timeout pod.GenerateName = "pv-recycler-nfs-" @@ -158,7 +159,7 @@ func (plugin *nfsPlugin) Recycle(pvName string, spec *volume.Spec, eventRecorder Path: spec.PersistentVolume.Spec.NFS.Path, }, } - return volume.RecycleVolumeByWatchingPodUntilCompletion(pvName, pod, plugin.host.GetKubeClient(), eventRecorder) + return recyclerclient.RecycleVolumeByWatchingPodUntilCompletion(pvName, pod, plugin.host.GetKubeClient(), eventRecorder) } func (plugin *nfsPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) { @@ -249,7 +250,7 @@ func (b *nfsMounter) SetUpAt(dir string, fsGroup *int64) error { if b.readOnly { options = append(options, "ro") } - mountOptions := volume.JoinMountOptions(b.mountOptions, options) + mountOptions := util.JoinMountOptions(b.mountOptions, options) err = b.mounter.Mount(source, dir, "nfs", mountOptions) if err != nil { notMnt, mntErr := b.mounter.IsNotMountPoint(dir) diff --git a/pkg/volume/photon_pd/BUILD b/pkg/volume/photon_pd/BUILD index 4bad78b18a..17314ec1a0 100644 --- a/pkg/volume/photon_pd/BUILD +++ b/pkg/volume/photon_pd/BUILD @@ -21,7 +21,6 @@ go_library( "//pkg/util/strings:go_default_library", "//pkg/volume:go_default_library", 
"//pkg/volume/util:go_default_library", - "//pkg/volume/util/volumehelper:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", diff --git a/pkg/volume/photon_pd/attacher.go b/pkg/volume/photon_pd/attacher.go index e247a3996f..54370d6049 100644 --- a/pkg/volume/photon_pd/attacher.go +++ b/pkg/volume/photon_pd/attacher.go @@ -32,7 +32,6 @@ import ( "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" - "k8s.io/kubernetes/pkg/volume/util/volumehelper" ) type photonPersistentDiskAttacher struct { @@ -211,8 +210,8 @@ func (attacher *photonPersistentDiskAttacher) MountDevice(spec *volume.Spec, dev options := []string{} if notMnt { - diskMounter := volumehelper.NewSafeFormatAndMountFromHost(photonPersistentDiskPluginName, attacher.host) - mountOptions := volume.MountOptionFromSpec(spec) + diskMounter := volumeutil.NewSafeFormatAndMountFromHost(photonPersistentDiskPluginName, attacher.host) + mountOptions := volumeutil.MountOptionFromSpec(spec) err = diskMounter.FormatAndMount(devicePath, deviceMountPath, volumeSource.FSType, mountOptions) if err != nil { os.Remove(deviceMountPath) diff --git a/pkg/volume/photon_pd/photon_pd.go b/pkg/volume/photon_pd/photon_pd.go index 99965a775b..25ca23928d 100644 --- a/pkg/volume/photon_pd/photon_pd.go +++ b/pkg/volume/photon_pd/photon_pd.go @@ -30,7 +30,6 @@ import ( utilstrings "k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" - "k8s.io/kubernetes/pkg/volume/util/volumehelper" ) // This is the primary entrypoint for volume plugins. 
@@ -115,7 +114,7 @@ func (plugin *photonPersistentDiskPlugin) newMounterInternal(spec *volume.Spec, plugin: plugin, }, fsType: fsType, - diskMounter: volumehelper.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host)}, nil + diskMounter: util.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host)}, nil } func (plugin *photonPersistentDiskPlugin) newUnmounterInternal(volName string, podUID types.UID, manager pdManager, mounter mount.Interface) (volume.Unmounter, error) { @@ -342,7 +341,7 @@ func (plugin *photonPersistentDiskPlugin) newProvisionerInternal(options volume. } func (p *photonPersistentDiskProvisioner) Provision() (*v1.PersistentVolume, error) { - if !volume.AccessModesContainedInAll(p.plugin.GetAccessModes(), p.options.PVC.Spec.AccessModes) { + if !util.AccessModesContainedInAll(p.plugin.GetAccessModes(), p.options.PVC.Spec.AccessModes) { return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", p.options.PVC.Spec.AccessModes, p.plugin.GetAccessModes()) } @@ -360,7 +359,7 @@ func (p *photonPersistentDiskProvisioner) Provision() (*v1.PersistentVolume, err Name: p.options.PVName, Labels: map[string]string{}, Annotations: map[string]string{ - volumehelper.VolumeDynamicallyCreatedByKey: "photon-volume-dynamic-provisioner", + util.VolumeDynamicallyCreatedByKey: "photon-volume-dynamic-provisioner", }, }, Spec: v1.PersistentVolumeSpec{ diff --git a/pkg/volume/photon_pd/photon_util.go b/pkg/volume/photon_pd/photon_util.go index 7449abfe9d..d4eb69a2fb 100644 --- a/pkg/volume/photon_pd/photon_util.go +++ b/pkg/volume/photon_pd/photon_util.go @@ -90,8 +90,8 @@ func (util *PhotonDiskUtil) CreateVolume(p *photonPersistentDiskProvisioner) (pd capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] volSizeBytes := capacity.Value() // PhotonController works with GB, convert to GB with rounding up - volSizeGB := int(volume.RoundUpSize(volSizeBytes, 1024*1024*1024)) - name := 
volume.GenerateVolumeName(p.options.ClusterName, p.options.PVName, 255) + volSizeGB := int(volumeutil.RoundUpSize(volSizeBytes, 1024*1024*1024)) + name := volumeutil.GenerateVolumeName(p.options.ClusterName, p.options.PVName, 255) volumeOptions := &photon.VolumeOptions{ CapacityGB: volSizeGB, Tags: *p.options.CloudTags, diff --git a/pkg/volume/plugins.go b/pkg/volume/plugins.go index 4057b27898..ec4ec57914 100644 --- a/pkg/volume/plugins.go +++ b/pkg/volume/plugins.go @@ -33,6 +33,7 @@ import ( "k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/util/io" "k8s.io/kubernetes/pkg/util/mount" + "k8s.io/kubernetes/pkg/volume/util/recyclerclient" ) const ( @@ -161,7 +162,7 @@ type RecyclableVolumePlugin interface { // Recycle will use the provided recorder to write any events that might be // interesting to user. It's expected that caller will pass these events to // the PV being recycled. - Recycle(pvName string, spec *Spec, eventRecorder RecycleEventRecorder) error + Recycle(pvName string, spec *Spec, eventRecorder recyclerclient.RecycleEventRecorder) error } // DeletableVolumePlugin is an extended interface of VolumePlugin and is used diff --git a/pkg/volume/portworx/BUILD b/pkg/volume/portworx/BUILD index c9cf314e30..321306515b 100644 --- a/pkg/volume/portworx/BUILD +++ b/pkg/volume/portworx/BUILD @@ -33,7 +33,7 @@ go_library( "//pkg/util/mount:go_default_library", "//pkg/util/strings:go_default_library", "//pkg/volume:go_default_library", - "//pkg/volume/util/volumehelper:go_default_library", + "//pkg/volume/util:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/libopenstorage/openstorage/api:go_default_library", "//vendor/github.com/libopenstorage/openstorage/api/client:go_default_library", diff --git a/pkg/volume/portworx/portworx.go b/pkg/volume/portworx/portworx.go index 8c1738bbaa..70b6ece936 100644 --- a/pkg/volume/portworx/portworx.go +++ b/pkg/volume/portworx/portworx.go @@ -28,7 +28,7 @@ import ( 
"k8s.io/kubernetes/pkg/util/mount" kstrings "k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/volume" - "k8s.io/kubernetes/pkg/volume/util/volumehelper" + "k8s.io/kubernetes/pkg/volume/util" ) const ( @@ -119,7 +119,7 @@ func (plugin *portworxVolumePlugin) newMounterInternal(spec *volume.Spec, podUID }, fsType: fsType, readOnly: readOnly, - diskMounter: volumehelper.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host)}, nil + diskMounter: util.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host)}, nil } func (plugin *portworxVolumePlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) { @@ -358,7 +358,7 @@ type portworxVolumeProvisioner struct { var _ volume.Provisioner = &portworxVolumeProvisioner{} func (c *portworxVolumeProvisioner) Provision() (*v1.PersistentVolume, error) { - if !volume.AccessModesContainedInAll(c.plugin.GetAccessModes(), c.options.PVC.Spec.AccessModes) { + if !util.AccessModesContainedInAll(c.plugin.GetAccessModes(), c.options.PVC.Spec.AccessModes) { return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", c.options.PVC.Spec.AccessModes, c.plugin.GetAccessModes()) } @@ -372,7 +372,7 @@ func (c *portworxVolumeProvisioner) Provision() (*v1.PersistentVolume, error) { Name: c.options.PVName, Labels: map[string]string{}, Annotations: map[string]string{ - volumehelper.VolumeDynamicallyCreatedByKey: "portworx-volume-dynamic-provisioner", + util.VolumeDynamicallyCreatedByKey: "portworx-volume-dynamic-provisioner", }, }, Spec: v1.PersistentVolumeSpec{ diff --git a/pkg/volume/portworx/portworx_util.go b/pkg/volume/portworx/portworx_util.go index fbf4b88378..5faed5f1fc 100644 --- a/pkg/volume/portworx/portworx_util.go +++ b/pkg/volume/portworx/portworx_util.go @@ -27,6 +27,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/volume" + volutil "k8s.io/kubernetes/pkg/volume/util" ) const ( @@ -55,7 
+56,7 @@ func (util *PortworxVolumeUtil) CreateVolume(p *portworxVolumeProvisioner) (stri capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] // Portworx Volumes are specified in GB - requestGB := int(volume.RoundUpSize(capacity.Value(), 1024*1024*1024)) + requestGB := int(volutil.RoundUpSize(capacity.Value(), 1024*1024*1024)) // Perform a best-effort parsing of parameters. Portworx 1.2.9 and later parses volume parameters from // spec.VolumeLabels. So even if below SpecFromOpts() fails to parse certain parameters or diff --git a/pkg/volume/quobyte/BUILD b/pkg/volume/quobyte/BUILD index 941fdf1c16..4634396371 100644 --- a/pkg/volume/quobyte/BUILD +++ b/pkg/volume/quobyte/BUILD @@ -19,7 +19,6 @@ go_library( "//pkg/util/strings:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", - "//pkg/volume/util/volumehelper:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/pborman/uuid:go_default_library", "//vendor/github.com/quobyte/api:go_default_library", diff --git a/pkg/volume/quobyte/quobyte.go b/pkg/volume/quobyte/quobyte.go index 8e6ec1f7d1..0a5990b290 100644 --- a/pkg/volume/quobyte/quobyte.go +++ b/pkg/volume/quobyte/quobyte.go @@ -32,7 +32,6 @@ import ( "k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" - "k8s.io/kubernetes/pkg/volume/util/volumehelper" ) // ProbeVolumePlugins is the primary entrypoint for volume plugins. 
@@ -176,7 +175,7 @@ func (plugin *quobytePlugin) newMounterInternal(spec *volume.Spec, pod *v1.Pod, }, registry: source.Registry, readOnly: readOnly, - mountOptions: volume.MountOptionFromSpec(spec), + mountOptions: util.MountOptionFromSpec(spec), }, nil } @@ -256,7 +255,7 @@ func (mounter *quobyteMounter) SetUpAt(dir string, fsGroup *int64) error { } //if a trailing slash is missing we add it here - mountOptions := volume.JoinMountOptions(mounter.mountOptions, options) + mountOptions := util.JoinMountOptions(mounter.mountOptions, options) if err := mounter.mounter.Mount(mounter.correctTraillingSlash(mounter.registry), dir, "quobyte", mountOptions); err != nil { return fmt.Errorf("quobyte: mount failed: %v", err) } @@ -356,7 +355,7 @@ type quobyteVolumeProvisioner struct { } func (provisioner *quobyteVolumeProvisioner) Provision() (*v1.PersistentVolume, error) { - if !volume.AccessModesContainedInAll(provisioner.plugin.GetAccessModes(), provisioner.options.PVC.Spec.AccessModes) { + if !util.AccessModesContainedInAll(provisioner.plugin.GetAccessModes(), provisioner.options.PVC.Spec.AccessModes) { return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", provisioner.options.PVC.Spec.AccessModes, provisioner.plugin.GetAccessModes()) } @@ -410,7 +409,7 @@ func (provisioner *quobyteVolumeProvisioner) Provision() (*v1.PersistentVolume, return nil, err } pv := new(v1.PersistentVolume) - metav1.SetMetaDataAnnotation(&pv.ObjectMeta, volumehelper.VolumeDynamicallyCreatedByKey, "quobyte-dynamic-provisioner") + metav1.SetMetaDataAnnotation(&pv.ObjectMeta, util.VolumeDynamicallyCreatedByKey, "quobyte-dynamic-provisioner") pv.Spec.PersistentVolumeSource.Quobyte = vol pv.Spec.PersistentVolumeReclaimPolicy = provisioner.options.PersistentVolumeReclaimPolicy pv.Spec.AccessModes = provisioner.options.PVC.Spec.AccessModes diff --git a/pkg/volume/quobyte/quobyte_util.go b/pkg/volume/quobyte/quobyte_util.go index 2b5db49fa2..c1deb552a3 100644 --- 
a/pkg/volume/quobyte/quobyte_util.go +++ b/pkg/volume/quobyte/quobyte_util.go @@ -22,7 +22,7 @@ import ( "strings" "k8s.io/api/core/v1" - "k8s.io/kubernetes/pkg/volume" + "k8s.io/kubernetes/pkg/volume/util" "github.com/golang/glog" quobyteapi "github.com/quobyte/api" @@ -34,7 +34,7 @@ type quobyteVolumeManager struct { func (manager *quobyteVolumeManager) createVolume(provisioner *quobyteVolumeProvisioner, createQuota bool) (quobyte *v1.QuobyteVolumeSource, size int, err error) { capacity := provisioner.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] - volumeSize := int(volume.RoundUpSize(capacity.Value(), 1024*1024*1024)) + volumeSize := int(util.RoundUpSize(capacity.Value(), 1024*1024*1024)) // Quobyte has the concept of Volumes which doen't have a specific size (they can grow unlimited) // to simulate a size constraint we set here a Quota for logical space volumeRequest := &quobyteapi.CreateVolumeRequest{ diff --git a/pkg/volume/rbd/BUILD b/pkg/volume/rbd/BUILD index f08725f46f..65042418cd 100644 --- a/pkg/volume/rbd/BUILD +++ b/pkg/volume/rbd/BUILD @@ -23,7 +23,7 @@ go_library( "//pkg/util/strings:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", - "//pkg/volume/util/volumehelper:go_default_library", + "//pkg/volume/util/volumepathhandler:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", diff --git a/pkg/volume/rbd/attacher.go b/pkg/volume/rbd/attacher.go index 1454c231fb..2e5960092e 100644 --- a/pkg/volume/rbd/attacher.go +++ b/pkg/volume/rbd/attacher.go @@ -27,7 +27,6 @@ import ( "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" volutil "k8s.io/kubernetes/pkg/volume/util" - "k8s.io/kubernetes/pkg/volume/util/volumehelper" ) // NewAttacher implements AttachableVolumePlugin.NewAttacher. 
@@ -39,7 +38,7 @@ func (plugin *rbdPlugin) newAttacherInternal(manager diskManager) (volume.Attach return &rbdAttacher{ plugin: plugin, manager: manager, - mounter: volumehelper.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host), + mounter: volutil.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host), }, nil } @@ -52,7 +51,7 @@ func (plugin *rbdPlugin) newDetacherInternal(manager diskManager) (volume.Detach return &rbdDetacher{ plugin: plugin, manager: manager, - mounter: volumehelper.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host), + mounter: volutil.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host), }, nil } @@ -154,7 +153,7 @@ func (attacher *rbdAttacher) MountDevice(spec *volume.Spec, devicePath string, d if ro { options = append(options, "ro") } - mountOptions := volume.MountOptionFromSpec(spec, options...) + mountOptions := volutil.MountOptionFromSpec(spec, options...) err = attacher.mounter.FormatAndMount(devicePath, deviceMountPath, fstype, mountOptions) if err != nil { os.Remove(deviceMountPath) diff --git a/pkg/volume/rbd/disk_manager.go b/pkg/volume/rbd/disk_manager.go index 70db99b3cf..6f62f48544 100644 --- a/pkg/volume/rbd/disk_manager.go +++ b/pkg/volume/rbd/disk_manager.go @@ -31,6 +31,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" + "k8s.io/kubernetes/pkg/volume/util" ) // Abstract interface to disk operations. @@ -85,7 +86,7 @@ func diskSetUp(manager diskManager, b rbdMounter, volPath string, mounter mount. 
if (&b).GetAttributes().ReadOnly { options = append(options, "ro") } - mountOptions := volume.JoinMountOptions(b.mountOptions, options) + mountOptions := util.JoinMountOptions(b.mountOptions, options) err = mounter.Mount(globalPDPath, volPath, "", mountOptions) if err != nil { glog.Errorf("failed to bind mount:%s", globalPDPath) diff --git a/pkg/volume/rbd/rbd.go b/pkg/volume/rbd/rbd.go index 9323e4bbd2..ffc5c03c10 100644 --- a/pkg/volume/rbd/rbd.go +++ b/pkg/volume/rbd/rbd.go @@ -35,7 +35,7 @@ import ( "k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/volume" volutil "k8s.io/kubernetes/pkg/volume/util" - "k8s.io/kubernetes/pkg/volume/util/volumehelper" + "k8s.io/kubernetes/pkg/volume/util/volumepathhandler" ) var ( @@ -327,7 +327,7 @@ func (plugin *rbdPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, Keyring: keyring, Secret: secret, fsType: fstype, - mountOptions: volume.MountOptionFromSpec(spec), + mountOptions: volutil.MountOptionFromSpec(spec), }, nil } @@ -389,7 +389,7 @@ func (plugin *rbdPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*vol func (plugin *rbdPlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeName, mapPath string) (*volume.Spec, error) { pluginDir := plugin.host.GetVolumeDevicePluginDir(rbdPluginName) - blkutil := volutil.NewBlockVolumePathHandler() + blkutil := volumepathhandler.NewBlockVolumePathHandler() globalMapPathUUID, err := blkutil.FindGlobalMapPathUUIDFromPod(pluginDir, mapPath, podUID) if err != nil { @@ -569,7 +569,7 @@ type rbdVolumeProvisioner struct { var _ volume.Provisioner = &rbdVolumeProvisioner{} func (r *rbdVolumeProvisioner) Provision() (*v1.PersistentVolume, error) { - if !volume.AccessModesContainedInAll(r.plugin.GetAccessModes(), r.options.PVC.Spec.AccessModes) { + if !volutil.AccessModesContainedInAll(r.plugin.GetAccessModes(), r.options.PVC.Spec.AccessModes) { return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", r.options.PVC.Spec.AccessModes, 
r.plugin.GetAccessModes()) } @@ -665,7 +665,7 @@ func (r *rbdVolumeProvisioner) Provision() (*v1.PersistentVolume, error) { } glog.Infof("successfully created rbd image %q", image) pv := new(v1.PersistentVolume) - metav1.SetMetaDataAnnotation(&pv.ObjectMeta, volumehelper.VolumeDynamicallyCreatedByKey, "rbd-dynamic-provisioner") + metav1.SetMetaDataAnnotation(&pv.ObjectMeta, volutil.VolumeDynamicallyCreatedByKey, "rbd-dynamic-provisioner") if secretName != "" { rbd.SecretRef = new(v1.SecretReference) @@ -741,7 +741,7 @@ func newRBD(podUID types.UID, volName string, image string, pool string, readOnl Pool: pool, ReadOnly: readOnly, plugin: plugin, - mounter: volumehelper.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host), + mounter: volutil.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host), exec: plugin.host.GetExec(plugin.GetPluginName()), manager: manager, MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, volName, plugin.host)), @@ -937,13 +937,13 @@ func (rbd *rbdDiskUnmapper) TearDownDevice(mapPath, _ string) error { // GenerateUnmapDeviceFunc() in operation_generator. As a result, these plugins fail to get // and remove loopback device then it will be remained on kubelet node. To avoid the problem, // local attach plugins needs to remove loopback device during TearDownDevice(). - blkUtil := volutil.NewBlockVolumePathHandler() - loop, err := volutil.BlockVolumePathHandler.GetLoopDevice(blkUtil, device) + blkUtil := volumepathhandler.NewBlockVolumePathHandler() + loop, err := volumepathhandler.BlockVolumePathHandler.GetLoopDevice(blkUtil, device) if err != nil { return fmt.Errorf("rbd: failed to get loopback for device: %v, err: %v", device, err) } // Remove loop device before detaching volume since volume detach operation gets busy if volume is opened by loopback. 
- err = volutil.BlockVolumePathHandler.RemoveLoopDevice(blkUtil, loop) + err = volumepathhandler.BlockVolumePathHandler.RemoveLoopDevice(blkUtil, loop) if err != nil { return fmt.Errorf("rbd: failed to remove loopback :%v, err: %v", loop, err) } diff --git a/pkg/volume/rbd/rbd_util.go b/pkg/volume/rbd/rbd_util.go index a232dab4ed..3374205104 100644 --- a/pkg/volume/rbd/rbd_util.go +++ b/pkg/volume/rbd/rbd_util.go @@ -563,7 +563,7 @@ func (util *RBDUtil) CreateImage(p *rbdVolumeProvisioner) (r *v1.RBDPersistentVo capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] volSizeBytes := capacity.Value() // Convert to MB that rbd defaults on. - sz := int(volume.RoundUpSize(volSizeBytes, 1024*1024)) + sz := int(volutil.RoundUpSize(volSizeBytes, 1024*1024)) volSz := fmt.Sprintf("%d", sz) mon := util.kernelRBDMonitorsOpt(p.Mon) if p.rbdMounter.imageFormat == rbdImageFormat2 { @@ -621,7 +621,7 @@ func (util *RBDUtil) ExpandImage(rbdExpander *rbdVolumeExpander, oldSize resourc var err error volSizeBytes := newSize.Value() // Convert to MB that rbd defaults on. 
- sz := int(volume.RoundUpSize(volSizeBytes, 1024*1024)) + sz := int(volutil.RoundUpSize(volSizeBytes, 1024*1024)) newVolSz := fmt.Sprintf("%d", sz) newSizeQuant := resource.MustParse(fmt.Sprintf("%dMi", sz)) diff --git a/pkg/volume/scaleio/BUILD b/pkg/volume/scaleio/BUILD index bb03f309aa..b6c80d8df2 100644 --- a/pkg/volume/scaleio/BUILD +++ b/pkg/volume/scaleio/BUILD @@ -45,7 +45,6 @@ go_library( "//pkg/util/strings:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", - "//pkg/volume/util/volumehelper:go_default_library", "//vendor/github.com/codedellemc/goscaleio:go_default_library", "//vendor/github.com/codedellemc/goscaleio/types/v1:go_default_library", "//vendor/github.com/golang/glog:go_default_library", diff --git a/pkg/volume/scaleio/sio_volume.go b/pkg/volume/scaleio/sio_volume.go index 0d0d35608b..b7f0270111 100644 --- a/pkg/volume/scaleio/sio_volume.go +++ b/pkg/volume/scaleio/sio_volume.go @@ -33,7 +33,6 @@ import ( kstrings "k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" - "k8s.io/kubernetes/pkg/volume/util/volumehelper" ) type sioVolume struct { @@ -142,7 +141,7 @@ func (v *sioVolume) SetUpAt(dir string, fsGroup *int64) error { } glog.V(4).Info(log("setup created mount point directory %s", dir)) - diskMounter := volumehelper.NewSafeFormatAndMountFromHost(v.plugin.GetPluginName(), v.plugin.host) + diskMounter := util.NewSafeFormatAndMountFromHost(v.plugin.GetPluginName(), v.plugin.host) err = diskMounter.FormatAndMount(devicePath, dir, v.fsType, options) if err != nil { @@ -256,7 +255,7 @@ var _ volume.Provisioner = &sioVolume{} func (v *sioVolume) Provision() (*api.PersistentVolume, error) { glog.V(4).Info(log("attempting to dynamically provision pvc %v", v.options.PVC.Name)) - if !volume.AccessModesContainedInAll(v.plugin.GetAccessModes(), v.options.PVC.Spec.AccessModes) { + if !util.AccessModesContainedInAll(v.plugin.GetAccessModes(), 
v.options.PVC.Spec.AccessModes) { return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", v.options.PVC.Spec.AccessModes, v.plugin.GetAccessModes()) } @@ -267,14 +266,14 @@ func (v *sioVolume) Provision() (*api.PersistentVolume, error) { capacity := v.options.PVC.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)] volSizeBytes := capacity.Value() - volSizeGB := int64(volume.RoundUpSize(volSizeBytes, oneGig)) + volSizeGB := int64(util.RoundUpSize(volSizeBytes, oneGig)) if volSizeBytes == 0 { return nil, fmt.Errorf("invalid volume size of 0 specified") } if volSizeBytes < eightGig { - volSizeGB = int64(volume.RoundUpSize(eightGig, oneGig)) + volSizeGB = int64(util.RoundUpSize(eightGig, oneGig)) glog.V(4).Info(log("capacity less than 8Gi found, adjusted to %dGi", volSizeGB)) } @@ -314,7 +313,7 @@ func (v *sioVolume) Provision() (*api.PersistentVolume, error) { Namespace: v.options.PVC.Namespace, Labels: map[string]string{}, Annotations: map[string]string{ - volumehelper.VolumeDynamicallyCreatedByKey: "scaleio-dynamic-provisioner", + util.VolumeDynamicallyCreatedByKey: "scaleio-dynamic-provisioner", }, }, Spec: api.PersistentVolumeSpec{ diff --git a/pkg/volume/secret/secret.go b/pkg/volume/secret/secret.go index 67f4556e3f..116da827c2 100644 --- a/pkg/volume/secret/secret.go +++ b/pkg/volume/secret/secret.go @@ -303,7 +303,7 @@ func (c *secretVolumeUnmounter) TearDown() error { } func (c *secretVolumeUnmounter) TearDownAt(dir string) error { - return volume.UnmountViaEmptyDir(dir, c.plugin.host, c.volName, wrappedVolumeSpec(), c.podUID) + return volumeutil.UnmountViaEmptyDir(dir, c.plugin.host, c.volName, wrappedVolumeSpec(), c.podUID) } func getVolumeSource(spec *volume.Spec) (*v1.SecretVolumeSource, bool) { diff --git a/pkg/volume/storageos/BUILD b/pkg/volume/storageos/BUILD index 804b762836..bff52a9eac 100644 --- a/pkg/volume/storageos/BUILD +++ b/pkg/volume/storageos/BUILD @@ -19,7 +19,6 @@ go_library( 
"//pkg/util/strings:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", - "//pkg/volume/util/volumehelper:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/storageos/go-api:go_default_library", "//vendor/github.com/storageos/go-api/types:go_default_library", diff --git a/pkg/volume/storageos/storageos.go b/pkg/volume/storageos/storageos.go index 5f9c2aa2c2..a814c438dd 100644 --- a/pkg/volume/storageos/storageos.go +++ b/pkg/volume/storageos/storageos.go @@ -35,7 +35,6 @@ import ( kstrings "k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" - "k8s.io/kubernetes/pkg/volume/util/volumehelper" ) // ProbeVolumePlugins is the primary entrypoint for volume plugins. @@ -137,7 +136,7 @@ func (plugin *storageosPlugin) newMounterInternal(spec *volume.Spec, pod *v1.Pod MetricsProvider: volume.NewMetricsStatFS(getPath(pod.UID, volNamespace, volName, spec.Name(), plugin.host)), }, diskMounter: &mount.SafeFormatAndMount{Interface: mounter, Exec: exec}, - mountOptions: volume.MountOptionFromSpec(spec), + mountOptions: util.MountOptionFromSpec(spec), }, nil } @@ -389,7 +388,7 @@ func (b *storageosMounter) SetUpAt(dir string, fsGroup *int64) error { if b.readOnly { options = append(options, "ro") } - mountOptions := volume.JoinMountOptions(b.mountOptions, options) + mountOptions := util.JoinMountOptions(b.mountOptions, options) globalPDPath := makeGlobalPDName(b.plugin.host, b.pvName, b.volNamespace, b.volName) glog.V(4).Infof("Attempting to bind mount to pod volume at %s", dir) @@ -562,7 +561,7 @@ type storageosProvisioner struct { var _ volume.Provisioner = &storageosProvisioner{} func (c *storageosProvisioner) Provision() (*v1.PersistentVolume, error) { - if !volume.AccessModesContainedInAll(c.plugin.GetAccessModes(), c.options.PVC.Spec.AccessModes) { + if !util.AccessModesContainedInAll(c.plugin.GetAccessModes(), 
c.options.PVC.Spec.AccessModes) { return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", c.options.PVC.Spec.AccessModes, c.plugin.GetAccessModes()) } @@ -600,7 +599,7 @@ func (c *storageosProvisioner) Provision() (*v1.PersistentVolume, error) { c.labels[k] = v } capacity := c.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] - c.sizeGB = int(volume.RoundUpSize(capacity.Value(), 1024*1024*1024)) + c.sizeGB = int(util.RoundUpSize(capacity.Value(), 1024*1024*1024)) apiCfg, err := parsePVSecret(adminSecretNamespace, adminSecretName, c.plugin.host.GetKubeClient()) if err != nil { @@ -622,7 +621,7 @@ func (c *storageosProvisioner) Provision() (*v1.PersistentVolume, error) { Name: vol.Name, Labels: map[string]string{}, Annotations: map[string]string{ - volumehelper.VolumeDynamicallyCreatedByKey: "storageos-dynamic-provisioner", + util.VolumeDynamicallyCreatedByKey: "storageos-dynamic-provisioner", }, }, Spec: v1.PersistentVolumeSpec{ diff --git a/pkg/volume/testing/BUILD b/pkg/volume/testing/BUILD index f4356f4e58..f434e5b15a 100644 --- a/pkg/volume/testing/BUILD +++ b/pkg/volume/testing/BUILD @@ -19,7 +19,8 @@ go_library( "//pkg/util/strings:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", - "//pkg/volume/util/volumehelper:go_default_library", + "//pkg/volume/util/recyclerclient:go_default_library", + "//pkg/volume/util/volumepathhandler:go_default_library", "//vendor/github.com/stretchr/testify/mock:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", diff --git a/pkg/volume/testing/testing.go b/pkg/volume/testing/testing.go index f4e7f2d209..9e5ae2cd12 100644 --- a/pkg/volume/testing/testing.go +++ b/pkg/volume/testing/testing.go @@ -40,7 +40,8 @@ import ( utilstrings "k8s.io/kubernetes/pkg/util/strings" . 
"k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" - "k8s.io/kubernetes/pkg/volume/util/volumehelper" + "k8s.io/kubernetes/pkg/volume/util/recyclerclient" + "k8s.io/kubernetes/pkg/volume/util/volumepathhandler" ) // fakeVolumeHost is useful for testing volume plugins. @@ -379,7 +380,7 @@ func (plugin *FakeVolumePlugin) GetNewDetacherCallCount() int { return plugin.NewDetacherCallCount } -func (plugin *FakeVolumePlugin) Recycle(pvName string, spec *Spec, eventRecorder RecycleEventRecorder) error { +func (plugin *FakeVolumePlugin) Recycle(pvName string, spec *Spec, eventRecorder recyclerclient.RecycleEventRecorder) error { return nil } @@ -711,7 +712,7 @@ func (fc *FakeProvisioner) Provision() (*v1.PersistentVolume, error) { ObjectMeta: metav1.ObjectMeta{ Name: fc.Options.PVName, Annotations: map[string]string{ - volumehelper.VolumeDynamicallyCreatedByKey: "fakeplugin-provisioner", + util.VolumeDynamicallyCreatedByKey: "fakeplugin-provisioner", }, }, Spec: v1.PersistentVolumeSpec{ @@ -731,10 +732,10 @@ func (fc *FakeProvisioner) Provision() (*v1.PersistentVolume, error) { return pv, nil } -var _ util.BlockVolumePathHandler = &FakeVolumePathHandler{} +var _ volumepathhandler.BlockVolumePathHandler = &FakeVolumePathHandler{} //NewDeviceHandler Create a new IoHandler implementation -func NewBlockVolumePathHandler() util.BlockVolumePathHandler { +func NewBlockVolumePathHandler() volumepathhandler.BlockVolumePathHandler { return &FakeVolumePathHandler{} } diff --git a/pkg/volume/util.go b/pkg/volume/util.go deleted file mode 100644 index f268a50820..0000000000 --- a/pkg/volume/util.go +++ /dev/null @@ -1,523 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package volume - -import ( - "fmt" - "reflect" - - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" - "k8s.io/apimachinery/pkg/watch" - clientset "k8s.io/client-go/kubernetes" - - "hash/fnv" - "math/rand" - "strconv" - "strings" - - "github.com/golang/glog" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/resource" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/sets" -) - -const ( - // GB - GigaByte size - GB = 1000 * 1000 * 1000 - // GIB - GibiByte size - GIB = 1024 * 1024 * 1024 -) - -type RecycleEventRecorder func(eventtype, message string) - -// RecycleVolumeByWatchingPodUntilCompletion is intended for use with volume -// Recyclers. This function will save the given Pod to the API and watch it -// until it completes, fails, or the pod's ActiveDeadlineSeconds is exceeded, -// whichever comes first. An attempt to delete a recycler pod is always -// attempted before returning. -// -// In case there is a pod with the same namespace+name already running, this -// function deletes it as it is not able to judge if it is an old recycler -// or user has forged a fake recycler to block Kubernetes from recycling.// -// -// pod - the pod designed by a volume plugin to recycle the volume. pod.Name -// will be overwritten with unique name based on PV.Name. -// client - kube client for API operations. 
-func RecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *v1.Pod, kubeClient clientset.Interface, recorder RecycleEventRecorder) error { - return internalRecycleVolumeByWatchingPodUntilCompletion(pvName, pod, newRecyclerClient(kubeClient, recorder)) -} - -// same as above func comments, except 'recyclerClient' is a narrower pod API -// interface to ease testing -func internalRecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *v1.Pod, recyclerClient recyclerClient) error { - glog.V(5).Infof("creating recycler pod for volume %s\n", pod.Name) - - // Generate unique name for the recycler pod - we need to get "already - // exists" error when a previous controller has already started recycling - // the volume. Here we assume that pv.Name is already unique. - pod.Name = "recycler-for-" + pvName - pod.GenerateName = "" - - stopChannel := make(chan struct{}) - defer close(stopChannel) - podCh, err := recyclerClient.WatchPod(pod.Name, pod.Namespace, stopChannel) - if err != nil { - glog.V(4).Infof("cannot start watcher for pod %s/%s: %v", pod.Namespace, pod.Name, err) - return err - } - - // Start the pod - _, err = recyclerClient.CreatePod(pod) - if err != nil { - if errors.IsAlreadyExists(err) { - deleteErr := recyclerClient.DeletePod(pod.Name, pod.Namespace) - if deleteErr != nil { - return fmt.Errorf("failed to delete old recycler pod %s/%s: %s", pod.Namespace, pod.Name, deleteErr) - } - // Recycler will try again and the old pod will be hopefully deleted - // at that time. - return fmt.Errorf("old recycler pod found, will retry later") - } - return fmt.Errorf("unexpected error creating recycler pod: %+v", err) - } - err = waitForPod(pod, recyclerClient, podCh) - - // In all cases delete the recycler pod and log its result. 
- glog.V(2).Infof("deleting recycler pod %s/%s", pod.Namespace, pod.Name) - deleteErr := recyclerClient.DeletePod(pod.Name, pod.Namespace) - if deleteErr != nil { - glog.Errorf("failed to delete recycler pod %s/%s: %v", pod.Namespace, pod.Name, err) - } - - // Returning recycler error is preferred, the pod will be deleted again on - // the next retry. - if err != nil { - return fmt.Errorf("failed to recycle volume: %s", err) - } - - // Recycle succeeded but we failed to delete the recycler pod. Report it, - // the controller will re-try recycling the PV again shortly. - if deleteErr != nil { - return fmt.Errorf("failed to delete recycler pod: %s", deleteErr) - } - - return nil -} - -// waitForPod watches the pod it until it finishes and send all events on the -// pod to the PV. -func waitForPod(pod *v1.Pod, recyclerClient recyclerClient, podCh <-chan watch.Event) error { - for { - event, ok := <-podCh - if !ok { - return fmt.Errorf("recycler pod %q watch channel had been closed", pod.Name) - } - switch event.Object.(type) { - case *v1.Pod: - // POD changed - pod := event.Object.(*v1.Pod) - glog.V(4).Infof("recycler pod update received: %s %s/%s %s", event.Type, pod.Namespace, pod.Name, pod.Status.Phase) - switch event.Type { - case watch.Added, watch.Modified: - if pod.Status.Phase == v1.PodSucceeded { - // Recycle succeeded. 
- return nil - } - if pod.Status.Phase == v1.PodFailed { - if pod.Status.Message != "" { - return fmt.Errorf(pod.Status.Message) - } else { - return fmt.Errorf("pod failed, pod.Status.Message unknown.") - } - } - - case watch.Deleted: - return fmt.Errorf("recycler pod was deleted") - - case watch.Error: - return fmt.Errorf("recycler pod watcher failed") - } - - case *v1.Event: - // Event received - podEvent := event.Object.(*v1.Event) - glog.V(4).Infof("recycler event received: %s %s/%s %s/%s %s", event.Type, podEvent.Namespace, podEvent.Name, podEvent.InvolvedObject.Namespace, podEvent.InvolvedObject.Name, podEvent.Message) - if event.Type == watch.Added { - recyclerClient.Event(podEvent.Type, podEvent.Message) - } - } - } -} - -// recyclerClient abstracts access to a Pod by providing a narrower interface. -// This makes it easier to mock a client for testing. -type recyclerClient interface { - CreatePod(pod *v1.Pod) (*v1.Pod, error) - GetPod(name, namespace string) (*v1.Pod, error) - DeletePod(name, namespace string) error - // WatchPod returns a ListWatch for watching a pod. The stopChannel is used - // to close the reflector backing the watch. The caller is responsible for - // derring a close on the channel to stop the reflector. - WatchPod(name, namespace string, stopChannel chan struct{}) (<-chan watch.Event, error) - // Event sends an event to the volume that is being recycled. 
- Event(eventtype, message string) -} - -func newRecyclerClient(client clientset.Interface, recorder RecycleEventRecorder) recyclerClient { - return &realRecyclerClient{ - client, - recorder, - } -} - -type realRecyclerClient struct { - client clientset.Interface - recorder RecycleEventRecorder -} - -func (c *realRecyclerClient) CreatePod(pod *v1.Pod) (*v1.Pod, error) { - return c.client.CoreV1().Pods(pod.Namespace).Create(pod) -} - -func (c *realRecyclerClient) GetPod(name, namespace string) (*v1.Pod, error) { - return c.client.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{}) -} - -func (c *realRecyclerClient) DeletePod(name, namespace string) error { - return c.client.CoreV1().Pods(namespace).Delete(name, nil) -} - -func (c *realRecyclerClient) Event(eventtype, message string) { - c.recorder(eventtype, message) -} - -func (c *realRecyclerClient) WatchPod(name, namespace string, stopChannel chan struct{}) (<-chan watch.Event, error) { - podSelector, err := fields.ParseSelector("metadata.name=" + name) - if err != nil { - return nil, err - } - options := metav1.ListOptions{ - FieldSelector: podSelector.String(), - Watch: true, - } - - podWatch, err := c.client.CoreV1().Pods(namespace).Watch(options) - if err != nil { - return nil, err - } - - eventSelector, _ := fields.ParseSelector("involvedObject.name=" + name) - eventWatch, err := c.client.CoreV1().Events(namespace).Watch(metav1.ListOptions{ - FieldSelector: eventSelector.String(), - Watch: true, - }) - if err != nil { - podWatch.Stop() - return nil, err - } - - eventCh := make(chan watch.Event, 30) - - go func() { - defer eventWatch.Stop() - defer podWatch.Stop() - defer close(eventCh) - var podWatchChannelClosed bool - var eventWatchChannelClosed bool - for { - select { - case _ = <-stopChannel: - return - - case podEvent, ok := <-podWatch.ResultChan(): - if !ok { - podWatchChannelClosed = true - } else { - eventCh <- podEvent - } - case eventEvent, ok := <-eventWatch.ResultChan(): - if !ok { - 
eventWatchChannelClosed = true - } else { - eventCh <- eventEvent - } - } - if podWatchChannelClosed && eventWatchChannelClosed { - break - } - } - }() - - return eventCh, nil -} - -// CalculateTimeoutForVolume calculates time for a Recycler pod to complete a -// recycle operation. The calculation and return value is either the -// minimumTimeout or the timeoutIncrement per Gi of storage size, whichever is -// greater. -func CalculateTimeoutForVolume(minimumTimeout, timeoutIncrement int, pv *v1.PersistentVolume) int64 { - giQty := resource.MustParse("1Gi") - pvQty := pv.Spec.Capacity[v1.ResourceStorage] - giSize := giQty.Value() - pvSize := pvQty.Value() - timeout := (pvSize / giSize) * int64(timeoutIncrement) - if timeout < int64(minimumTimeout) { - return int64(minimumTimeout) - } - return timeout -} - -// RoundUpSize calculates how many allocation units are needed to accommodate -// a volume of given size. E.g. when user wants 1500MiB volume, while AWS EBS -// allocates volumes in gibibyte-sized chunks, -// RoundUpSize(1500 * 1024*1024, 1024*1024*1024) returns '2' -// (2 GiB is the smallest allocatable volume that can hold 1500MiB) -func RoundUpSize(volumeSizeBytes int64, allocationUnitBytes int64) int64 { - return (volumeSizeBytes + allocationUnitBytes - 1) / allocationUnitBytes -} - -// RoundUpToGB rounds up given quantity to chunks of GB -func RoundUpToGB(size resource.Quantity) int64 { - requestBytes := size.Value() - return RoundUpSize(requestBytes, GB) -} - -// RoundUpToGiB rounds up given quantity upto chunks of GiB -func RoundUpToGiB(size resource.Quantity) int64 { - requestBytes := size.Value() - return RoundUpSize(requestBytes, GIB) -} - -// GenerateVolumeName returns a PV name with clusterName prefix. The function -// should be used to generate a name of GCE PD or Cinder volume. It basically -// adds "-dynamic-" before the PV name, making sure the resulting -// string fits given length and cuts "dynamic" if not. 
-func GenerateVolumeName(clusterName, pvName string, maxLength int) string { - prefix := clusterName + "-dynamic" - pvLen := len(pvName) - - // cut the "-dynamic" to fit full pvName into maxLength - // +1 for the '-' dash - if pvLen+1+len(prefix) > maxLength { - prefix = prefix[:maxLength-pvLen-1] - } - return prefix + "-" + pvName -} - -// GetPath checks if the path from the mounter is empty. -func GetPath(mounter Mounter) (string, error) { - path := mounter.GetPath() - if path == "" { - return "", fmt.Errorf("Path is empty %s", reflect.TypeOf(mounter).String()) - } - return path, nil -} - -// ChooseZoneForVolume implements our heuristics for choosing a zone for volume creation based on the volume name -// Volumes are generally round-robin-ed across all active zones, using the hash of the PVC Name. -// However, if the PVCName ends with `-`, we will hash the prefix, and then add the integer to the hash. -// This means that a StatefulSet's volumes (`claimname-statefulsetname-id`) will spread across available zones, -// assuming the id values are consecutive. -func ChooseZoneForVolume(zones sets.String, pvcName string) string { - // We create the volume in a zone determined by the name - // Eventually the scheduler will coordinate placement into an available zone - hash, index := getPVCNameHashAndIndexOffset(pvcName) - - // Zones.List returns zones in a consistent order (sorted) - // We do have a potential failure case where volumes will not be properly spread, - // if the set of zones changes during StatefulSet volume creation. However, this is - // probably relatively unlikely because we expect the set of zones to be essentially - // static for clusters. - // Hopefully we can address this problem if/when we do full scheduler integration of - // PVC placement (which could also e.g. 
avoid putting volumes in overloaded or - // unhealthy zones) - zoneSlice := zones.List() - zone := zoneSlice[(hash+index)%uint32(len(zoneSlice))] - - glog.V(2).Infof("Creating volume for PVC %q; chose zone=%q from zones=%q", pvcName, zone, zoneSlice) - return zone -} - -// ChooseZonesForVolume is identical to ChooseZoneForVolume, but selects a multiple zones, for multi-zone disks. -func ChooseZonesForVolume(zones sets.String, pvcName string, numZones uint32) sets.String { - // We create the volume in a zone determined by the name - // Eventually the scheduler will coordinate placement into an available zone - hash, index := getPVCNameHashAndIndexOffset(pvcName) - - // Zones.List returns zones in a consistent order (sorted) - // We do have a potential failure case where volumes will not be properly spread, - // if the set of zones changes during StatefulSet volume creation. However, this is - // probably relatively unlikely because we expect the set of zones to be essentially - // static for clusters. - // Hopefully we can address this problem if/when we do full scheduler integration of - // PVC placement (which could also e.g. 
avoid putting volumes in overloaded or - // unhealthy zones) - zoneSlice := zones.List() - replicaZones := sets.NewString() - - startingIndex := index * numZones - for index = startingIndex; index < startingIndex+numZones; index++ { - zone := zoneSlice[(hash+index)%uint32(len(zoneSlice))] - replicaZones.Insert(zone) - } - - glog.V(2).Infof("Creating volume for replicated PVC %q; chosen zones=%q from zones=%q", - pvcName, replicaZones.UnsortedList(), zoneSlice) - return replicaZones -} - -func getPVCNameHashAndIndexOffset(pvcName string) (hash uint32, index uint32) { - if pvcName == "" { - // We should always be called with a name; this shouldn't happen - glog.Warningf("No name defined during volume create; choosing random zone") - - hash = rand.Uint32() - } else { - hashString := pvcName - - // Heuristic to make sure that volumes in a StatefulSet are spread across zones - // StatefulSet PVCs are (currently) named ClaimName-StatefulSetName-Id, - // where Id is an integer index. - // Note though that if a StatefulSet pod has multiple claims, we need them to be - // in the same zone, because otherwise the pod will be unable to mount both volumes, - // and will be unschedulable. So we hash _only_ the "StatefulSetName" portion when - // it looks like `ClaimName-StatefulSetName-Id`. - // We continue to round-robin volume names that look like `Name-Id` also; this is a useful - // feature for users that are creating statefulset-like functionality without using statefulsets. 
- lastDash := strings.LastIndexByte(pvcName, '-') - if lastDash != -1 { - statefulsetIDString := pvcName[lastDash+1:] - statefulsetID, err := strconv.ParseUint(statefulsetIDString, 10, 32) - if err == nil { - // Offset by the statefulsetID, so we round-robin across zones - index = uint32(statefulsetID) - // We still hash the volume name, but only the prefix - hashString = pvcName[:lastDash] - - // In the special case where it looks like `ClaimName-StatefulSetName-Id`, - // hash only the StatefulSetName, so that different claims on the same StatefulSet - // member end up in the same zone. - // Note that StatefulSetName (and ClaimName) might themselves both have dashes. - // We actually just take the portion after the final - of ClaimName-StatefulSetName. - // For our purposes it doesn't much matter (just suboptimal spreading). - lastDash := strings.LastIndexByte(hashString, '-') - if lastDash != -1 { - hashString = hashString[lastDash+1:] - } - - glog.V(2).Infof("Detected StatefulSet-style volume name %q; index=%d", pvcName, index) - } - } - - // We hash the (base) volume name, so we don't bias towards the first N zones - h := fnv.New32() - h.Write([]byte(hashString)) - hash = h.Sum32() - } - - return hash, index -} - -// UnmountViaEmptyDir delegates the tear down operation for secret, configmap, git_repo and downwardapi -// to empty_dir -func UnmountViaEmptyDir(dir string, host VolumeHost, volName string, volSpec Spec, podUID types.UID) error { - glog.V(3).Infof("Tearing down volume %v for pod %v at %v", volName, podUID, dir) - - // Wrap EmptyDir, let it do the teardown. 
- wrapped, err := host.NewWrapperUnmounter(volName, volSpec, podUID) - if err != nil { - return err - } - return wrapped.TearDownAt(dir) -} - -// MountOptionFromSpec extracts and joins mount options from volume spec with supplied options -func MountOptionFromSpec(spec *Spec, options ...string) []string { - pv := spec.PersistentVolume - - if pv != nil { - // Use beta annotation first - if mo, ok := pv.Annotations[v1.MountOptionAnnotation]; ok { - moList := strings.Split(mo, ",") - return JoinMountOptions(moList, options) - } - - if len(pv.Spec.MountOptions) > 0 { - return JoinMountOptions(pv.Spec.MountOptions, options) - } - } - - return options -} - -// JoinMountOptions joins mount options eliminating duplicates -func JoinMountOptions(userOptions []string, systemOptions []string) []string { - allMountOptions := sets.NewString() - - for _, mountOption := range userOptions { - if len(mountOption) > 0 { - allMountOptions.Insert(mountOption) - } - } - - for _, mountOption := range systemOptions { - allMountOptions.Insert(mountOption) - } - return allMountOptions.UnsortedList() -} - -// ValidateZone returns: -// - an error in case zone is an empty string or contains only any combination of spaces and tab characters -// - nil otherwise -func ValidateZone(zone string) error { - if strings.TrimSpace(zone) == "" { - return fmt.Errorf("the provided %q zone is not valid, it's an empty string or contains only spaces and tab characters", zone) - } - return nil -} - -// AccessModesContains returns whether the requested mode is contained by modes -func AccessModesContains(modes []v1.PersistentVolumeAccessMode, mode v1.PersistentVolumeAccessMode) bool { - for _, m := range modes { - if m == mode { - return true - } - } - return false -} - -// AccessModesContainedInAll returns whether all of the requested modes are contained by modes -func AccessModesContainedInAll(indexedModes []v1.PersistentVolumeAccessMode, requestedModes []v1.PersistentVolumeAccessMode) bool { - for _, mode := 
range requestedModes { - if !AccessModesContains(indexedModes, mode) { - return false - } - } - return true -} - -// GetWindowsPath get a windows path -func GetWindowsPath(path string) string { - windowsPath := strings.Replace(path, "/", "\\", -1) - if strings.HasPrefix(windowsPath, "\\") { - windowsPath = "c:" + windowsPath - } - return windowsPath -} diff --git a/pkg/volume/util/BUILD b/pkg/volume/util/BUILD index e82c225687..7530e4fcb4 100644 --- a/pkg/volume/util/BUILD +++ b/pkg/volume/util/BUILD @@ -1,10 +1,4 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", @@ -21,116 +15,63 @@ go_library( ] + select({ "@io_bazel_rules_go//go/platform:android": [ "device_util_unsupported.go", - "fs_unsupported.go", - "util_unsupported.go", ], "@io_bazel_rules_go//go/platform:darwin": [ "device_util_unsupported.go", - "fs.go", - "util_unsupported.go", ], "@io_bazel_rules_go//go/platform:dragonfly": [ "device_util_unsupported.go", - "fs_unsupported.go", - "util_unsupported.go", ], "@io_bazel_rules_go//go/platform:freebsd": [ "device_util_unsupported.go", - "fs_unsupported.go", - "util_unsupported.go", ], "@io_bazel_rules_go//go/platform:linux": [ "device_util_linux.go", - "fs.go", - "util_linux.go", ], "@io_bazel_rules_go//go/platform:nacl": [ "device_util_unsupported.go", - "fs_unsupported.go", - "util_unsupported.go", ], "@io_bazel_rules_go//go/platform:netbsd": [ "device_util_unsupported.go", - "fs_unsupported.go", - "util_unsupported.go", ], "@io_bazel_rules_go//go/platform:openbsd": [ "device_util_unsupported.go", - "fs_unsupported.go", - "util_unsupported.go", ], "@io_bazel_rules_go//go/platform:plan9": [ "device_util_unsupported.go", - "fs_unsupported.go", - "util_unsupported.go", ], "@io_bazel_rules_go//go/platform:solaris": [ "device_util_unsupported.go", - "fs_unsupported.go", - 
"util_unsupported.go", ], "@io_bazel_rules_go//go/platform:windows": [ "device_util_unsupported.go", - "fs_unsupported.go", - "util_unsupported.go", ], "//conditions:default": [], }), importpath = "k8s.io/kubernetes/pkg/volume/util", + visibility = ["//visibility:public"], deps = [ "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/core/v1/helper:go_default_library", + "//pkg/features:go_default_library", "//pkg/kubelet/apis:go_default_library", "//pkg/util/mount:go_default_library", + "//pkg/volume:go_default_library", + "//pkg/volume/util/types:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/storage/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library", + "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", - ] + select({ - "@io_bazel_rules_go//go/platform:android": [ - "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - ], - "@io_bazel_rules_go//go/platform:darwin": [ - "//vendor/golang.org/x/sys/unix:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - ], - "@io_bazel_rules_go//go/platform:dragonfly": [ - "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - ], - "@io_bazel_rules_go//go/platform:freebsd": [ - "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - ], - 
"@io_bazel_rules_go//go/platform:linux": [ - "//vendor/golang.org/x/sys/unix:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - ], - "@io_bazel_rules_go//go/platform:nacl": [ - "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - ], - "@io_bazel_rules_go//go/platform:netbsd": [ - "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - ], - "@io_bazel_rules_go//go/platform:openbsd": [ - "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - ], - "@io_bazel_rules_go//go/platform:plan9": [ - "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - ], - "@io_bazel_rules_go//go/platform:solaris": [ - "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - ], - "@io_bazel_rules_go//go/platform:windows": [ - "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - ], - "//conditions:default": [], - }), + ], ) go_test( @@ -150,6 +91,8 @@ go_test( "//pkg/apis/core/install:go_default_library", "//pkg/apis/core/v1/helper:go_default_library", "//pkg/util/mount:go_default_library", + "//pkg/util/slice:go_default_library", + "//pkg/volume:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -169,10 +112,13 @@ filegroup( name = "all-srcs", srcs = [ ":package-srcs", + "//pkg/volume/util/fs:all-srcs", "//pkg/volume/util/nestedpendingoperations:all-srcs", "//pkg/volume/util/operationexecutor:all-srcs", + "//pkg/volume/util/recyclerclient:all-srcs", "//pkg/volume/util/types:all-srcs", - "//pkg/volume/util/volumehelper:all-srcs", + "//pkg/volume/util/volumepathhandler:all-srcs", ], tags = ["automanaged"], + visibility = ["//visibility:public"], ) diff --git a/pkg/volume/util/fs/BUILD b/pkg/volume/util/fs/BUILD new file mode 100644 index 0000000000..205ddaaaeb --- /dev/null +++ b/pkg/volume/util/fs/BUILD 
@@ -0,0 +1,95 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = select({ + "@io_bazel_rules_go//go/platform:android": [ + "fs_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "fs.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "fs_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "fs_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "fs.go", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "fs_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "fs_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "fs_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "fs_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "fs_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "fs_unsupported.go", + ], + "//conditions:default": [], + }), + importpath = "k8s.io/kubernetes/pkg/volume/util/fs", + visibility = ["//visibility:public"], + deps = select({ + "@io_bazel_rules_go//go/platform:android": [ + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "//vendor/golang.org/x/sys/unix:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + 
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", + ], + "//conditions:default": [], + }), +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/pkg/volume/util/fs.go b/pkg/volume/util/fs/fs.go similarity index 99% rename from pkg/volume/util/fs.go rename to pkg/volume/util/fs/fs.go index c756c4a184..bbb4b0105c 100644 --- a/pkg/volume/util/fs.go +++ b/pkg/volume/util/fs/fs.go @@ -16,7 +16,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package util +package fs import ( "bytes" diff --git a/pkg/volume/util/fs_unsupported.go b/pkg/volume/util/fs/fs_unsupported.go similarity index 98% rename from pkg/volume/util/fs_unsupported.go rename to pkg/volume/util/fs/fs_unsupported.go index 8d35d5daed..da41fc8eee 100644 --- a/pkg/volume/util/fs_unsupported.go +++ b/pkg/volume/util/fs/fs_unsupported.go @@ -16,7 +16,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package util +package fs import ( "fmt" diff --git a/pkg/volume/util/operationexecutor/BUILD b/pkg/volume/util/operationexecutor/BUILD index ad28f3a456..f7676a07c7 100644 --- a/pkg/volume/util/operationexecutor/BUILD +++ b/pkg/volume/util/operationexecutor/BUILD @@ -23,7 +23,7 @@ go_library( "//pkg/volume/util:go_default_library", "//pkg/volume/util/nestedpendingoperations:go_default_library", "//pkg/volume/util/types:go_default_library", - "//pkg/volume/util/volumehelper:go_default_library", + "//pkg/volume/util/volumepathhandler:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", diff --git a/pkg/volume/util/operationexecutor/operation_executor.go b/pkg/volume/util/operationexecutor/operation_executor.go index 9ce8c11d1d..9aa04a06ac 100644 --- a/pkg/volume/util/operationexecutor/operation_executor.go +++ b/pkg/volume/util/operationexecutor/operation_executor.go @@ -34,7 +34,7 @@ import ( "k8s.io/kubernetes/pkg/volume/util" "k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations" volumetypes "k8s.io/kubernetes/pkg/volume/util/types" - "k8s.io/kubernetes/pkg/volume/util/volumehelper" + "k8s.io/kubernetes/pkg/volume/util/volumepathhandler" ) // OperationExecutor defines a set of operations for attaching, detaching, @@ -708,7 +708,7 @@ func (oe *operationExecutor) MountVolume( volumeToMount VolumeToMount, actualStateOfWorld ActualStateOfWorldMounterUpdater, isRemount bool) error { - fsVolume, err := volumehelper.CheckVolumeModeFilesystem(volumeToMount.VolumeSpec) + fsVolume, err := util.CheckVolumeModeFilesystem(volumeToMount.VolumeSpec) if err != nil { return err } @@ -736,7 +736,7 @@ func (oe *operationExecutor) MountVolume( if !volumeToMount.PluginIsAttachable { // Non-attachable volume plugins can execute mount for multiple pods // referencing the same volume in parallel - podName = 
volumehelper.GetUniquePodName(volumeToMount.Pod) + podName = util.GetUniquePodName(volumeToMount.Pod) } // TODO mount_device @@ -747,7 +747,7 @@ func (oe *operationExecutor) MountVolume( func (oe *operationExecutor) UnmountVolume( volumeToUnmount MountedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater) error { - fsVolume, err := volumehelper.CheckVolumeModeFilesystem(volumeToUnmount.VolumeSpec) + fsVolume, err := util.CheckVolumeModeFilesystem(volumeToUnmount.VolumeSpec) if err != nil { return err } @@ -778,7 +778,7 @@ func (oe *operationExecutor) UnmountDevice( deviceToDetach AttachedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, mounter mount.Interface) error { - fsVolume, err := volumehelper.CheckVolumeModeFilesystem(deviceToDetach.VolumeSpec) + fsVolume, err := util.CheckVolumeModeFilesystem(deviceToDetach.VolumeSpec) if err != nil { return err } @@ -881,7 +881,7 @@ func (oe *operationExecutor) CheckVolumeExistenceOperation( podName volumetypes.UniquePodName, podUID types.UID, attachable volume.AttachableVolumePlugin) (bool, error) { - fsVolume, err := volumehelper.CheckVolumeModeFilesystem(volumeSpec) + fsVolume, err := util.CheckVolumeModeFilesystem(volumeSpec) if err != nil { return false, err } @@ -913,7 +913,7 @@ func (oe *operationExecutor) CheckVolumeExistenceOperation( // is there. Either plugin is attachable or non-attachable, the plugin should // have symbolic link associated to raw block device under pod device map // if volume exists. 
- blkutil := util.NewBlockVolumePathHandler() + blkutil := volumepathhandler.NewBlockVolumePathHandler() var islinkExist bool var checkErr error if islinkExist, checkErr = blkutil.IsSymlinkExist(mountPath); checkErr != nil { diff --git a/pkg/volume/util/operationexecutor/operation_generator.go b/pkg/volume/util/operationexecutor/operation_generator.go index 50293d5e1e..21e4bbd04b 100644 --- a/pkg/volume/util/operationexecutor/operation_generator.go +++ b/pkg/volume/util/operationexecutor/operation_generator.go @@ -37,7 +37,7 @@ import ( "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" volumetypes "k8s.io/kubernetes/pkg/volume/util/types" - "k8s.io/kubernetes/pkg/volume/util/volumehelper" + "k8s.io/kubernetes/pkg/volume/util/volumepathhandler" ) var _ OperationGenerator = &operationGenerator{} @@ -60,7 +60,7 @@ type operationGenerator struct { checkNodeCapabilitiesBeforeMount bool // blkUtil provides volume path related operations for block volume - blkUtil util.BlockVolumePathHandler + blkUtil volumepathhandler.BlockVolumePathHandler } // NewOperationGenerator is returns instance of operationGenerator @@ -68,7 +68,7 @@ func NewOperationGenerator(kubeClient clientset.Interface, volumePluginMgr *volume.VolumePluginMgr, recorder record.EventRecorder, checkNodeCapabilitiesBeforeMount bool, - blkUtil util.BlockVolumePathHandler) OperationGenerator { + blkUtil volumepathhandler.BlockVolumePathHandler) OperationGenerator { return &operationGenerator{ kubeClient: kubeClient, @@ -378,7 +378,7 @@ func (og *operationGenerator) GenerateDetachVolumeFunc( // Get attacher plugin and the volumeName by splitting the volume unique name in case // there's no VolumeSpec: this happens only on attach/detach controller crash recovery // when a pod has been deleted during the controller downtime - pluginName, volumeName, err = volumehelper.SplitUniqueName(volumeToDetach.VolumeName) + pluginName, volumeName, err = util.SplitUniqueName(volumeToDetach.VolumeName) if err != 
nil { return volumetypes.GeneratedOperations{}, volumeToDetach.GenerateErrorDetailed("DetachVolume.SplitUniqueName failed", err) } @@ -1290,7 +1290,7 @@ func (og *operationGenerator) GenerateExpandVolumeFunc( } func checkMountOptionSupport(og *operationGenerator, volumeToMount VolumeToMount, plugin volume.VolumePlugin) error { - mountOptions := volume.MountOptionFromSpec(volumeToMount.VolumeSpec) + mountOptions := util.MountOptionFromSpec(volumeToMount.VolumeSpec) if len(mountOptions) > 0 && !plugin.SupportsMountOption() { return fmt.Errorf("Mount options are not supported for this volume type") diff --git a/pkg/volume/util/recyclerclient/BUILD b/pkg/volume/util/recyclerclient/BUILD new file mode 100644 index 0000000000..e65ad6c98b --- /dev/null +++ b/pkg/volume/util/recyclerclient/BUILD @@ -0,0 +1,44 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["recycler_client.go"], + importpath = "k8s.io/kubernetes/pkg/volume/util/recyclerclient", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", + "//vendor/k8s.io/client-go/kubernetes:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["recycler_client_test.go"], + embed = [":go_default_library"], + deps = [ + "//pkg/apis/core:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = 
glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/pkg/volume/util/recyclerclient/recycler_client.go b/pkg/volume/util/recyclerclient/recycler_client.go new file mode 100644 index 0000000000..1af6465c6e --- /dev/null +++ b/pkg/volume/util/recyclerclient/recycler_client.go @@ -0,0 +1,252 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package recyclerclient + +import ( + "fmt" + + "github.com/golang/glog" + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/watch" + clientset "k8s.io/client-go/kubernetes" +) + +type RecycleEventRecorder func(eventtype, message string) + +// RecycleVolumeByWatchingPodUntilCompletion is intended for use with volume +// Recyclers. This function will save the given Pod to the API and watch it +// until it completes, fails, or the pod's ActiveDeadlineSeconds is exceeded, +// whichever comes first. An attempt to delete a recycler pod is always +// attempted before returning. 
+// +// In case there is a pod with the same namespace+name already running, this +// function deletes it as it is not able to judge if it is an old recycler +// or user has forged a fake recycler to block Kubernetes from recycling. +// +// pod - the pod designed by a volume plugin to recycle the volume. pod.Name +// will be overwritten with unique name based on PV.Name. +// client - kube client for API operations. +func RecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *v1.Pod, kubeClient clientset.Interface, recorder RecycleEventRecorder) error { + return internalRecycleVolumeByWatchingPodUntilCompletion(pvName, pod, newRecyclerClient(kubeClient, recorder)) +} + +// same as above func comments, except 'recyclerClient' is a narrower pod API +// interface to ease testing +func internalRecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *v1.Pod, recyclerClient recyclerClient) error { + glog.V(5).Infof("creating recycler pod for volume %s\n", pod.Name) + + // Generate unique name for the recycler pod - we need to get "already + // exists" error when a previous controller has already started recycling + // the volume. Here we assume that pv.Name is already unique. + pod.Name = "recycler-for-" + pvName + pod.GenerateName = "" + + stopChannel := make(chan struct{}) + defer close(stopChannel) + podCh, err := recyclerClient.WatchPod(pod.Name, pod.Namespace, stopChannel) + if err != nil { + glog.V(4).Infof("cannot start watcher for pod %s/%s: %v", pod.Namespace, pod.Name, err) + return err + } + + // Start the pod + _, err = recyclerClient.CreatePod(pod) + if err != nil { + if errors.IsAlreadyExists(err) { + deleteErr := recyclerClient.DeletePod(pod.Name, pod.Namespace) + if deleteErr != nil { + return fmt.Errorf("failed to delete old recycler pod %s/%s: %s", pod.Namespace, pod.Name, deleteErr) + } + // Recycler will try again and the old pod will be hopefully deleted + // at that time. 
+ return fmt.Errorf("old recycler pod found, will retry later") + } + return fmt.Errorf("unexpected error creating recycler pod: %+v", err) + } + err = waitForPod(pod, recyclerClient, podCh) + + // In all cases delete the recycler pod and log its result. + glog.V(2).Infof("deleting recycler pod %s/%s", pod.Namespace, pod.Name) + deleteErr := recyclerClient.DeletePod(pod.Name, pod.Namespace) + if deleteErr != nil { + glog.Errorf("failed to delete recycler pod %s/%s: %v", pod.Namespace, pod.Name, err) + } + + // Returning recycler error is preferred, the pod will be deleted again on + // the next retry. + if err != nil { + return fmt.Errorf("failed to recycle volume: %s", err) + } + + // Recycle succeeded but we failed to delete the recycler pod. Report it, + // the controller will re-try recycling the PV again shortly. + if deleteErr != nil { + return fmt.Errorf("failed to delete recycler pod: %s", deleteErr) + } + + return nil +} + +// waitForPod watches the pod it until it finishes and send all events on the +// pod to the PV. +func waitForPod(pod *v1.Pod, recyclerClient recyclerClient, podCh <-chan watch.Event) error { + for { + event, ok := <-podCh + if !ok { + return fmt.Errorf("recycler pod %q watch channel had been closed", pod.Name) + } + switch event.Object.(type) { + case *v1.Pod: + // POD changed + pod := event.Object.(*v1.Pod) + glog.V(4).Infof("recycler pod update received: %s %s/%s %s", event.Type, pod.Namespace, pod.Name, pod.Status.Phase) + switch event.Type { + case watch.Added, watch.Modified: + if pod.Status.Phase == v1.PodSucceeded { + // Recycle succeeded. 
+ return nil + } + if pod.Status.Phase == v1.PodFailed { + if pod.Status.Message != "" { + return fmt.Errorf(pod.Status.Message) + } else { + return fmt.Errorf("pod failed, pod.Status.Message unknown.") + } + } + + case watch.Deleted: + return fmt.Errorf("recycler pod was deleted") + + case watch.Error: + return fmt.Errorf("recycler pod watcher failed") + } + + case *v1.Event: + // Event received + podEvent := event.Object.(*v1.Event) + glog.V(4).Infof("recycler event received: %s %s/%s %s/%s %s", event.Type, podEvent.Namespace, podEvent.Name, podEvent.InvolvedObject.Namespace, podEvent.InvolvedObject.Name, podEvent.Message) + if event.Type == watch.Added { + recyclerClient.Event(podEvent.Type, podEvent.Message) + } + } + } +} + +// recyclerClient abstracts access to a Pod by providing a narrower interface. +// This makes it easier to mock a client for testing. +type recyclerClient interface { + CreatePod(pod *v1.Pod) (*v1.Pod, error) + GetPod(name, namespace string) (*v1.Pod, error) + DeletePod(name, namespace string) error + // WatchPod returns a ListWatch for watching a pod. The stopChannel is used + // to close the reflector backing the watch. The caller is responsible for + // deferring a close on the channel to stop the reflector. + WatchPod(name, namespace string, stopChannel chan struct{}) (<-chan watch.Event, error) + // Event sends an event to the volume that is being recycled. 
+ Event(eventtype, message string) +} + +func newRecyclerClient(client clientset.Interface, recorder RecycleEventRecorder) recyclerClient { + return &realRecyclerClient{ + client, + recorder, + } +} + +type realRecyclerClient struct { + client clientset.Interface + recorder RecycleEventRecorder +} + +func (c *realRecyclerClient) CreatePod(pod *v1.Pod) (*v1.Pod, error) { + return c.client.CoreV1().Pods(pod.Namespace).Create(pod) +} + +func (c *realRecyclerClient) GetPod(name, namespace string) (*v1.Pod, error) { + return c.client.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{}) +} + +func (c *realRecyclerClient) DeletePod(name, namespace string) error { + return c.client.CoreV1().Pods(namespace).Delete(name, nil) +} + +func (c *realRecyclerClient) Event(eventtype, message string) { + c.recorder(eventtype, message) +} + +func (c *realRecyclerClient) WatchPod(name, namespace string, stopChannel chan struct{}) (<-chan watch.Event, error) { + podSelector, err := fields.ParseSelector("metadata.name=" + name) + if err != nil { + return nil, err + } + options := metav1.ListOptions{ + FieldSelector: podSelector.String(), + Watch: true, + } + + podWatch, err := c.client.CoreV1().Pods(namespace).Watch(options) + if err != nil { + return nil, err + } + + eventSelector, _ := fields.ParseSelector("involvedObject.name=" + name) + eventWatch, err := c.client.CoreV1().Events(namespace).Watch(metav1.ListOptions{ + FieldSelector: eventSelector.String(), + Watch: true, + }) + if err != nil { + podWatch.Stop() + return nil, err + } + + eventCh := make(chan watch.Event, 30) + + go func() { + defer eventWatch.Stop() + defer podWatch.Stop() + defer close(eventCh) + var podWatchChannelClosed bool + var eventWatchChannelClosed bool + for { + select { + case _ = <-stopChannel: + return + + case podEvent, ok := <-podWatch.ResultChan(): + if !ok { + podWatchChannelClosed = true + } else { + eventCh <- podEvent + } + case eventEvent, ok := <-eventWatch.ResultChan(): + if !ok { + 
eventWatchChannelClosed = true + } else { + eventCh <- eventEvent + } + } + if podWatchChannelClosed && eventWatchChannelClosed { + break + } + } + }() + + return eventCh, nil +} diff --git a/pkg/volume/util/recyclerclient/recycler_client_test.go b/pkg/volume/util/recyclerclient/recycler_client_test.go new file mode 100644 index 0000000000..64e04fbcc2 --- /dev/null +++ b/pkg/volume/util/recyclerclient/recycler_client_test.go @@ -0,0 +1,235 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package recyclerclient + +import ( + "fmt" + "testing" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/watch" + api "k8s.io/kubernetes/pkg/apis/core" +) + +type testcase struct { + // Input of the test + name string + existingPod *v1.Pod + createPod *v1.Pod + // eventSequence is list of events that are simulated during recycling. It + // can be either event generated by a recycler pod or a state change of + // the pod. (see newPodEvent and newEvent below). + eventSequence []watch.Event + + // Expected output. + // expectedEvents is list of events that were sent to the volume that was + // recycled. 
+ expectedEvents []mockEvent + expectedError string +} + +func newPodEvent(eventtype watch.EventType, name string, phase v1.PodPhase, message string) watch.Event { + return watch.Event{ + Type: eventtype, + Object: newPod(name, phase, message), + } +} + +func newEvent(eventtype, message string) watch.Event { + return watch.Event{ + Type: watch.Added, + Object: &v1.Event{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: metav1.NamespaceDefault, + }, + Reason: "MockEvent", + Message: message, + Type: eventtype, + }, + } +} + +func newPod(name string, phase v1.PodPhase, message string) *v1.Pod { + return &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: metav1.NamespaceDefault, + Name: name, + }, + Status: v1.PodStatus{ + Phase: phase, + Message: message, + }, + } +} + +func TestRecyclerPod(t *testing.T) { + tests := []testcase{ + { + // Test recycler success with some events + name: "RecyclerSuccess", + createPod: newPod("podRecyclerSuccess", v1.PodPending, ""), + eventSequence: []watch.Event{ + // Pod gets Running and Succeeded + newPodEvent(watch.Added, "podRecyclerSuccess", v1.PodPending, ""), + newEvent(v1.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerSuccess to 127.0.0.1"), + newEvent(v1.EventTypeNormal, "pulling image \"k8s.gcr.io/busybox\""), + newEvent(v1.EventTypeNormal, "Successfully pulled image \"k8s.gcr.io/busybox\""), + newEvent(v1.EventTypeNormal, "Created container with docker id 83d929aeac82"), + newEvent(v1.EventTypeNormal, "Started container with docker id 83d929aeac82"), + newPodEvent(watch.Modified, "podRecyclerSuccess", v1.PodRunning, ""), + newPodEvent(watch.Modified, "podRecyclerSuccess", v1.PodSucceeded, ""), + }, + expectedEvents: []mockEvent{ + {v1.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerSuccess to 127.0.0.1"}, + {v1.EventTypeNormal, "pulling image \"k8s.gcr.io/busybox\""}, + {v1.EventTypeNormal, "Successfully pulled image \"k8s.gcr.io/busybox\""}, + {v1.EventTypeNormal, "Created container with 
docker id 83d929aeac82"}, + {v1.EventTypeNormal, "Started container with docker id 83d929aeac82"}, + }, + expectedError: "", + }, + { + // Test recycler failure with some events + name: "RecyclerFailure", + createPod: newPod("podRecyclerFailure", v1.PodPending, ""), + eventSequence: []watch.Event{ + // Pod gets Running and Succeeded + newPodEvent(watch.Added, "podRecyclerFailure", v1.PodPending, ""), + newEvent(v1.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerFailure to 127.0.0.1"), + newEvent(v1.EventTypeWarning, "Unable to mount volumes for pod \"recycler-for-podRecyclerFailure_default(3c9809e5-347c-11e6-a79b-3c970e965218)\": timeout expired waiting for volumes to attach/mount"), + newEvent(v1.EventTypeWarning, "Error syncing pod, skipping: timeout expired waiting for volumes to attach/mount for pod \"default\"/\"recycler-for-podRecyclerFailure\". list of unattached/unmounted"), + newPodEvent(watch.Modified, "podRecyclerFailure", v1.PodRunning, ""), + newPodEvent(watch.Modified, "podRecyclerFailure", v1.PodFailed, "Pod was active on the node longer than specified deadline"), + }, + expectedEvents: []mockEvent{ + {v1.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerFailure to 127.0.0.1"}, + {v1.EventTypeWarning, "Unable to mount volumes for pod \"recycler-for-podRecyclerFailure_default(3c9809e5-347c-11e6-a79b-3c970e965218)\": timeout expired waiting for volumes to attach/mount"}, + {v1.EventTypeWarning, "Error syncing pod, skipping: timeout expired waiting for volumes to attach/mount for pod \"default\"/\"recycler-for-podRecyclerFailure\". 
list of unattached/unmounted"}, + }, + expectedError: "failed to recycle volume: Pod was active on the node longer than specified deadline", + }, + { + // Recycler pod gets deleted + name: "RecyclerDeleted", + createPod: newPod("podRecyclerDeleted", v1.PodPending, ""), + eventSequence: []watch.Event{ + // Pod gets Running and Succeeded + newPodEvent(watch.Added, "podRecyclerDeleted", v1.PodPending, ""), + newEvent(v1.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerDeleted to 127.0.0.1"), + newPodEvent(watch.Deleted, "podRecyclerDeleted", v1.PodPending, ""), + }, + expectedEvents: []mockEvent{ + {v1.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerDeleted to 127.0.0.1"}, + }, + expectedError: "failed to recycle volume: recycler pod was deleted", + }, + { + // Another recycler pod is already running + name: "RecyclerRunning", + existingPod: newPod("podOldRecycler", v1.PodRunning, ""), + createPod: newPod("podNewRecycler", v1.PodFailed, "mock message"), + eventSequence: []watch.Event{}, + expectedError: "old recycler pod found, will retry later", + }, + } + + for _, test := range tests { + t.Logf("Test %q", test.name) + client := &mockRecyclerClient{ + events: test.eventSequence, + pod: test.existingPod, + } + err := internalRecycleVolumeByWatchingPodUntilCompletion(test.createPod.Name, test.createPod, client) + receivedError := "" + if err != nil { + receivedError = err.Error() + } + if receivedError != test.expectedError { + t.Errorf("Test %q failed, expected error %q, got %q", test.name, test.expectedError, receivedError) + continue + } + if !client.deletedCalled { + t.Errorf("Test %q failed, expected deferred client.Delete to be called on recycler pod", test.name) + continue + } + for i, expectedEvent := range test.expectedEvents { + if len(client.receivedEvents) <= i { + t.Errorf("Test %q failed, expected event %d: %q not received", test.name, i, expectedEvent.message) + continue + } + receivedEvent := client.receivedEvents[i] + if 
expectedEvent.eventtype != receivedEvent.eventtype { + t.Errorf("Test %q failed, event %d does not match: expected eventtype %q, got %q", test.name, i, expectedEvent.eventtype, receivedEvent.eventtype) + } + if expectedEvent.message != receivedEvent.message { + t.Errorf("Test %q failed, event %d does not match: expected message %q, got %q", test.name, i, expectedEvent.message, receivedEvent.message) + } + } + for i := len(test.expectedEvents); i < len(client.receivedEvents); i++ { + t.Errorf("Test %q failed, unexpected event received: %s, %q", test.name, client.receivedEvents[i].eventtype, client.receivedEvents[i].message) + } + } +} + +type mockRecyclerClient struct { + pod *v1.Pod + deletedCalled bool + receivedEvents []mockEvent + events []watch.Event +} + +type mockEvent struct { + eventtype, message string +} + +func (c *mockRecyclerClient) CreatePod(pod *v1.Pod) (*v1.Pod, error) { + if c.pod == nil { + c.pod = pod + return c.pod, nil + } + // Simulate "already exists" error + return nil, errors.NewAlreadyExists(api.Resource("pods"), pod.Name) +} + +func (c *mockRecyclerClient) GetPod(name, namespace string) (*v1.Pod, error) { + if c.pod != nil { + return c.pod, nil + } else { + return nil, fmt.Errorf("pod does not exist") + } +} + +func (c *mockRecyclerClient) DeletePod(name, namespace string) error { + c.deletedCalled = true + return nil +} + +func (c *mockRecyclerClient) WatchPod(name, namespace string, stopChannel chan struct{}) (<-chan watch.Event, error) { + eventCh := make(chan watch.Event, 0) + go func() { + for _, e := range c.events { + eventCh <- e + } + }() + return eventCh, nil +} + +func (c *mockRecyclerClient) Event(eventtype, message string) { + c.receivedEvents = append(c.receivedEvents, mockEvent{eventtype, message}) +} diff --git a/pkg/volume/util/util.go b/pkg/volume/util/util.go index b9d8629d65..518103414c 100644 --- a/pkg/volume/util/util.go +++ b/pkg/volume/util/util.go @@ -21,7 +21,6 @@ import ( "io/ioutil" "os" "path" - 
"path/filepath" "strings" "syscall" @@ -31,21 +30,51 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" + utilfeature "k8s.io/apiserver/pkg/util/feature" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/pkg/api/legacyscheme" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" + "k8s.io/kubernetes/pkg/features" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/pkg/util/mount" + "k8s.io/kubernetes/pkg/volume" + + "reflect" + + "hash/fnv" + "math/rand" + "strconv" + + "k8s.io/apimachinery/pkg/api/resource" + utypes "k8s.io/apimachinery/pkg/types" + "k8s.io/kubernetes/pkg/volume/util/types" ) const ( - readyFileName = "ready" - losetupPath = "losetup" + // GB - GigaByte size + GB = 1000 * 1000 * 1000 + // GIB - GibiByte size + GIB = 1024 * 1024 * 1024 - ErrDeviceNotFound = "device not found" - ErrDeviceNotSupported = "device not supported" + readyFileName = "ready" + + // ControllerManagedAttachAnnotation is the key of the annotation on Node + // objects that indicates attach/detach operations for the node should be + // managed by the attach/detach controller + ControllerManagedAttachAnnotation string = "volumes.kubernetes.io/controller-managed-attach-detach" + + // KeepTerminatedPodVolumesAnnotation is the key of the annotation on Node + // that decides if pod volumes are unmounted when pod is terminated + KeepTerminatedPodVolumesAnnotation string = "volumes.kubernetes.io/keep-terminated-pod-volumes" + + // VolumeGidAnnotationKey is the of the annotation on the PersistentVolume + // object that specifies a supplemental GID. 
+ VolumeGidAnnotationKey = "pv.beta.kubernetes.io/gid" + + // VolumeDynamicallyCreatedByKey is the key of the annotation on PersistentVolume + // object created dynamically + VolumeDynamicallyCreatedByKey = "kubernetes.io/createdby" ) // IsReady checks for the existence of a regular file @@ -341,200 +370,382 @@ func stringToSet(str, delimiter string) (sets.String, error) { return zonesSet, nil } -// BlockVolumePathHandler defines a set of operations for handling block volume-related operations -type BlockVolumePathHandler interface { - // MapDevice creates a symbolic link to block device under specified map path - MapDevice(devicePath string, mapPath string, linkName string) error - // UnmapDevice removes a symbolic link to block device under specified map path - UnmapDevice(mapPath string, linkName string) error - // RemovePath removes a file or directory on specified map path - RemoveMapPath(mapPath string) error - // IsSymlinkExist returns true if specified symbolic link exists - IsSymlinkExist(mapPath string) (bool, error) - // GetDeviceSymlinkRefs searches symbolic links under global map path - GetDeviceSymlinkRefs(devPath string, mapPath string) ([]string, error) - // FindGlobalMapPathUUIDFromPod finds {pod uuid} symbolic link under globalMapPath - // corresponding to map path symlink, and then return global map path with pod uuid. - FindGlobalMapPathUUIDFromPod(pluginDir, mapPath string, podUID types.UID) (string, error) - // AttachFileDevice takes a path to a regular file and makes it available as an - // attached block device. - AttachFileDevice(path string) (string, error) - // GetLoopDevice returns the full path to the loop device associated with the given path. - GetLoopDevice(path string) (string, error) - // RemoveLoopDevice removes specified loopback device - RemoveLoopDevice(device string) error +// CalculateTimeoutForVolume calculates time for a Recycler pod to complete a +// recycle operation. 
The calculation and return value is either the +// minimumTimeout or the timeoutIncrement per Gi of storage size, whichever is +// greater. +func CalculateTimeoutForVolume(minimumTimeout, timeoutIncrement int, pv *v1.PersistentVolume) int64 { + giQty := resource.MustParse("1Gi") + pvQty := pv.Spec.Capacity[v1.ResourceStorage] + giSize := giQty.Value() + pvSize := pvQty.Value() + timeout := (pvSize / giSize) * int64(timeoutIncrement) + if timeout < int64(minimumTimeout) { + return int64(minimumTimeout) + } + return timeout } -// NewBlockVolumePathHandler returns a new instance of BlockVolumeHandler. -func NewBlockVolumePathHandler() BlockVolumePathHandler { - var volumePathHandler VolumePathHandler - return volumePathHandler +// RoundUpSize calculates how many allocation units are needed to accommodate +// a volume of given size. E.g. when user wants 1500MiB volume, while AWS EBS +// allocates volumes in gibibyte-sized chunks, +// RoundUpSize(1500 * 1024*1024, 1024*1024*1024) returns '2' +// (2 GiB is the smallest allocatable volume that can hold 1500MiB) +func RoundUpSize(volumeSizeBytes int64, allocationUnitBytes int64) int64 { + return (volumeSizeBytes + allocationUnitBytes - 1) / allocationUnitBytes } -// VolumePathHandler is path related operation handlers for block volume -type VolumePathHandler struct { +// RoundUpToGB rounds up given quantity to chunks of GB +func RoundUpToGB(size resource.Quantity) int64 { + requestBytes := size.Value() + return RoundUpSize(requestBytes, GB) } -// MapDevice creates a symbolic link to block device under specified map path -func (v VolumePathHandler) MapDevice(devicePath string, mapPath string, linkName string) error { - // Example of global map path: - // globalMapPath/linkName: plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath}/{podUid} - // linkName: {podUid} - // - // Example of pod device map path: - // podDeviceMapPath/linkName: 
pods/{podUid}/{DefaultKubeletVolumeDevicesDirName}/{escapeQualifiedPluginName}/{volumeName} - // linkName: {volumeName} - if len(devicePath) == 0 { - return fmt.Errorf("Failed to map device to map path. devicePath is empty") - } - if len(mapPath) == 0 { - return fmt.Errorf("Failed to map device to map path. mapPath is empty") - } - if !filepath.IsAbs(mapPath) { - return fmt.Errorf("The map path should be absolute: map path: %s", mapPath) - } - glog.V(5).Infof("MapDevice: devicePath %s", devicePath) - glog.V(5).Infof("MapDevice: mapPath %s", mapPath) - glog.V(5).Infof("MapDevice: linkName %s", linkName) +// RoundUpToGiB rounds up given quantity upto chunks of GiB +func RoundUpToGiB(size resource.Quantity) int64 { + requestBytes := size.Value() + return RoundUpSize(requestBytes, GIB) +} - // Check and create mapPath - _, err := os.Stat(mapPath) - if err != nil && !os.IsNotExist(err) { - glog.Errorf("cannot validate map path: %s", mapPath) +// GenerateVolumeName returns a PV name with clusterName prefix. The function +// should be used to generate a name of GCE PD or Cinder volume. It basically +// adds "-dynamic-" before the PV name, making sure the resulting +// string fits given length and cuts "dynamic" if not. +func GenerateVolumeName(clusterName, pvName string, maxLength int) string { + prefix := clusterName + "-dynamic" + pvLen := len(pvName) + + // cut the "-dynamic" to fit full pvName into maxLength + // +1 for the '-' dash + if pvLen+1+len(prefix) > maxLength { + prefix = prefix[:maxLength-pvLen-1] + } + return prefix + "-" + pvName +} + +// GetPath checks if the path from the mounter is empty. 
+func GetPath(mounter volume.Mounter) (string, error) { + path := mounter.GetPath() + if path == "" { + return "", fmt.Errorf("Path is empty %s", reflect.TypeOf(mounter).String()) + } + return path, nil +} + +// ChooseZoneForVolume implements our heuristics for choosing a zone for volume creation based on the volume name +// Volumes are generally round-robin-ed across all active zones, using the hash of the PVC Name. +// However, if the PVCName ends with `-`, we will hash the prefix, and then add the integer to the hash. +// This means that a StatefulSet's volumes (`claimname-statefulsetname-id`) will spread across available zones, +// assuming the id values are consecutive. +func ChooseZoneForVolume(zones sets.String, pvcName string) string { + // We create the volume in a zone determined by the name + // Eventually the scheduler will coordinate placement into an available zone + hash, index := getPVCNameHashAndIndexOffset(pvcName) + + // Zones.List returns zones in a consistent order (sorted) + // We do have a potential failure case where volumes will not be properly spread, + // if the set of zones changes during StatefulSet volume creation. However, this is + // probably relatively unlikely because we expect the set of zones to be essentially + // static for clusters. + // Hopefully we can address this problem if/when we do full scheduler integration of + // PVC placement (which could also e.g. avoid putting volumes in overloaded or + // unhealthy zones) + zoneSlice := zones.List() + zone := zoneSlice[(hash+index)%uint32(len(zoneSlice))] + + glog.V(2).Infof("Creating volume for PVC %q; chose zone=%q from zones=%q", pvcName, zone, zoneSlice) + return zone +} + +// ChooseZonesForVolume is identical to ChooseZoneForVolume, but selects a multiple zones, for multi-zone disks. 
+func ChooseZonesForVolume(zones sets.String, pvcName string, numZones uint32) sets.String { + // We create the volume in a zone determined by the name + // Eventually the scheduler will coordinate placement into an available zone + hash, index := getPVCNameHashAndIndexOffset(pvcName) + + // Zones.List returns zones in a consistent order (sorted) + // We do have a potential failure case where volumes will not be properly spread, + // if the set of zones changes during StatefulSet volume creation. However, this is + // probably relatively unlikely because we expect the set of zones to be essentially + // static for clusters. + // Hopefully we can address this problem if/when we do full scheduler integration of + // PVC placement (which could also e.g. avoid putting volumes in overloaded or + // unhealthy zones) + zoneSlice := zones.List() + replicaZones := sets.NewString() + + startingIndex := index * numZones + for index = startingIndex; index < startingIndex+numZones; index++ { + zone := zoneSlice[(hash+index)%uint32(len(zoneSlice))] + replicaZones.Insert(zone) + } + + glog.V(2).Infof("Creating volume for replicated PVC %q; chosen zones=%q from zones=%q", + pvcName, replicaZones.UnsortedList(), zoneSlice) + return replicaZones +} + +func getPVCNameHashAndIndexOffset(pvcName string) (hash uint32, index uint32) { + if pvcName == "" { + // We should always be called with a name; this shouldn't happen + glog.Warningf("No name defined during volume create; choosing random zone") + + hash = rand.Uint32() + } else { + hashString := pvcName + + // Heuristic to make sure that volumes in a StatefulSet are spread across zones + // StatefulSet PVCs are (currently) named ClaimName-StatefulSetName-Id, + // where Id is an integer index. + // Note though that if a StatefulSet pod has multiple claims, we need them to be + // in the same zone, because otherwise the pod will be unable to mount both volumes, + // and will be unschedulable. 
So we hash _only_ the "StatefulSetName" portion when + // it looks like `ClaimName-StatefulSetName-Id`. + // We continue to round-robin volume names that look like `Name-Id` also; this is a useful + // feature for users that are creating statefulset-like functionality without using statefulsets. + lastDash := strings.LastIndexByte(pvcName, '-') + if lastDash != -1 { + statefulsetIDString := pvcName[lastDash+1:] + statefulsetID, err := strconv.ParseUint(statefulsetIDString, 10, 32) + if err == nil { + // Offset by the statefulsetID, so we round-robin across zones + index = uint32(statefulsetID) + // We still hash the volume name, but only the prefix + hashString = pvcName[:lastDash] + + // In the special case where it looks like `ClaimName-StatefulSetName-Id`, + // hash only the StatefulSetName, so that different claims on the same StatefulSet + // member end up in the same zone. + // Note that StatefulSetName (and ClaimName) might themselves both have dashes. + // We actually just take the portion after the final - of ClaimName-StatefulSetName. + // For our purposes it doesn't much matter (just suboptimal spreading). + lastDash := strings.LastIndexByte(hashString, '-') + if lastDash != -1 { + hashString = hashString[lastDash+1:] + } + + glog.V(2).Infof("Detected StatefulSet-style volume name %q; index=%d", pvcName, index) + } + } + + // We hash the (base) volume name, so we don't bias towards the first N zones + h := fnv.New32() + h.Write([]byte(hashString)) + hash = h.Sum32() + } + + return hash, index +} + +// UnmountViaEmptyDir delegates the tear down operation for secret, configmap, git_repo and downwardapi +// to empty_dir +func UnmountViaEmptyDir(dir string, host volume.VolumeHost, volName string, volSpec volume.Spec, podUID utypes.UID) error { + glog.V(3).Infof("Tearing down volume %v for pod %v at %v", volName, podUID, dir) + + // Wrap EmptyDir, let it do the teardown. 
+ wrapped, err := host.NewWrapperUnmounter(volName, volSpec, podUID) + if err != nil { return err } - if err = os.MkdirAll(mapPath, 0750); err != nil { - return fmt.Errorf("Failed to mkdir %s, error %v", mapPath, err) - } - // Remove old symbolic link(or file) then create new one. - // This should be done because current symbolic link is - // stale across node reboot. - linkPath := path.Join(mapPath, string(linkName)) - if err = os.Remove(linkPath); err != nil && !os.IsNotExist(err) { - return err - } - err = os.Symlink(devicePath, linkPath) - return err + return wrapped.TearDownAt(dir) } -// UnmapDevice removes a symbolic link associated to block device under specified map path -func (v VolumePathHandler) UnmapDevice(mapPath string, linkName string) error { - if len(mapPath) == 0 { - return fmt.Errorf("Failed to unmap device from map path. mapPath is empty") - } - glog.V(5).Infof("UnmapDevice: mapPath %s", mapPath) - glog.V(5).Infof("UnmapDevice: linkName %s", linkName) +// MountOptionFromSpec extracts and joins mount options from volume spec with supplied options +func MountOptionFromSpec(spec *volume.Spec, options ...string) []string { + pv := spec.PersistentVolume - // Check symbolic link exists - linkPath := path.Join(mapPath, string(linkName)) - if islinkExist, checkErr := v.IsSymlinkExist(linkPath); checkErr != nil { - return checkErr - } else if !islinkExist { - glog.Warningf("Warning: Unmap skipped because symlink does not exist on the path: %v", linkPath) - return nil + if pv != nil { + // Use beta annotation first + if mo, ok := pv.Annotations[v1.MountOptionAnnotation]; ok { + moList := strings.Split(mo, ",") + return JoinMountOptions(moList, options) + } + + if len(pv.Spec.MountOptions) > 0 { + return JoinMountOptions(pv.Spec.MountOptions, options) + } } - err := os.Remove(linkPath) - return err + + return options } -// RemoveMapPath removes a file or directory on specified map path -func (v VolumePathHandler) RemoveMapPath(mapPath string) error { - if 
len(mapPath) == 0 { - return fmt.Errorf("Failed to remove map path. mapPath is empty") +// JoinMountOptions joins mount options eliminating duplicates +func JoinMountOptions(userOptions []string, systemOptions []string) []string { + allMountOptions := sets.NewString() + + for _, mountOption := range userOptions { + if len(mountOption) > 0 { + allMountOptions.Insert(mountOption) + } } - glog.V(5).Infof("RemoveMapPath: mapPath %s", mapPath) - err := os.RemoveAll(mapPath) - if err != nil && !os.IsNotExist(err) { - return err + + for _, mountOption := range systemOptions { + allMountOptions.Insert(mountOption) + } + return allMountOptions.UnsortedList() +} + +// ValidateZone returns: +// - an error in case zone is an empty string or contains only any combination of spaces and tab characters +// - nil otherwise +func ValidateZone(zone string) error { + if strings.TrimSpace(zone) == "" { + return fmt.Errorf("the provided %q zone is not valid, it's an empty string or contains only spaces and tab characters", zone) } return nil } -// IsSymlinkExist returns true if specified file exists and the type is symbolik link. -// If file doesn't exist, or file exists but not symbolick link, return false with no error. -// On other cases, return false with error from Lstat(). 
-func (v VolumePathHandler) IsSymlinkExist(mapPath string) (bool, error) { - fi, err := os.Lstat(mapPath) - if err == nil { - // If file exits and it's symbolick link, return true and no error - if fi.Mode()&os.ModeSymlink == os.ModeSymlink { - return true, nil +// AccessModesContains returns whether the requested mode is contained by modes +func AccessModesContains(modes []v1.PersistentVolumeAccessMode, mode v1.PersistentVolumeAccessMode) bool { + for _, m := range modes { + if m == mode { + return true } - // If file exits but it's not symbolick link, return fale and no error - return false, nil } - // If file doesn't exist, return false and no error - if os.IsNotExist(err) { - return false, nil - } - // Return error from Lstat() - return false, err + return false } -// GetDeviceSymlinkRefs searches symbolic links under global map path -func (v VolumePathHandler) GetDeviceSymlinkRefs(devPath string, mapPath string) ([]string, error) { - var refs []string - files, err := ioutil.ReadDir(mapPath) - if err != nil { - return nil, fmt.Errorf("Directory cannot read %v", err) - } - for _, file := range files { - if file.Mode()&os.ModeSymlink != os.ModeSymlink { - continue +// AccessModesContainedInAll returns whether all of the requested modes are contained by modes +func AccessModesContainedInAll(indexedModes []v1.PersistentVolumeAccessMode, requestedModes []v1.PersistentVolumeAccessMode) bool { + for _, mode := range requestedModes { + if !AccessModesContains(indexedModes, mode) { + return false } - filename := file.Name() - filepath, err := os.Readlink(path.Join(mapPath, filename)) + } + return true +} + +// GetWindowsPath get a windows path +func GetWindowsPath(path string) string { + windowsPath := strings.Replace(path, "/", "\\", -1) + if strings.HasPrefix(windowsPath, "\\") { + windowsPath = "c:" + windowsPath + } + return windowsPath +} + +// GetUniquePodName returns a unique identifier to reference a pod by +func GetUniquePodName(pod *v1.Pod) types.UniquePodName 
{ + return types.UniquePodName(pod.UID) +} + +// GetUniqueVolumeName returns a unique name representing the volume/plugin. +// Caller should ensure that volumeName is a name/ID uniquely identifying the +// actual backing device, directory, path, etc. for a particular volume. +// The returned name can be used to uniquely reference the volume, for example, +// to prevent operations (attach/detach or mount/unmount) from being triggered +// on the same volume. +func GetUniqueVolumeName(pluginName, volumeName string) v1.UniqueVolumeName { + return v1.UniqueVolumeName(fmt.Sprintf("%s/%s", pluginName, volumeName)) +} + +// GetUniqueVolumeNameForNonAttachableVolume returns the unique volume name +// for a non-attachable volume. +func GetUniqueVolumeNameForNonAttachableVolume( + podName types.UniquePodName, volumePlugin volume.VolumePlugin, volumeSpec *volume.Spec) v1.UniqueVolumeName { + return v1.UniqueVolumeName( + fmt.Sprintf("%s/%v-%s", volumePlugin.GetPluginName(), podName, volumeSpec.Name())) +} + +// GetUniqueVolumeNameFromSpec uses the given VolumePlugin to generate a unique +// name representing the volume defined in the specified volume spec. +// This returned name can be used to uniquely reference the actual backing +// device, directory, path, etc. referenced by the given volumeSpec. +// If the given plugin does not support the volume spec, this returns an error. +func GetUniqueVolumeNameFromSpec( + volumePlugin volume.VolumePlugin, + volumeSpec *volume.Spec) (v1.UniqueVolumeName, error) { + if volumePlugin == nil { + return "", fmt.Errorf( + "volumePlugin should not be nil. 
volumeSpec.Name=%q", + volumeSpec.Name()) + } + + volumeName, err := volumePlugin.GetVolumeName(volumeSpec) + if err != nil || volumeName == "" { + return "", fmt.Errorf( + "failed to GetVolumeName from volumePlugin for volumeSpec %q err=%v", + volumeSpec.Name(), + err) + } + + return GetUniqueVolumeName( + volumePlugin.GetPluginName(), + volumeName), + nil +} + +// IsPodTerminated checks if pod is terminated +func IsPodTerminated(pod *v1.Pod, podStatus v1.PodStatus) bool { + return podStatus.Phase == v1.PodFailed || podStatus.Phase == v1.PodSucceeded || (pod.DeletionTimestamp != nil && notRunning(podStatus.ContainerStatuses)) +} + +// notRunning returns true if every status is terminated or waiting, or the status list +// is empty. +func notRunning(statuses []v1.ContainerStatus) bool { + for _, status := range statuses { + if status.State.Terminated == nil && status.State.Waiting == nil { + return false + } + } + return true +} + +// SplitUniqueName splits the unique name to plugin name and volume name strings. It expects the uniqueName to follow +// the format plugin_name/volume_name and the plugin name must be namespaced as described by the plugin interface, +// i.e. namespace/plugin containing exactly one '/'. This means the unique name will always be in the form of +// plugin_namespace/plugin/volume_name, see k8s.io/kubernetes/pkg/volume/plugins.go VolumePlugin interface +// description and pkg/volume/util/volumehelper/volumehelper.go GetUniqueVolumeNameFromSpec that constructs +// the unique volume names. 
+func SplitUniqueName(uniqueName v1.UniqueVolumeName) (string, string, error) { + components := strings.SplitN(string(uniqueName), "/", 3) + if len(components) != 3 { + return "", "", fmt.Errorf("cannot split volume unique name %s to plugin/volume components", uniqueName) + } + pluginName := fmt.Sprintf("%s/%s", components[0], components[1]) + return pluginName, components[2], nil +} + +// NewSafeFormatAndMountFromHost creates a new SafeFormatAndMount with Mounter +// and Exec taken from given VolumeHost. +func NewSafeFormatAndMountFromHost(pluginName string, host volume.VolumeHost) *mount.SafeFormatAndMount { + mounter := host.GetMounter(pluginName) + exec := host.GetExec(pluginName) + return &mount.SafeFormatAndMount{Interface: mounter, Exec: exec} +} + +// GetVolumeMode retrieves VolumeMode from pv. +// If the volume doesn't have PersistentVolume, it's an inline volume, +// should return volumeMode as filesystem to keep existing behavior. +func GetVolumeMode(volumeSpec *volume.Spec) (v1.PersistentVolumeMode, error) { + if volumeSpec == nil || volumeSpec.PersistentVolume == nil { + return v1.PersistentVolumeFilesystem, nil + } + if volumeSpec.PersistentVolume.Spec.VolumeMode != nil { + return *volumeSpec.PersistentVolume.Spec.VolumeMode, nil + } + return "", fmt.Errorf("cannot get volumeMode for volume: %v", volumeSpec.Name()) +} + +// GetPersistentVolumeClaimVolumeMode retrieves VolumeMode from pvc. +func GetPersistentVolumeClaimVolumeMode(claim *v1.PersistentVolumeClaim) (v1.PersistentVolumeMode, error) { + if claim.Spec.VolumeMode != nil { + return *claim.Spec.VolumeMode, nil + } + return "", fmt.Errorf("cannot get volumeMode from pvc: %v", claim.Name) +} + +// CheckVolumeModeFilesystem checks VolumeMode. +// If the mode is Filesystem, return true otherwise return false. 
+func CheckVolumeModeFilesystem(volumeSpec *volume.Spec) (bool, error) { + if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { + volumeMode, err := GetVolumeMode(volumeSpec) if err != nil { - return nil, fmt.Errorf("Symbolic link cannot be retrieved %v", err) + return true, err } - glog.V(5).Infof("GetDeviceSymlinkRefs: filepath: %v, devPath: %v", filepath, devPath) - if filepath == devPath { - refs = append(refs, path.Join(mapPath, filename)) + if volumeMode == v1.PersistentVolumeBlock { + return false, nil } } - glog.V(5).Infof("GetDeviceSymlinkRefs: refs %v", refs) - return refs, nil -} - -// FindGlobalMapPathUUIDFromPod finds {pod uuid} symbolic link under globalMapPath -// corresponding to map path symlink, and then return global map path with pod uuid. -// ex. mapPath symlink: pods/{podUid}}/{DefaultKubeletVolumeDevicesDirName}/{escapeQualifiedPluginName}/{volumeName} -> /dev/sdX -// globalMapPath/{pod uuid}: plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath}/{pod uuid} -> /dev/sdX -func (v VolumePathHandler) FindGlobalMapPathUUIDFromPod(pluginDir, mapPath string, podUID types.UID) (string, error) { - var globalMapPathUUID string - // Find symbolic link named pod uuid under plugin dir - err := filepath.Walk(pluginDir, func(path string, fi os.FileInfo, err error) error { - if err != nil { - return err - } - if (fi.Mode()&os.ModeSymlink == os.ModeSymlink) && (fi.Name() == string(podUID)) { - glog.V(5).Infof("FindGlobalMapPathFromPod: path %s, mapPath %s", path, mapPath) - if res, err := compareSymlinks(path, mapPath); err == nil && res { - globalMapPathUUID = path - } - } - return nil - }) - if err != nil { - return "", err - } - glog.V(5).Infof("FindGlobalMapPathFromPod: globalMapPathUUID %s", globalMapPathUUID) - // Return path contains global map path + {pod uuid} - return globalMapPathUUID, nil -} - -func compareSymlinks(global, pod string) (bool, error) { - devGlobal, err := os.Readlink(global) 
- if err != nil { - return false, err - } - devPod, err := os.Readlink(pod) - if err != nil { - return false, err - } - glog.V(5).Infof("CompareSymlinks: devGloBal %s, devPod %s", devGlobal, devPod) - if devGlobal == devPod { - return true, nil - } - return false, nil + return true, nil } diff --git a/pkg/volume/util/util_test.go b/pkg/volume/util/util_test.go index b9da3df96e..35563b93fe 100644 --- a/pkg/volume/util/util_test.go +++ b/pkg/volume/util/util_test.go @@ -22,14 +22,23 @@ import ( "testing" "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" utiltesting "k8s.io/client-go/util/testing" // util.go uses api.Codecs.LegacyCodec so import this package to do some // resource initialization. + "hash/fnv" _ "k8s.io/kubernetes/pkg/apis/core/install" "k8s.io/kubernetes/pkg/apis/core/v1/helper" "k8s.io/kubernetes/pkg/util/mount" + + "reflect" + "strings" + + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "k8s.io/kubernetes/pkg/util/slice" + "k8s.io/kubernetes/pkg/volume" ) var nodeLabels map[string]string = map[string]string{ @@ -418,3 +427,664 @@ func TestDoUnmountMountPoint(t *testing.T) { } } } + +func TestCalculateTimeoutForVolume(t *testing.T) { + pv := &v1.PersistentVolume{ + Spec: v1.PersistentVolumeSpec{ + Capacity: v1.ResourceList{ + v1.ResourceName(v1.ResourceStorage): resource.MustParse("500M"), + }, + }, + } + + timeout := CalculateTimeoutForVolume(50, 30, pv) + if timeout != 50 { + t.Errorf("Expected 50 for timeout but got %v", timeout) + } + + pv.Spec.Capacity[v1.ResourceStorage] = resource.MustParse("2Gi") + timeout = CalculateTimeoutForVolume(50, 30, pv) + if timeout != 60 { + t.Errorf("Expected 60 for timeout but got %v", timeout) + } + + pv.Spec.Capacity[v1.ResourceStorage] = resource.MustParse("150Gi") + timeout = CalculateTimeoutForVolume(50, 30, pv) + if timeout != 4500 { + t.Errorf("Expected 4500 for timeout but got %v", timeout) + } +} + +func 
TestGenerateVolumeName(t *testing.T) { + + // Normal operation, no truncate + v1 := GenerateVolumeName("kubernetes", "pv-cinder-abcde", 255) + if v1 != "kubernetes-dynamic-pv-cinder-abcde" { + t.Errorf("Expected kubernetes-dynamic-pv-cinder-abcde, got %s", v1) + } + + // Truncate trailing "6789-dynamic" + prefix := strings.Repeat("0123456789", 9) // 90 characters prefix + 8 chars. of "-dynamic" + v2 := GenerateVolumeName(prefix, "pv-cinder-abcde", 100) + expect := prefix[:84] + "-pv-cinder-abcde" + if v2 != expect { + t.Errorf("Expected %s, got %s", expect, v2) + } + + // Truncate really long cluster name + prefix = strings.Repeat("0123456789", 1000) // 10000 characters prefix + v3 := GenerateVolumeName(prefix, "pv-cinder-abcde", 100) + if v3 != expect { + t.Errorf("Expected %s, got %s", expect, v3) + } +} + +func TestMountOptionFromSpec(t *testing.T) { + scenarios := map[string]struct { + volume *volume.Spec + expectedMountList []string + systemOptions []string + }{ + "volume-with-mount-options": { + volume: createVolumeSpecWithMountOption("good-mount-opts", "ro,nfsvers=3", v1.PersistentVolumeSpec{ + PersistentVolumeSource: v1.PersistentVolumeSource{ + NFS: &v1.NFSVolumeSource{Server: "localhost", Path: "/srv", ReadOnly: false}, + }, + }), + expectedMountList: []string{"ro", "nfsvers=3"}, + systemOptions: nil, + }, + "volume-with-bad-mount-options": { + volume: createVolumeSpecWithMountOption("good-mount-opts", "", v1.PersistentVolumeSpec{ + PersistentVolumeSource: v1.PersistentVolumeSource{ + NFS: &v1.NFSVolumeSource{Server: "localhost", Path: "/srv", ReadOnly: false}, + }, + }), + expectedMountList: []string{}, + systemOptions: nil, + }, + "vol-with-sys-opts": { + volume: createVolumeSpecWithMountOption("good-mount-opts", "ro,nfsvers=3", v1.PersistentVolumeSpec{ + PersistentVolumeSource: v1.PersistentVolumeSource{ + NFS: &v1.NFSVolumeSource{Server: "localhost", Path: "/srv", ReadOnly: false}, + }, + }), + expectedMountList: []string{"ro", "nfsvers=3", 
"fsid=100", "hard"}, + systemOptions: []string{"fsid=100", "hard"}, + }, + "vol-with-sys-opts-with-dup": { + volume: createVolumeSpecWithMountOption("good-mount-opts", "ro,nfsvers=3", v1.PersistentVolumeSpec{ + PersistentVolumeSource: v1.PersistentVolumeSource{ + NFS: &v1.NFSVolumeSource{Server: "localhost", Path: "/srv", ReadOnly: false}, + }, + }), + expectedMountList: []string{"ro", "nfsvers=3", "fsid=100"}, + systemOptions: []string{"fsid=100", "ro"}, + }, + } + + for name, scenario := range scenarios { + mountOptions := MountOptionFromSpec(scenario.volume, scenario.systemOptions...) + if !reflect.DeepEqual(slice.SortStrings(mountOptions), slice.SortStrings(scenario.expectedMountList)) { + t.Errorf("for %s expected mount options : %v got %v", name, scenario.expectedMountList, mountOptions) + } + } +} + +func createVolumeSpecWithMountOption(name string, mountOptions string, spec v1.PersistentVolumeSpec) *volume.Spec { + annotations := map[string]string{ + v1.MountOptionAnnotation: mountOptions, + } + objMeta := metav1.ObjectMeta{ + Name: name, + Annotations: annotations, + } + + pv := &v1.PersistentVolume{ + ObjectMeta: objMeta, + Spec: spec, + } + return &volume.Spec{PersistentVolume: pv} +} + +func checkFnv32(t *testing.T, s string, expected int) { + h := fnv.New32() + h.Write([]byte(s)) + h.Sum32() + + if int(h.Sum32()) != expected { + t.Fatalf("hash of %q was %v, expected %v", s, h.Sum32(), expected) + } +} + +func TestChooseZoneForVolume(t *testing.T) { + checkFnv32(t, "henley", 1180403676) + // 1180403676 mod 3 == 0, so the offset from "henley" is 0, which makes it easier to verify this by inspection + + // A few others + checkFnv32(t, "henley-", 2652299129) + checkFnv32(t, "henley-a", 1459735322) + checkFnv32(t, "", 2166136261) + + tests := []struct { + Zones sets.String + VolumeName string + Expected string + }{ + // Test for PVC names that don't have a dash + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "henley", + Expected: "a", // 
hash("henley") == 0 + }, + // Tests for PVC names that end in - number, but don't look like statefulset PVCs + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "henley-0", + Expected: "a", // hash("henley") == 0 + }, + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "henley-1", + Expected: "b", // hash("henley") + 1 == 1 + }, + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "henley-2", + Expected: "c", // hash("henley") + 2 == 2 + }, + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "henley-3", + Expected: "a", // hash("henley") + 3 == 3 === 0 mod 3 + }, + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "henley-4", + Expected: "b", // hash("henley") + 4 == 4 === 1 mod 3 + }, + // Tests for PVC names that are edge cases + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "henley-", + Expected: "c", // hash("henley-") = 2652299129 === 2 mod 3 + }, + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "henley-a", + Expected: "c", // hash("henley-a") = 1459735322 === 2 mod 3 + }, + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "medium--1", + Expected: "c", // hash("") + 1 == 2166136261 + 1 === 2 mod 3 + }, + // Tests for PVC names for simple StatefulSet cases + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "medium-henley-1", + Expected: "b", // hash("henley") + 1 == 1 + }, + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "loud-henley-1", + Expected: "b", // hash("henley") + 1 == 1 + }, + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "quiet-henley-2", + Expected: "c", // hash("henley") + 2 == 2 + }, + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "medium-henley-2", + Expected: "c", // hash("henley") + 2 == 2 + }, + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "medium-henley-3", + Expected: "a", // hash("henley") + 3 == 3 === 0 mod 3 + }, + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "medium-henley-4", + Expected: "b", // hash("henley") + 4 == 4 === 1 
mod 3 + }, + // Tests for statefulsets (or claims) with dashes in the names + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "medium-alpha-henley-2", + Expected: "c", // hash("henley") + 2 == 2 + }, + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "medium-beta-henley-3", + Expected: "a", // hash("henley") + 3 == 3 === 0 mod 3 + }, + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "medium-gamma-henley-4", + Expected: "b", // hash("henley") + 4 == 4 === 1 mod 3 + }, + // Tests for statefulsets name ending in - + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "medium-henley--2", + Expected: "a", // hash("") + 2 == 0 mod 3 + }, + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "medium-henley--3", + Expected: "b", // hash("") + 3 == 1 mod 3 + }, + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "medium-henley--4", + Expected: "c", // hash("") + 4 == 2 mod 3 + }, + } + + for _, test := range tests { + actual := ChooseZoneForVolume(test.Zones, test.VolumeName) + + if actual != test.Expected { + t.Errorf("Test %v failed, expected zone %q, actual %q", test, test.Expected, actual) + } + } +} + +func TestChooseZonesForVolume(t *testing.T) { + checkFnv32(t, "henley", 1180403676) + // 1180403676 mod 3 == 0, so the offset from "henley" is 0, which makes it easier to verify this by inspection + + // A few others + checkFnv32(t, "henley-", 2652299129) + checkFnv32(t, "henley-a", 1459735322) + checkFnv32(t, "", 2166136261) + + tests := []struct { + Zones sets.String + VolumeName string + NumZones uint32 + Expected sets.String + }{ + // Test for PVC names that don't have a dash + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "henley", + NumZones: 1, + Expected: sets.NewString("a" /* hash("henley") == 0 */), + }, + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "henley", + NumZones: 2, + Expected: sets.NewString("a" /* hash("henley") == 0 */, "b"), + }, + // Tests for PVC names that end in - number, but don't look 
like statefulset PVCs + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "henley-0", + NumZones: 1, + Expected: sets.NewString("a" /* hash("henley") == 0 */), + }, + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "henley-0", + NumZones: 2, + Expected: sets.NewString("a" /* hash("henley") == 0 */, "b"), + }, + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "henley-1", + NumZones: 1, + Expected: sets.NewString("b" /* hash("henley") + 1 == 1 */), + }, + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "henley-1", + NumZones: 2, + Expected: sets.NewString("c" /* hash("henley") + 1 + 1(startingIndex) == 2 */, "a"), + }, + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "henley-2", + NumZones: 1, + Expected: sets.NewString("c" /* hash("henley") + 2 == 2 */), + }, + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "henley-2", + NumZones: 2, + Expected: sets.NewString("b" /* hash("henley") + 2 + 2(startingIndex) == 4 */, "c"), + }, + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "henley-3", + NumZones: 1, + Expected: sets.NewString("a" /* hash("henley") + 3 == 3 === 0 mod 3 */), + }, + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "henley-3", + NumZones: 2, + Expected: sets.NewString("a" /* hash("henley") + 3 + 3(startingIndex) == 6 */, "b"), + }, + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "henley-4", + NumZones: 1, + Expected: sets.NewString("b" /* hash("henley") + 4 == 4 === 1 mod 3 */), + }, + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "henley-4", + NumZones: 2, + Expected: sets.NewString("c" /* hash("henley") + 4 + 4(startingIndex) == 8 */, "a"), + }, + // Tests for PVC names that are edge cases + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "henley-", + NumZones: 1, + Expected: sets.NewString("c" /* hash("henley-") = 2652299129 === 2 mod 3 */), + }, + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "henley-", + NumZones: 2, + Expected: sets.NewString("c" 
/* hash("henley-") = 2652299129 === 2 mod 3 = 2 */, "a"), + }, + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "henley-a", + NumZones: 1, + Expected: sets.NewString("c" /* hash("henley-a") = 1459735322 === 2 mod 3 */), + }, + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "henley-a", + NumZones: 2, + Expected: sets.NewString("c" /* hash("henley-a") = 1459735322 === 2 mod 3 = 2 */, "a"), + }, + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "medium--1", + NumZones: 1, + Expected: sets.NewString("c" /* hash("") + 1 == 2166136261 + 1 === 2 mod 3 */), + }, + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "medium--1", + NumZones: 2, + Expected: sets.NewString("a" /* hash("") + 1 + 1(startingIndex) == 2166136261 + 1 + 1 === 3 mod 3 = 0 */, "b"), + }, + // Tests for PVC names for simple StatefulSet cases + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "medium-henley-1", + NumZones: 1, + Expected: sets.NewString("b" /* hash("henley") + 1 == 1 */), + }, + // Tests for PVC names for simple StatefulSet cases + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "medium-henley-1", + NumZones: 2, + Expected: sets.NewString("c" /* hash("henley") + 1 + 1(startingIndex) == 2 */, "a"), + }, + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "loud-henley-1", + NumZones: 1, + Expected: sets.NewString("b" /* hash("henley") + 1 == 1 */), + }, + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "loud-henley-1", + NumZones: 2, + Expected: sets.NewString("c" /* hash("henley") + 1 + 1(startingIndex) == 2 */, "a"), + }, + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "quiet-henley-2", + NumZones: 1, + Expected: sets.NewString("c" /* hash("henley") + 2 == 2 */), + }, + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "quiet-henley-2", + NumZones: 2, + Expected: sets.NewString("b" /* hash("henley") + 2 + 2(startingIndex) == 4 */, "c"), + }, + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: 
"medium-henley-2", + NumZones: 1, + Expected: sets.NewString("c" /* hash("henley") + 2 == 2 */), + }, + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "medium-henley-2", + NumZones: 2, + Expected: sets.NewString("b" /* hash("henley") + 2 + 2(startingIndex) == 4 */, "c"), + }, + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "medium-henley-3", + NumZones: 1, + Expected: sets.NewString("a" /* hash("henley") + 3 == 3 === 0 mod 3 */), + }, + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "medium-henley-3", + NumZones: 2, + Expected: sets.NewString("a" /* hash("henley") + 3 + 3(startingIndex) == 6 === 6 mod 3 = 0 */, "b"), + }, + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "medium-henley-4", + NumZones: 1, + Expected: sets.NewString("b" /* hash("henley") + 4 == 4 === 1 mod 3 */), + }, + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "medium-henley-4", + NumZones: 2, + Expected: sets.NewString("c" /* hash("henley") + 4 + 4(startingIndex) == 8 === 2 mod 3 */, "a"), + }, + // Tests for statefulsets (or claims) with dashes in the names + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "medium-alpha-henley-2", + NumZones: 1, + Expected: sets.NewString("c" /* hash("henley") + 2 == 2 */), + }, + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "medium-alpha-henley-2", + NumZones: 2, + Expected: sets.NewString("b" /* hash("henley") + 2 + 2(startingIndex) == 4 */, "c"), + }, + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "medium-beta-henley-3", + NumZones: 1, + Expected: sets.NewString("a" /* hash("henley") + 3 == 3 === 0 mod 3 */), + }, + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "medium-beta-henley-3", + NumZones: 2, + Expected: sets.NewString("a" /* hash("henley") + 3 + 3(startingIndex) == 6 === 0 mod 3 */, "b"), + }, + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "medium-gamma-henley-4", + NumZones: 1, + Expected: sets.NewString("b" /* hash("henley") + 4 == 4 === 1 mod 3 */), + 
}, + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "medium-gamma-henley-4", + NumZones: 2, + Expected: sets.NewString("c" /* hash("henley") + 4 + 4(startingIndex) == 8 === 2 mod 3 */, "a"), + }, + // Tests for statefulsets name ending in - + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "medium-henley--2", + NumZones: 1, + Expected: sets.NewString("a" /* hash("") + 2 == 0 mod 3 */), + }, + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "medium-henley--2", + NumZones: 2, + Expected: sets.NewString("c" /* hash("") + 2 + 2(startingIndex) == 2 mod 3 */, "a"), + }, + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "medium-henley--3", + NumZones: 1, + Expected: sets.NewString("b" /* hash("") + 3 == 1 mod 3 */), + }, + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "medium-henley--3", + NumZones: 2, + Expected: sets.NewString("b" /* hash("") + 3 + 3(startingIndex) == 1 mod 3 */, "c"), + }, + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "medium-henley--4", + NumZones: 1, + Expected: sets.NewString("c" /* hash("") + 4 == 2 mod 3 */), + }, + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "medium-henley--4", + NumZones: 2, + Expected: sets.NewString("a" /* hash("") + 4 + 4(startingIndex) == 0 mod 3 */, "b"), + }, + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "medium-henley--4", + NumZones: 3, + Expected: sets.NewString("c" /* hash("") + 4 == 2 mod 3 */, "a", "b"), + }, + { + Zones: sets.NewString("a", "b", "c"), + VolumeName: "medium-henley--4", + NumZones: 4, + Expected: sets.NewString("c" /* hash("") + 4 + 9(startingIndex) == 2 mod 3 */, "a", "b", "c"), + }, + { + Zones: sets.NewString("a", "b", "c", "d", "e", "f", "g", "h", "i"), + VolumeName: "henley-0", + NumZones: 2, + Expected: sets.NewString("a" /* hash("henley") == 0 */, "b"), + }, + { + Zones: sets.NewString("a", "b", "c", "d", "e", "f", "g", "h", "i"), + VolumeName: "henley-1", + NumZones: 2, + Expected: sets.NewString("c" /* 
hash("henley") == 0 + 2 */, "d"), + }, + { + Zones: sets.NewString("a", "b", "c", "d", "e", "f", "g", "h", "i"), + VolumeName: "henley-2", + NumZones: 2, + Expected: sets.NewString("e" /* hash("henley") == 0 + 2 + 2(startingIndex) */, "f"), + }, + { + Zones: sets.NewString("a", "b", "c", "d", "e", "f", "g", "h", "i"), + VolumeName: "henley-3", + NumZones: 2, + Expected: sets.NewString("g" /* hash("henley") == 0 + 2 + 4(startingIndex) */, "h"), + }, + { + Zones: sets.NewString("a", "b", "c", "d", "e", "f", "g", "h", "i"), + VolumeName: "henley-0", + NumZones: 3, + Expected: sets.NewString("a" /* hash("henley") == 0 */, "b", "c"), + }, + { + Zones: sets.NewString("a", "b", "c", "d", "e", "f", "g", "h", "i"), + VolumeName: "henley-1", + NumZones: 3, + Expected: sets.NewString("d" /* hash("henley") == 0 + 1 + 2(startingIndex) */, "e", "f"), + }, + { + Zones: sets.NewString("a", "b", "c", "d", "e", "f", "g", "h", "i"), + VolumeName: "henley-2", + NumZones: 3, + Expected: sets.NewString("g" /* hash("henley") == 0 + 2 + 4(startingIndex) */, "h", "i"), + }, + { + Zones: sets.NewString("a", "b", "c", "d", "e", "f", "g", "h", "i"), + VolumeName: "henley-3", + NumZones: 3, + Expected: sets.NewString("a" /* hash("henley") == 0 + 3 + 6(startingIndex) */, "b", "c"), + }, + } + + for _, test := range tests { + actual := ChooseZonesForVolume(test.Zones, test.VolumeName, test.NumZones) + + if !actual.Equal(test.Expected) { + t.Errorf("Test %v failed, expected zone %#v, actual %#v", test, test.Expected, actual) + } + } +} + +func TestValidateZone(t *testing.T) { + functionUnderTest := "ValidateZone" + + // First part: want an error + errCases := []string{"", " "} + for _, errCase := range errCases { + if got := ValidateZone(errCase); got == nil { + t.Errorf("%v(%v) returned (%v), want (%v)", functionUnderTest, errCase, got, "an error") + } + } + + // Second part: want no error + succCases := []string{" us-east-1a "} + for _, succCase := range succCases { + if got := 
ValidateZone(succCase); got != nil { + t.Errorf("%v(%v) returned (%v), want (%v)", functionUnderTest, succCase, got, nil) + } + } +} + +func TestGetWindowsPath(t *testing.T) { + tests := []struct { + path string + expectedPath string + }{ + { + path: `/var/lib/kubelet/pods/146f8428-83e7-11e7-8dd4-000d3a31dac4/volumes/kubernetes.io~disk`, + expectedPath: `c:\var\lib\kubelet\pods\146f8428-83e7-11e7-8dd4-000d3a31dac4\volumes\kubernetes.io~disk`, + }, + { + path: `\var/lib/kubelet/pods/146f8428-83e7-11e7-8dd4-000d3a31dac4\volumes\kubernetes.io~disk`, + expectedPath: `c:\var\lib\kubelet\pods\146f8428-83e7-11e7-8dd4-000d3a31dac4\volumes\kubernetes.io~disk`, + }, + { + path: `/`, + expectedPath: `c:\`, + }, + { + path: ``, + expectedPath: ``, + }, + } + + for _, test := range tests { + result := GetWindowsPath(test.path) + if result != test.expectedPath { + t.Errorf("GetWindowsPath(%v) returned (%v), want (%v)", test.path, result, test.expectedPath) + } + } +} diff --git a/pkg/volume/util/volumehelper/BUILD b/pkg/volume/util/volumehelper/BUILD deleted file mode 100644 index 507792a963..0000000000 --- a/pkg/volume/util/volumehelper/BUILD +++ /dev/null @@ -1,33 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = ["volumehelper.go"], - importpath = "k8s.io/kubernetes/pkg/volume/util/volumehelper", - deps = [ - "//pkg/features:go_default_library", - "//pkg/util/mount:go_default_library", - "//pkg/volume:go_default_library", - "//pkg/volume/util/types:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git 
a/pkg/volume/util/volumehelper/volumehelper.go b/pkg/volume/util/volumehelper/volumehelper.go deleted file mode 100644 index d7dec49fa3..0000000000 --- a/pkg/volume/util/volumehelper/volumehelper.go +++ /dev/null @@ -1,176 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package volumehelper contains consts and helper methods used by various -// volume components (attach/detach controller, kubelet, etc.). -package volumehelper - -import ( - "fmt" - "strings" - - "k8s.io/api/core/v1" - utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/kubernetes/pkg/features" - "k8s.io/kubernetes/pkg/util/mount" - "k8s.io/kubernetes/pkg/volume" - "k8s.io/kubernetes/pkg/volume/util/types" -) - -const ( - // ControllerManagedAttachAnnotation is the key of the annotation on Node - // objects that indicates attach/detach operations for the node should be - // managed by the attach/detach controller - ControllerManagedAttachAnnotation string = "volumes.kubernetes.io/controller-managed-attach-detach" - - // KeepTerminatedPodVolumesAnnotation is the key of the annotation on Node - // that decides if pod volumes are unmounted when pod is terminated - KeepTerminatedPodVolumesAnnotation string = "volumes.kubernetes.io/keep-terminated-pod-volumes" - - // VolumeGidAnnotationKey is the of the annotation on the PersistentVolume - // object that specifies a supplemental GID. 
- VolumeGidAnnotationKey = "pv.beta.kubernetes.io/gid" - - // VolumeDynamicallyCreatedByKey is the key of the annotation on PersistentVolume - // object created dynamically - VolumeDynamicallyCreatedByKey = "kubernetes.io/createdby" -) - -// GetUniquePodName returns a unique identifier to reference a pod by -func GetUniquePodName(pod *v1.Pod) types.UniquePodName { - return types.UniquePodName(pod.UID) -} - -// GetUniqueVolumeName returns a unique name representing the volume/plugin. -// Caller should ensure that volumeName is a name/ID uniquely identifying the -// actual backing device, directory, path, etc. for a particular volume. -// The returned name can be used to uniquely reference the volume, for example, -// to prevent operations (attach/detach or mount/unmount) from being triggered -// on the same volume. -func GetUniqueVolumeName(pluginName, volumeName string) v1.UniqueVolumeName { - return v1.UniqueVolumeName(fmt.Sprintf("%s/%s", pluginName, volumeName)) -} - -// GetUniqueVolumeNameForNonAttachableVolume returns the unique volume name -// for a non-attachable volume. -func GetUniqueVolumeNameForNonAttachableVolume( - podName types.UniquePodName, volumePlugin volume.VolumePlugin, volumeSpec *volume.Spec) v1.UniqueVolumeName { - return v1.UniqueVolumeName( - fmt.Sprintf("%s/%v-%s", volumePlugin.GetPluginName(), podName, volumeSpec.Name())) -} - -// GetUniqueVolumeNameFromSpec uses the given VolumePlugin to generate a unique -// name representing the volume defined in the specified volume spec. -// This returned name can be used to uniquely reference the actual backing -// device, directory, path, etc. referenced by the given volumeSpec. -// If the given plugin does not support the volume spec, this returns an error. -func GetUniqueVolumeNameFromSpec( - volumePlugin volume.VolumePlugin, - volumeSpec *volume.Spec) (v1.UniqueVolumeName, error) { - if volumePlugin == nil { - return "", fmt.Errorf( - "volumePlugin should not be nil. 
volumeSpec.Name=%q", - volumeSpec.Name()) - } - - volumeName, err := volumePlugin.GetVolumeName(volumeSpec) - if err != nil || volumeName == "" { - return "", fmt.Errorf( - "failed to GetVolumeName from volumePlugin for volumeSpec %q err=%v", - volumeSpec.Name(), - err) - } - - return GetUniqueVolumeName( - volumePlugin.GetPluginName(), - volumeName), - nil -} - -// IsPodTerminated checks if pod is terminated -func IsPodTerminated(pod *v1.Pod, podStatus v1.PodStatus) bool { - return podStatus.Phase == v1.PodFailed || podStatus.Phase == v1.PodSucceeded || (pod.DeletionTimestamp != nil && notRunning(podStatus.ContainerStatuses)) -} - -// notRunning returns true if every status is terminated or waiting, or the status list -// is empty. -func notRunning(statuses []v1.ContainerStatus) bool { - for _, status := range statuses { - if status.State.Terminated == nil && status.State.Waiting == nil { - return false - } - } - return true -} - -// SplitUniqueName splits the unique name to plugin name and volume name strings. It expects the uniqueName to follow -// the fromat plugin_name/volume_name and the plugin name must be namespaced as described by the plugin interface, -// i.e. namespace/plugin containing exactly one '/'. This means the unique name will always be in the form of -// plugin_namespace/plugin/volume_name, see k8s.io/kubernetes/pkg/volume/plugins.go VolumePlugin interface -// description and pkg/volume/util/volumehelper/volumehelper.go GetUniqueVolumeNameFromSpec that constructs -// the unique volume names. 
-func SplitUniqueName(uniqueName v1.UniqueVolumeName) (string, string, error) { - components := strings.SplitN(string(uniqueName), "/", 3) - if len(components) != 3 { - return "", "", fmt.Errorf("cannot split volume unique name %s to plugin/volume components", uniqueName) - } - pluginName := fmt.Sprintf("%s/%s", components[0], components[1]) - return pluginName, components[2], nil -} - -// NewSafeFormatAndMountFromHost creates a new SafeFormatAndMount with Mounter -// and Exec taken from given VolumeHost. -func NewSafeFormatAndMountFromHost(pluginName string, host volume.VolumeHost) *mount.SafeFormatAndMount { - mounter := host.GetMounter(pluginName) - exec := host.GetExec(pluginName) - return &mount.SafeFormatAndMount{Interface: mounter, Exec: exec} -} - -// GetVolumeMode retrieves VolumeMode from pv. -// If the volume doesn't have PersistentVolume, it's an inline volume, -// should return volumeMode as filesystem to keep existing behavior. -func GetVolumeMode(volumeSpec *volume.Spec) (v1.PersistentVolumeMode, error) { - if volumeSpec == nil || volumeSpec.PersistentVolume == nil { - return v1.PersistentVolumeFilesystem, nil - } - if volumeSpec.PersistentVolume.Spec.VolumeMode != nil { - return *volumeSpec.PersistentVolume.Spec.VolumeMode, nil - } - return "", fmt.Errorf("cannot get volumeMode for volume: %v", volumeSpec.Name()) -} - -// GetPersistentVolumeClaimVolumeMode retrieves VolumeMode from pvc. -func GetPersistentVolumeClaimVolumeMode(claim *v1.PersistentVolumeClaim) (v1.PersistentVolumeMode, error) { - if claim.Spec.VolumeMode != nil { - return *claim.Spec.VolumeMode, nil - } - return "", fmt.Errorf("cannot get volumeMode from pvc: %v", claim.Name) -} - -// CheckVolumeModeFilesystem checks VolumeMode. -// If the mode is Filesystem, return true otherwise return false. 
-func CheckVolumeModeFilesystem(volumeSpec *volume.Spec) (bool, error) { - if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { - volumeMode, err := GetVolumeMode(volumeSpec) - if err != nil { - return true, err - } - if volumeMode == v1.PersistentVolumeBlock { - return false, nil - } - } - return true, nil -} diff --git a/pkg/volume/util/volumepathhandler/BUILD b/pkg/volume/util/volumepathhandler/BUILD new file mode 100644 index 0000000000..562bfa9067 --- /dev/null +++ b/pkg/volume/util/volumepathhandler/BUILD @@ -0,0 +1,63 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "volume_path_handler.go", + ] + select({ + "@io_bazel_rules_go//go/platform:android": [ + "volume_path_handler_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:darwin": [ + "volume_path_handler_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:dragonfly": [ + "volume_path_handler_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:freebsd": [ + "volume_path_handler_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "volume_path_handler_linux.go", + ], + "@io_bazel_rules_go//go/platform:nacl": [ + "volume_path_handler_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:netbsd": [ + "volume_path_handler_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:openbsd": [ + "volume_path_handler_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:plan9": [ + "volume_path_handler_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:solaris": [ + "volume_path_handler_unsupported.go", + ], + "@io_bazel_rules_go//go/platform:windows": [ + "volume_path_handler_unsupported.go", + ], + "//conditions:default": [], + }), + importpath = "k8s.io/kubernetes/pkg/volume/util/volumepathhandler", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + ], +) + +filegroup( + 
name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/pkg/volume/util/volumepathhandler/volume_path_handler.go b/pkg/volume/util/volumepathhandler/volume_path_handler.go new file mode 100644 index 0000000000..61680c1157 --- /dev/null +++ b/pkg/volume/util/volumepathhandler/volume_path_handler.go @@ -0,0 +1,233 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package volumepathhandler + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "path/filepath" + + "github.com/golang/glog" + + "k8s.io/apimachinery/pkg/types" +) + +const ( + losetupPath = "losetup" + ErrDeviceNotFound = "device not found" + ErrDeviceNotSupported = "device not supported" +) + +// BlockVolumePathHandler defines a set of operations for handling block volume-related operations +type BlockVolumePathHandler interface { + // MapDevice creates a symbolic link to block device under specified map path + MapDevice(devicePath string, mapPath string, linkName string) error + // UnmapDevice removes a symbolic link to block device under specified map path + UnmapDevice(mapPath string, linkName string) error + // RemoveMapPath removes a file or directory on specified map path + RemoveMapPath(mapPath string) error + // IsSymlinkExist returns true if specified symbolic link exists + IsSymlinkExist(mapPath string) (bool, error) + // GetDeviceSymlinkRefs searches symbolic links under global map path + GetDeviceSymlinkRefs(devPath string, mapPath string) ([]string, error) + // FindGlobalMapPathUUIDFromPod finds {pod uuid} symbolic link under globalMapPath + // corresponding to map path symlink, and then return global map path with pod uuid. + FindGlobalMapPathUUIDFromPod(pluginDir, mapPath string, podUID types.UID) (string, error) + // AttachFileDevice takes a path to a regular file and makes it available as an + // attached block device. + AttachFileDevice(path string) (string, error) + // GetLoopDevice returns the full path to the loop device associated with the given path. + GetLoopDevice(path string) (string, error) + // RemoveLoopDevice removes specified loopback device + RemoveLoopDevice(device string) error +} + +// NewBlockVolumePathHandler returns a new instance of BlockVolumePathHandler. 
+func NewBlockVolumePathHandler() BlockVolumePathHandler { + var volumePathHandler VolumePathHandler + return volumePathHandler +} + +// VolumePathHandler is path related operation handlers for block volume +type VolumePathHandler struct { +} + +// MapDevice creates a symbolic link to block device under specified map path +func (v VolumePathHandler) MapDevice(devicePath string, mapPath string, linkName string) error { + // Example of global map path: + // globalMapPath/linkName: plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath}/{podUid} + // linkName: {podUid} + // + // Example of pod device map path: + // podDeviceMapPath/linkName: pods/{podUid}/{DefaultKubeletVolumeDevicesDirName}/{escapeQualifiedPluginName}/{volumeName} + // linkName: {volumeName} + if len(devicePath) == 0 { + return fmt.Errorf("Failed to map device to map path. devicePath is empty") + } + if len(mapPath) == 0 { + return fmt.Errorf("Failed to map device to map path. mapPath is empty") + } + if !filepath.IsAbs(mapPath) { + return fmt.Errorf("The map path should be absolute: map path: %s", mapPath) + } + glog.V(5).Infof("MapDevice: devicePath %s", devicePath) + glog.V(5).Infof("MapDevice: mapPath %s", mapPath) + glog.V(5).Infof("MapDevice: linkName %s", linkName) + + // Check and create mapPath + _, err := os.Stat(mapPath) + if err != nil && !os.IsNotExist(err) { + glog.Errorf("cannot validate map path: %s", mapPath) + return err + } + if err = os.MkdirAll(mapPath, 0750); err != nil { + return fmt.Errorf("Failed to mkdir %s, error %v", mapPath, err) + } + // Remove old symbolic link(or file) then create new one. + // This should be done because current symbolic link is + // stale across node reboot. 
+ linkPath := path.Join(mapPath, string(linkName)) + if err = os.Remove(linkPath); err != nil && !os.IsNotExist(err) { + return err + } + err = os.Symlink(devicePath, linkPath) + return err +} + +// UnmapDevice removes a symbolic link associated to block device under specified map path +func (v VolumePathHandler) UnmapDevice(mapPath string, linkName string) error { + if len(mapPath) == 0 { + return fmt.Errorf("Failed to unmap device from map path. mapPath is empty") + } + glog.V(5).Infof("UnmapDevice: mapPath %s", mapPath) + glog.V(5).Infof("UnmapDevice: linkName %s", linkName) + + // Check symbolic link exists + linkPath := path.Join(mapPath, string(linkName)) + if islinkExist, checkErr := v.IsSymlinkExist(linkPath); checkErr != nil { + return checkErr + } else if !islinkExist { + glog.Warningf("Warning: Unmap skipped because symlink does not exist on the path: %v", linkPath) + return nil + } + err := os.Remove(linkPath) + return err +} + +// RemoveMapPath removes a file or directory on specified map path +func (v VolumePathHandler) RemoveMapPath(mapPath string) error { + if len(mapPath) == 0 { + return fmt.Errorf("Failed to remove map path. mapPath is empty") + } + glog.V(5).Infof("RemoveMapPath: mapPath %s", mapPath) + err := os.RemoveAll(mapPath) + if err != nil && !os.IsNotExist(err) { + return err + } + return nil +} + +// IsSymlinkExist returns true if specified file exists and the type is symbolic link. +// If file doesn't exist, or file exists but not symbolic link, return false with no error. +// On other cases, return false with error from Lstat(). 
+func (v VolumePathHandler) IsSymlinkExist(mapPath string) (bool, error) { + fi, err := os.Lstat(mapPath) + if err == nil { + // If file exists and it's symbolic link, return true and no error + if fi.Mode()&os.ModeSymlink == os.ModeSymlink { + return true, nil + } + // If file exists but it's not symbolic link, return false and no error + return false, nil + } + // If file doesn't exist, return false and no error + if os.IsNotExist(err) { + return false, nil + } + // Return error from Lstat() + return false, err +} + +// GetDeviceSymlinkRefs searches symbolic links under global map path +func (v VolumePathHandler) GetDeviceSymlinkRefs(devPath string, mapPath string) ([]string, error) { + var refs []string + files, err := ioutil.ReadDir(mapPath) + if err != nil { + return nil, fmt.Errorf("Directory cannot read %v", err) + } + for _, file := range files { + if file.Mode()&os.ModeSymlink != os.ModeSymlink { + continue + } + filename := file.Name() + filepath, err := os.Readlink(path.Join(mapPath, filename)) + if err != nil { + return nil, fmt.Errorf("Symbolic link cannot be retrieved %v", err) + } + glog.V(5).Infof("GetDeviceSymlinkRefs: filepath: %v, devPath: %v", filepath, devPath) + if filepath == devPath { + refs = append(refs, path.Join(mapPath, filename)) + } + } + glog.V(5).Infof("GetDeviceSymlinkRefs: refs %v", refs) + return refs, nil +} + +// FindGlobalMapPathUUIDFromPod finds {pod uuid} symbolic link under globalMapPath +// corresponding to map path symlink, and then return global map path with pod uuid. +// ex. 
mapPath symlink: pods/{podUid}}/{DefaultKubeletVolumeDevicesDirName}/{escapeQualifiedPluginName}/{volumeName} -> /dev/sdX +// globalMapPath/{pod uuid}: plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath}/{pod uuid} -> /dev/sdX +func (v VolumePathHandler) FindGlobalMapPathUUIDFromPod(pluginDir, mapPath string, podUID types.UID) (string, error) { + var globalMapPathUUID string + // Find symbolic link named pod uuid under plugin dir + err := filepath.Walk(pluginDir, func(path string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + if (fi.Mode()&os.ModeSymlink == os.ModeSymlink) && (fi.Name() == string(podUID)) { + glog.V(5).Infof("FindGlobalMapPathFromPod: path %s, mapPath %s", path, mapPath) + if res, err := compareSymlinks(path, mapPath); err == nil && res { + globalMapPathUUID = path + } + } + return nil + }) + if err != nil { + return "", err + } + glog.V(5).Infof("FindGlobalMapPathFromPod: globalMapPathUUID %s", globalMapPathUUID) + // Return path contains global map path + {pod uuid} + return globalMapPathUUID, nil +} + +func compareSymlinks(global, pod string) (bool, error) { + devGlobal, err := os.Readlink(global) + if err != nil { + return false, err + } + devPod, err := os.Readlink(pod) + if err != nil { + return false, err + } + glog.V(5).Infof("CompareSymlinks: devGloBal %s, devPod %s", devGlobal, devPod) + if devGlobal == devPod { + return true, nil + } + return false, nil +} diff --git a/pkg/volume/util/util_linux.go b/pkg/volume/util/volumepathhandler/volume_path_handler_linux.go similarity index 97% rename from pkg/volume/util/util_linux.go rename to pkg/volume/util/volumepathhandler/volume_path_handler_linux.go index 59b707bcc4..f9a886d7dc 100644 --- a/pkg/volume/util/util_linux.go +++ b/pkg/volume/util/volumepathhandler/volume_path_handler_linux.go @@ -1,7 +1,7 @@ // +build linux /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -16,7 +16,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package util +package volumepathhandler import ( "errors" diff --git a/pkg/volume/util/util_unsupported.go b/pkg/volume/util/volumepathhandler/volume_path_handler_unsupported.go similarity index 95% rename from pkg/volume/util/util_unsupported.go rename to pkg/volume/util/volumepathhandler/volume_path_handler_unsupported.go index 930e4f663d..266398b1da 100644 --- a/pkg/volume/util/util_unsupported.go +++ b/pkg/volume/util/volumepathhandler/volume_path_handler_unsupported.go @@ -1,7 +1,7 @@ // +build !linux /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -16,7 +16,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package util +package volumepathhandler import ( "fmt" diff --git a/pkg/volume/util_test.go b/pkg/volume/util_test.go deleted file mode 100644 index 011793defe..0000000000 --- a/pkg/volume/util_test.go +++ /dev/null @@ -1,902 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package volume - -import ( - "fmt" - "hash/fnv" - "reflect" - "strings" - "testing" - - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/watch" - api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/util/slice" -) - -type testcase struct { - // Input of the test - name string - existingPod *v1.Pod - createPod *v1.Pod - // eventSequence is list of events that are simulated during recycling. It - // can be either event generated by a recycler pod or a state change of - // the pod. (see newPodEvent and newEvent below). - eventSequence []watch.Event - - // Expected output. - // expectedEvents is list of events that were sent to the volume that was - // recycled. - expectedEvents []mockEvent - expectedError string -} - -func newPodEvent(eventtype watch.EventType, name string, phase v1.PodPhase, message string) watch.Event { - return watch.Event{ - Type: eventtype, - Object: newPod(name, phase, message), - } -} - -func newEvent(eventtype, message string) watch.Event { - return watch.Event{ - Type: watch.Added, - Object: &v1.Event{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: metav1.NamespaceDefault, - }, - Reason: "MockEvent", - Message: message, - Type: eventtype, - }, - } -} - -func newPod(name string, phase v1.PodPhase, message string) *v1.Pod { - return &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: metav1.NamespaceDefault, - Name: name, - }, - Status: v1.PodStatus{ - Phase: phase, - Message: message, - }, - } -} - -func TestRecyclerPod(t *testing.T) { - tests := []testcase{ - { - // Test recycler success with some events - name: "RecyclerSuccess", - createPod: newPod("podRecyclerSuccess", v1.PodPending, ""), - eventSequence: []watch.Event{ - // Pod gets Running and Succeeded - newPodEvent(watch.Added, "podRecyclerSuccess", v1.PodPending, ""), - newEvent(v1.EventTypeNormal, 
"Successfully assigned recycler-for-podRecyclerSuccess to 127.0.0.1"), - newEvent(v1.EventTypeNormal, "pulling image \"k8s.gcr.io/busybox\""), - newEvent(v1.EventTypeNormal, "Successfully pulled image \"k8s.gcr.io/busybox\""), - newEvent(v1.EventTypeNormal, "Created container with docker id 83d929aeac82"), - newEvent(v1.EventTypeNormal, "Started container with docker id 83d929aeac82"), - newPodEvent(watch.Modified, "podRecyclerSuccess", v1.PodRunning, ""), - newPodEvent(watch.Modified, "podRecyclerSuccess", v1.PodSucceeded, ""), - }, - expectedEvents: []mockEvent{ - {v1.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerSuccess to 127.0.0.1"}, - {v1.EventTypeNormal, "pulling image \"k8s.gcr.io/busybox\""}, - {v1.EventTypeNormal, "Successfully pulled image \"k8s.gcr.io/busybox\""}, - {v1.EventTypeNormal, "Created container with docker id 83d929aeac82"}, - {v1.EventTypeNormal, "Started container with docker id 83d929aeac82"}, - }, - expectedError: "", - }, - { - // Test recycler failure with some events - name: "RecyclerFailure", - createPod: newPod("podRecyclerFailure", v1.PodPending, ""), - eventSequence: []watch.Event{ - // Pod gets Running and Succeeded - newPodEvent(watch.Added, "podRecyclerFailure", v1.PodPending, ""), - newEvent(v1.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerFailure to 127.0.0.1"), - newEvent(v1.EventTypeWarning, "Unable to mount volumes for pod \"recycler-for-podRecyclerFailure_default(3c9809e5-347c-11e6-a79b-3c970e965218)\": timeout expired waiting for volumes to attach/mount"), - newEvent(v1.EventTypeWarning, "Error syncing pod, skipping: timeout expired waiting for volumes to attach/mount for pod \"default\"/\"recycler-for-podRecyclerFailure\". 
list of unattached/unmounted"), - newPodEvent(watch.Modified, "podRecyclerFailure", v1.PodRunning, ""), - newPodEvent(watch.Modified, "podRecyclerFailure", v1.PodFailed, "Pod was active on the node longer than specified deadline"), - }, - expectedEvents: []mockEvent{ - {v1.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerFailure to 127.0.0.1"}, - {v1.EventTypeWarning, "Unable to mount volumes for pod \"recycler-for-podRecyclerFailure_default(3c9809e5-347c-11e6-a79b-3c970e965218)\": timeout expired waiting for volumes to attach/mount"}, - {v1.EventTypeWarning, "Error syncing pod, skipping: timeout expired waiting for volumes to attach/mount for pod \"default\"/\"recycler-for-podRecyclerFailure\". list of unattached/unmounted"}, - }, - expectedError: "failed to recycle volume: Pod was active on the node longer than specified deadline", - }, - { - // Recycler pod gets deleted - name: "RecyclerDeleted", - createPod: newPod("podRecyclerDeleted", v1.PodPending, ""), - eventSequence: []watch.Event{ - // Pod gets Running and Succeeded - newPodEvent(watch.Added, "podRecyclerDeleted", v1.PodPending, ""), - newEvent(v1.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerDeleted to 127.0.0.1"), - newPodEvent(watch.Deleted, "podRecyclerDeleted", v1.PodPending, ""), - }, - expectedEvents: []mockEvent{ - {v1.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerDeleted to 127.0.0.1"}, - }, - expectedError: "failed to recycle volume: recycler pod was deleted", - }, - { - // Another recycler pod is already running - name: "RecyclerRunning", - existingPod: newPod("podOldRecycler", v1.PodRunning, ""), - createPod: newPod("podNewRecycler", v1.PodFailed, "mock message"), - eventSequence: []watch.Event{}, - expectedError: "old recycler pod found, will retry later", - }, - } - - for _, test := range tests { - t.Logf("Test %q", test.name) - client := &mockRecyclerClient{ - events: test.eventSequence, - pod: test.existingPod, - } - err := 
internalRecycleVolumeByWatchingPodUntilCompletion(test.createPod.Name, test.createPod, client) - receivedError := "" - if err != nil { - receivedError = err.Error() - } - if receivedError != test.expectedError { - t.Errorf("Test %q failed, expected error %q, got %q", test.name, test.expectedError, receivedError) - continue - } - if !client.deletedCalled { - t.Errorf("Test %q failed, expected deferred client.Delete to be called on recycler pod", test.name) - continue - } - for i, expectedEvent := range test.expectedEvents { - if len(client.receivedEvents) <= i { - t.Errorf("Test %q failed, expected event %d: %q not received", test.name, i, expectedEvent.message) - continue - } - receivedEvent := client.receivedEvents[i] - if expectedEvent.eventtype != receivedEvent.eventtype { - t.Errorf("Test %q failed, event %d does not match: expected eventtype %q, got %q", test.name, i, expectedEvent.eventtype, receivedEvent.eventtype) - } - if expectedEvent.message != receivedEvent.message { - t.Errorf("Test %q failed, event %d does not match: expected message %q, got %q", test.name, i, expectedEvent.message, receivedEvent.message) - } - } - for i := len(test.expectedEvents); i < len(client.receivedEvents); i++ { - t.Errorf("Test %q failed, unexpected event received: %s, %q", test.name, client.receivedEvents[i].eventtype, client.receivedEvents[i].message) - } - } -} - -type mockRecyclerClient struct { - pod *v1.Pod - deletedCalled bool - receivedEvents []mockEvent - events []watch.Event -} - -type mockEvent struct { - eventtype, message string -} - -func (c *mockRecyclerClient) CreatePod(pod *v1.Pod) (*v1.Pod, error) { - if c.pod == nil { - c.pod = pod - return c.pod, nil - } - // Simulate "already exists" error - return nil, errors.NewAlreadyExists(api.Resource("pods"), pod.Name) -} - -func (c *mockRecyclerClient) GetPod(name, namespace string) (*v1.Pod, error) { - if c.pod != nil { - return c.pod, nil - } else { - return nil, fmt.Errorf("pod does not exist") - } -} - -func (c 
*mockRecyclerClient) DeletePod(name, namespace string) error { - c.deletedCalled = true - return nil -} - -func (c *mockRecyclerClient) WatchPod(name, namespace string, stopChannel chan struct{}) (<-chan watch.Event, error) { - eventCh := make(chan watch.Event, 0) - go func() { - for _, e := range c.events { - eventCh <- e - } - }() - return eventCh, nil -} - -func (c *mockRecyclerClient) Event(eventtype, message string) { - c.receivedEvents = append(c.receivedEvents, mockEvent{eventtype, message}) -} - -func TestCalculateTimeoutForVolume(t *testing.T) { - pv := &v1.PersistentVolume{ - Spec: v1.PersistentVolumeSpec{ - Capacity: v1.ResourceList{ - v1.ResourceName(v1.ResourceStorage): resource.MustParse("500M"), - }, - }, - } - - timeout := CalculateTimeoutForVolume(50, 30, pv) - if timeout != 50 { - t.Errorf("Expected 50 for timeout but got %v", timeout) - } - - pv.Spec.Capacity[v1.ResourceStorage] = resource.MustParse("2Gi") - timeout = CalculateTimeoutForVolume(50, 30, pv) - if timeout != 60 { - t.Errorf("Expected 60 for timeout but got %v", timeout) - } - - pv.Spec.Capacity[v1.ResourceStorage] = resource.MustParse("150Gi") - timeout = CalculateTimeoutForVolume(50, 30, pv) - if timeout != 4500 { - t.Errorf("Expected 4500 for timeout but got %v", timeout) - } -} - -func TestGenerateVolumeName(t *testing.T) { - - // Normal operation, no truncate - v1 := GenerateVolumeName("kubernetes", "pv-cinder-abcde", 255) - if v1 != "kubernetes-dynamic-pv-cinder-abcde" { - t.Errorf("Expected kubernetes-dynamic-pv-cinder-abcde, got %s", v1) - } - - // Truncate trailing "6789-dynamic" - prefix := strings.Repeat("0123456789", 9) // 90 characters prefix + 8 chars. 
of "-dynamic" - v2 := GenerateVolumeName(prefix, "pv-cinder-abcde", 100) - expect := prefix[:84] + "-pv-cinder-abcde" - if v2 != expect { - t.Errorf("Expected %s, got %s", expect, v2) - } - - // Truncate really long cluster name - prefix = strings.Repeat("0123456789", 1000) // 10000 characters prefix - v3 := GenerateVolumeName(prefix, "pv-cinder-abcde", 100) - if v3 != expect { - t.Errorf("Expected %s, got %s", expect, v3) - } -} - -func TestMountOptionFromSpec(t *testing.T) { - scenarios := map[string]struct { - volume *Spec - expectedMountList []string - systemOptions []string - }{ - "volume-with-mount-options": { - volume: createVolumeSpecWithMountOption("good-mount-opts", "ro,nfsvers=3", v1.PersistentVolumeSpec{ - PersistentVolumeSource: v1.PersistentVolumeSource{ - NFS: &v1.NFSVolumeSource{Server: "localhost", Path: "/srv", ReadOnly: false}, - }, - }), - expectedMountList: []string{"ro", "nfsvers=3"}, - systemOptions: nil, - }, - "volume-with-bad-mount-options": { - volume: createVolumeSpecWithMountOption("good-mount-opts", "", v1.PersistentVolumeSpec{ - PersistentVolumeSource: v1.PersistentVolumeSource{ - NFS: &v1.NFSVolumeSource{Server: "localhost", Path: "/srv", ReadOnly: false}, - }, - }), - expectedMountList: []string{}, - systemOptions: nil, - }, - "vol-with-sys-opts": { - volume: createVolumeSpecWithMountOption("good-mount-opts", "ro,nfsvers=3", v1.PersistentVolumeSpec{ - PersistentVolumeSource: v1.PersistentVolumeSource{ - NFS: &v1.NFSVolumeSource{Server: "localhost", Path: "/srv", ReadOnly: false}, - }, - }), - expectedMountList: []string{"ro", "nfsvers=3", "fsid=100", "hard"}, - systemOptions: []string{"fsid=100", "hard"}, - }, - "vol-with-sys-opts-with-dup": { - volume: createVolumeSpecWithMountOption("good-mount-opts", "ro,nfsvers=3", v1.PersistentVolumeSpec{ - PersistentVolumeSource: v1.PersistentVolumeSource{ - NFS: &v1.NFSVolumeSource{Server: "localhost", Path: "/srv", ReadOnly: false}, - }, - }), - expectedMountList: []string{"ro", "nfsvers=3", 
"fsid=100"}, - systemOptions: []string{"fsid=100", "ro"}, - }, - } - - for name, scenario := range scenarios { - mountOptions := MountOptionFromSpec(scenario.volume, scenario.systemOptions...) - if !reflect.DeepEqual(slice.SortStrings(mountOptions), slice.SortStrings(scenario.expectedMountList)) { - t.Errorf("for %s expected mount options : %v got %v", name, scenario.expectedMountList, mountOptions) - } - } -} - -func createVolumeSpecWithMountOption(name string, mountOptions string, spec v1.PersistentVolumeSpec) *Spec { - annotations := map[string]string{ - v1.MountOptionAnnotation: mountOptions, - } - objMeta := metav1.ObjectMeta{ - Name: name, - Annotations: annotations, - } - - pv := &v1.PersistentVolume{ - ObjectMeta: objMeta, - Spec: spec, - } - return &Spec{PersistentVolume: pv} -} - -func checkFnv32(t *testing.T, s string, expected int) { - h := fnv.New32() - h.Write([]byte(s)) - h.Sum32() - - if int(h.Sum32()) != expected { - t.Fatalf("hash of %q was %v, expected %v", s, h.Sum32(), expected) - } -} - -func TestChooseZoneForVolume(t *testing.T) { - checkFnv32(t, "henley", 1180403676) - // 1180403676 mod 3 == 0, so the offset from "henley" is 0, which makes it easier to verify this by inspection - - // A few others - checkFnv32(t, "henley-", 2652299129) - checkFnv32(t, "henley-a", 1459735322) - checkFnv32(t, "", 2166136261) - - tests := []struct { - Zones sets.String - VolumeName string - Expected string - }{ - // Test for PVC names that don't have a dash - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "henley", - Expected: "a", // hash("henley") == 0 - }, - // Tests for PVC names that end in - number, but don't look like statefulset PVCs - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "henley-0", - Expected: "a", // hash("henley") == 0 - }, - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "henley-1", - Expected: "b", // hash("henley") + 1 == 1 - }, - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "henley-2", - Expected: 
"c", // hash("henley") + 2 == 2 - }, - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "henley-3", - Expected: "a", // hash("henley") + 3 == 3 === 0 mod 3 - }, - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "henley-4", - Expected: "b", // hash("henley") + 4 == 4 === 1 mod 3 - }, - // Tests for PVC names that are edge cases - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "henley-", - Expected: "c", // hash("henley-") = 2652299129 === 2 mod 3 - }, - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "henley-a", - Expected: "c", // hash("henley-a") = 1459735322 === 2 mod 3 - }, - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "medium--1", - Expected: "c", // hash("") + 1 == 2166136261 + 1 === 2 mod 3 - }, - // Tests for PVC names for simple StatefulSet cases - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "medium-henley-1", - Expected: "b", // hash("henley") + 1 == 1 - }, - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "loud-henley-1", - Expected: "b", // hash("henley") + 1 == 1 - }, - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "quiet-henley-2", - Expected: "c", // hash("henley") + 2 == 2 - }, - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "medium-henley-2", - Expected: "c", // hash("henley") + 2 == 2 - }, - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "medium-henley-3", - Expected: "a", // hash("henley") + 3 == 3 === 0 mod 3 - }, - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "medium-henley-4", - Expected: "b", // hash("henley") + 4 == 4 === 1 mod 3 - }, - // Tests for statefulsets (or claims) with dashes in the names - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "medium-alpha-henley-2", - Expected: "c", // hash("henley") + 2 == 2 - }, - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "medium-beta-henley-3", - Expected: "a", // hash("henley") + 3 == 3 === 0 mod 3 - }, - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: 
"medium-gamma-henley-4", - Expected: "b", // hash("henley") + 4 == 4 === 1 mod 3 - }, - // Tests for statefulsets name ending in - - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "medium-henley--2", - Expected: "a", // hash("") + 2 == 0 mod 3 - }, - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "medium-henley--3", - Expected: "b", // hash("") + 3 == 1 mod 3 - }, - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "medium-henley--4", - Expected: "c", // hash("") + 4 == 2 mod 3 - }, - } - - for _, test := range tests { - actual := ChooseZoneForVolume(test.Zones, test.VolumeName) - - if actual != test.Expected { - t.Errorf("Test %v failed, expected zone %q, actual %q", test, test.Expected, actual) - } - } -} - -func TestChooseZonesForVolume(t *testing.T) { - checkFnv32(t, "henley", 1180403676) - // 1180403676 mod 3 == 0, so the offset from "henley" is 0, which makes it easier to verify this by inspection - - // A few others - checkFnv32(t, "henley-", 2652299129) - checkFnv32(t, "henley-a", 1459735322) - checkFnv32(t, "", 2166136261) - - tests := []struct { - Zones sets.String - VolumeName string - NumZones uint32 - Expected sets.String - }{ - // Test for PVC names that don't have a dash - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "henley", - NumZones: 1, - Expected: sets.NewString("a" /* hash("henley") == 0 */), - }, - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "henley", - NumZones: 2, - Expected: sets.NewString("a" /* hash("henley") == 0 */, "b"), - }, - // Tests for PVC names that end in - number, but don't look like statefulset PVCs - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "henley-0", - NumZones: 1, - Expected: sets.NewString("a" /* hash("henley") == 0 */), - }, - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "henley-0", - NumZones: 2, - Expected: sets.NewString("a" /* hash("henley") == 0 */, "b"), - }, - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "henley-1", - NumZones: 1, 
- Expected: sets.NewString("b" /* hash("henley") + 1 == 1 */), - }, - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "henley-1", - NumZones: 2, - Expected: sets.NewString("c" /* hash("henley") + 1 + 1(startingIndex) == 2 */, "a"), - }, - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "henley-2", - NumZones: 1, - Expected: sets.NewString("c" /* hash("henley") + 2 == 2 */), - }, - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "henley-2", - NumZones: 2, - Expected: sets.NewString("b" /* hash("henley") + 2 + 2(startingIndex) == 4 */, "c"), - }, - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "henley-3", - NumZones: 1, - Expected: sets.NewString("a" /* hash("henley") + 3 == 3 === 0 mod 3 */), - }, - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "henley-3", - NumZones: 2, - Expected: sets.NewString("a" /* hash("henley") + 3 + 3(startingIndex) == 6 */, "b"), - }, - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "henley-4", - NumZones: 1, - Expected: sets.NewString("b" /* hash("henley") + 4 == 4 === 1 mod 3 */), - }, - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "henley-4", - NumZones: 2, - Expected: sets.NewString("c" /* hash("henley") + 4 + 4(startingIndex) == 8 */, "a"), - }, - // Tests for PVC names that are edge cases - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "henley-", - NumZones: 1, - Expected: sets.NewString("c" /* hash("henley-") = 2652299129 === 2 mod 3 */), - }, - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "henley-", - NumZones: 2, - Expected: sets.NewString("c" /* hash("henley-") = 2652299129 === 2 mod 3 = 2 */, "a"), - }, - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "henley-a", - NumZones: 1, - Expected: sets.NewString("c" /* hash("henley-a") = 1459735322 === 2 mod 3 */), - }, - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "henley-a", - NumZones: 2, - Expected: sets.NewString("c" /* hash("henley-a") = 1459735322 === 2 mod 3 = 2 */, "a"), - 
}, - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "medium--1", - NumZones: 1, - Expected: sets.NewString("c" /* hash("") + 1 == 2166136261 + 1 === 2 mod 3 */), - }, - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "medium--1", - NumZones: 2, - Expected: sets.NewString("a" /* hash("") + 1 + 1(startingIndex) == 2166136261 + 1 + 1 === 3 mod 3 = 0 */, "b"), - }, - // Tests for PVC names for simple StatefulSet cases - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "medium-henley-1", - NumZones: 1, - Expected: sets.NewString("b" /* hash("henley") + 1 == 1 */), - }, - // Tests for PVC names for simple StatefulSet cases - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "medium-henley-1", - NumZones: 2, - Expected: sets.NewString("c" /* hash("henley") + 1 + 1(startingIndex) == 2 */, "a"), - }, - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "loud-henley-1", - NumZones: 1, - Expected: sets.NewString("b" /* hash("henley") + 1 == 1 */), - }, - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "loud-henley-1", - NumZones: 2, - Expected: sets.NewString("c" /* hash("henley") + 1 + 1(startingIndex) == 2 */, "a"), - }, - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "quiet-henley-2", - NumZones: 1, - Expected: sets.NewString("c" /* hash("henley") + 2 == 2 */), - }, - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "quiet-henley-2", - NumZones: 2, - Expected: sets.NewString("b" /* hash("henley") + 2 + 2(startingIndex) == 4 */, "c"), - }, - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "medium-henley-2", - NumZones: 1, - Expected: sets.NewString("c" /* hash("henley") + 2 == 2 */), - }, - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "medium-henley-2", - NumZones: 2, - Expected: sets.NewString("b" /* hash("henley") + 2 + 2(startingIndex) == 4 */, "c"), - }, - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "medium-henley-3", - NumZones: 1, - Expected: sets.NewString("a" /* hash("henley") 
+ 3 == 3 === 0 mod 3 */), - }, - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "medium-henley-3", - NumZones: 2, - Expected: sets.NewString("a" /* hash("henley") + 3 + 3(startingIndex) == 6 === 6 mod 3 = 0 */, "b"), - }, - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "medium-henley-4", - NumZones: 1, - Expected: sets.NewString("b" /* hash("henley") + 4 == 4 === 1 mod 3 */), - }, - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "medium-henley-4", - NumZones: 2, - Expected: sets.NewString("c" /* hash("henley") + 4 + 4(startingIndex) == 8 === 2 mod 3 */, "a"), - }, - // Tests for statefulsets (or claims) with dashes in the names - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "medium-alpha-henley-2", - NumZones: 1, - Expected: sets.NewString("c" /* hash("henley") + 2 == 2 */), - }, - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "medium-alpha-henley-2", - NumZones: 2, - Expected: sets.NewString("b" /* hash("henley") + 2 + 2(startingIndex) == 4 */, "c"), - }, - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "medium-beta-henley-3", - NumZones: 1, - Expected: sets.NewString("a" /* hash("henley") + 3 == 3 === 0 mod 3 */), - }, - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "medium-beta-henley-3", - NumZones: 2, - Expected: sets.NewString("a" /* hash("henley") + 3 + 3(startingIndex) == 6 === 0 mod 3 */, "b"), - }, - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "medium-gamma-henley-4", - NumZones: 1, - Expected: sets.NewString("b" /* hash("henley") + 4 == 4 === 1 mod 3 */), - }, - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "medium-gamma-henley-4", - NumZones: 2, - Expected: sets.NewString("c" /* hash("henley") + 4 + 4(startingIndex) == 8 === 2 mod 3 */, "a"), - }, - // Tests for statefulsets name ending in - - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "medium-henley--2", - NumZones: 1, - Expected: sets.NewString("a" /* hash("") + 2 == 0 mod 3 */), - }, - { - Zones: 
sets.NewString("a", "b", "c"), - VolumeName: "medium-henley--2", - NumZones: 2, - Expected: sets.NewString("c" /* hash("") + 2 + 2(startingIndex) == 2 mod 3 */, "a"), - }, - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "medium-henley--3", - NumZones: 1, - Expected: sets.NewString("b" /* hash("") + 3 == 1 mod 3 */), - }, - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "medium-henley--3", - NumZones: 2, - Expected: sets.NewString("b" /* hash("") + 3 + 3(startingIndex) == 1 mod 3 */, "c"), - }, - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "medium-henley--4", - NumZones: 1, - Expected: sets.NewString("c" /* hash("") + 4 == 2 mod 3 */), - }, - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "medium-henley--4", - NumZones: 2, - Expected: sets.NewString("a" /* hash("") + 4 + 4(startingIndex) == 0 mod 3 */, "b"), - }, - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "medium-henley--4", - NumZones: 3, - Expected: sets.NewString("c" /* hash("") + 4 == 2 mod 3 */, "a", "b"), - }, - { - Zones: sets.NewString("a", "b", "c"), - VolumeName: "medium-henley--4", - NumZones: 4, - Expected: sets.NewString("c" /* hash("") + 4 + 9(startingIndex) == 2 mod 3 */, "a", "b", "c"), - }, - { - Zones: sets.NewString("a", "b", "c", "d", "e", "f", "g", "h", "i"), - VolumeName: "henley-0", - NumZones: 2, - Expected: sets.NewString("a" /* hash("henley") == 0 */, "b"), - }, - { - Zones: sets.NewString("a", "b", "c", "d", "e", "f", "g", "h", "i"), - VolumeName: "henley-1", - NumZones: 2, - Expected: sets.NewString("c" /* hash("henley") == 0 + 2 */, "d"), - }, - { - Zones: sets.NewString("a", "b", "c", "d", "e", "f", "g", "h", "i"), - VolumeName: "henley-2", - NumZones: 2, - Expected: sets.NewString("e" /* hash("henley") == 0 + 2 + 2(startingIndex) */, "f"), - }, - { - Zones: sets.NewString("a", "b", "c", "d", "e", "f", "g", "h", "i"), - VolumeName: "henley-3", - NumZones: 2, - Expected: sets.NewString("g" /* hash("henley") == 0 + 2 + 
4(startingIndex) */, "h"), - }, - { - Zones: sets.NewString("a", "b", "c", "d", "e", "f", "g", "h", "i"), - VolumeName: "henley-0", - NumZones: 3, - Expected: sets.NewString("a" /* hash("henley") == 0 */, "b", "c"), - }, - { - Zones: sets.NewString("a", "b", "c", "d", "e", "f", "g", "h", "i"), - VolumeName: "henley-1", - NumZones: 3, - Expected: sets.NewString("d" /* hash("henley") == 0 + 1 + 2(startingIndex) */, "e", "f"), - }, - { - Zones: sets.NewString("a", "b", "c", "d", "e", "f", "g", "h", "i"), - VolumeName: "henley-2", - NumZones: 3, - Expected: sets.NewString("g" /* hash("henley") == 0 + 2 + 4(startingIndex) */, "h", "i"), - }, - { - Zones: sets.NewString("a", "b", "c", "d", "e", "f", "g", "h", "i"), - VolumeName: "henley-3", - NumZones: 3, - Expected: sets.NewString("a" /* hash("henley") == 0 + 3 + 6(startingIndex) */, "b", "c"), - }, - } - - for _, test := range tests { - actual := ChooseZonesForVolume(test.Zones, test.VolumeName, test.NumZones) - - if !actual.Equal(test.Expected) { - t.Errorf("Test %v failed, expected zone %#v, actual %#v", test, test.Expected, actual) - } - } -} - -func TestValidateZone(t *testing.T) { - functionUnderTest := "ValidateZone" - - // First part: want an error - errCases := []string{"", " "} - for _, errCase := range errCases { - if got := ValidateZone(errCase); got == nil { - t.Errorf("%v(%v) returned (%v), want (%v)", functionUnderTest, errCase, got, "an error") - } - } - - // Second part: want no error - succCases := []string{" us-east-1a "} - for _, succCase := range succCases { - if got := ValidateZone(succCase); got != nil { - t.Errorf("%v(%v) returned (%v), want (%v)", functionUnderTest, succCase, got, nil) - } - } -} - -func TestGetWindowsPath(t *testing.T) { - tests := []struct { - path string - expectedPath string - }{ - { - path: `/var/lib/kubelet/pods/146f8428-83e7-11e7-8dd4-000d3a31dac4/volumes/kubernetes.io~disk`, - expectedPath: 
`c:\var\lib\kubelet\pods\146f8428-83e7-11e7-8dd4-000d3a31dac4\volumes\kubernetes.io~disk`, - }, - { - path: `\var/lib/kubelet/pods/146f8428-83e7-11e7-8dd4-000d3a31dac4\volumes\kubernetes.io~disk`, - expectedPath: `c:\var\lib\kubelet\pods\146f8428-83e7-11e7-8dd4-000d3a31dac4\volumes\kubernetes.io~disk`, - }, - { - path: `/`, - expectedPath: `c:\`, - }, - { - path: ``, - expectedPath: ``, - }, - } - - for _, test := range tests { - result := GetWindowsPath(test.path) - if result != test.expectedPath { - t.Errorf("GetWindowsPath(%v) returned (%v), want (%v)", test.path, result, test.expectedPath) - } - } -} diff --git a/pkg/volume/vsphere_volume/BUILD b/pkg/volume/vsphere_volume/BUILD index 025f3dc59b..f69c1fb6d4 100644 --- a/pkg/volume/vsphere_volume/BUILD +++ b/pkg/volume/vsphere_volume/BUILD @@ -23,7 +23,6 @@ go_library( "//pkg/util/strings:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", - "//pkg/volume/util/volumehelper:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", diff --git a/pkg/volume/vsphere_volume/attacher.go b/pkg/volume/vsphere_volume/attacher.go index fb9886beba..0cf3440d8a 100644 --- a/pkg/volume/vsphere_volume/attacher.go +++ b/pkg/volume/vsphere_volume/attacher.go @@ -30,7 +30,6 @@ import ( "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" - "k8s.io/kubernetes/pkg/volume/util/volumehelper" ) type vsphereVMDKAttacher struct { @@ -219,8 +218,8 @@ func (attacher *vsphereVMDKAttacher) MountDevice(spec *volume.Spec, devicePath s options := []string{} if notMnt { - diskMounter := volumehelper.NewSafeFormatAndMountFromHost(vsphereVolumePluginName, attacher.host) - mountOptions := volume.MountOptionFromSpec(spec, options...) 
+ diskMounter := volumeutil.NewSafeFormatAndMountFromHost(vsphereVolumePluginName, attacher.host) + mountOptions := volumeutil.MountOptionFromSpec(spec, options...) err = diskMounter.FormatAndMount(devicePath, deviceMountPath, volumeSource.FSType, mountOptions) if err != nil { os.Remove(deviceMountPath) diff --git a/pkg/volume/vsphere_volume/vsphere_volume.go b/pkg/volume/vsphere_volume/vsphere_volume.go index 00f4ea74e8..7b1e611df5 100644 --- a/pkg/volume/vsphere_volume/vsphere_volume.go +++ b/pkg/volume/vsphere_volume/vsphere_volume.go @@ -31,7 +31,6 @@ import ( utilstrings "k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" - "k8s.io/kubernetes/pkg/volume/util/volumehelper" ) // This is the primary entrypoint for volume plugins. @@ -120,7 +119,7 @@ func (plugin *vsphereVolumePlugin) newMounterInternal(spec *volume.Spec, podUID MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, spec.Name(), plugin.host)), }, fsType: fsType, - diskMounter: volumehelper.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host)}, nil + diskMounter: util.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host)}, nil } func (plugin *vsphereVolumePlugin) newUnmounterInternal(volName string, podUID types.UID, manager vdManager, mounter mount.Interface) (volume.Unmounter, error) { @@ -350,7 +349,7 @@ func (plugin *vsphereVolumePlugin) newProvisionerInternal(options volume.VolumeO } func (v *vsphereVolumeProvisioner) Provision() (*v1.PersistentVolume, error) { - if !volume.AccessModesContainedInAll(v.plugin.GetAccessModes(), v.options.PVC.Spec.AccessModes) { + if !util.AccessModesContainedInAll(v.plugin.GetAccessModes(), v.options.PVC.Spec.AccessModes) { return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", v.options.PVC.Spec.AccessModes, v.plugin.GetAccessModes()) } @@ -368,7 +367,7 @@ func (v *vsphereVolumeProvisioner) Provision() (*v1.PersistentVolume, error) { Name: 
v.options.PVName, Labels: map[string]string{}, Annotations: map[string]string{ - volumehelper.VolumeDynamicallyCreatedByKey: "vsphere-volume-dynamic-provisioner", + util.VolumeDynamicallyCreatedByKey: "vsphere-volume-dynamic-provisioner", }, }, Spec: v1.PersistentVolumeSpec{ diff --git a/pkg/volume/vsphere_volume/vsphere_volume_util.go b/pkg/volume/vsphere_volume/vsphere_volume_util.go index 0f5d335ec5..14e235d5db 100644 --- a/pkg/volume/vsphere_volume/vsphere_volume_util.go +++ b/pkg/volume/vsphere_volume/vsphere_volume_util.go @@ -93,8 +93,8 @@ func (util *VsphereDiskUtil) CreateVolume(v *vsphereVolumeProvisioner) (volSpec capacity := v.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] volSizeBytes := capacity.Value() // vSphere works with kilobytes, convert to KiB with rounding up - volSizeKB := int(volume.RoundUpSize(volSizeBytes, 1024)) - name := volume.GenerateVolumeName(v.options.ClusterName, v.options.PVName, 255) + volSizeKB := int(volumeutil.RoundUpSize(volSizeBytes, 1024)) + name := volumeutil.GenerateVolumeName(v.options.ClusterName, v.options.PVName, 255) volumeOptions := &vclib.VolumeOptions{ CapacityKB: volSizeKB, Tags: *v.options.CloudTags, diff --git a/test/e2e/framework/BUILD b/test/e2e/framework/BUILD index 00be1e8801..3784978b57 100644 --- a/test/e2e/framework/BUILD +++ b/test/e2e/framework/BUILD @@ -80,7 +80,7 @@ go_library( "//pkg/util/system:go_default_library", "//pkg/util/taints:go_default_library", "//pkg/util/version:go_default_library", - "//pkg/volume/util/volumehelper:go_default_library", + "//pkg/volume/util:go_default_library", "//test/e2e/framework/ginkgowrapper:go_default_library", "//test/e2e/framework/metrics:go_default_library", "//test/e2e/manifest:go_default_library", diff --git a/test/e2e/framework/pv_util.go b/test/e2e/framework/pv_util.go index ee866b248b..f2f1ddaa28 100644 --- a/test/e2e/framework/pv_util.go +++ b/test/e2e/framework/pv_util.go @@ -38,7 +38,7 @@ import ( 
"k8s.io/kubernetes/pkg/api/testapi" awscloud "k8s.io/kubernetes/pkg/cloudprovider/providers/aws" gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce" - "k8s.io/kubernetes/pkg/volume/util/volumehelper" + "k8s.io/kubernetes/pkg/volume/util" imageutils "k8s.io/kubernetes/test/utils/image" ) @@ -588,7 +588,7 @@ func MakePersistentVolume(pvConfig PersistentVolumeConfig) *v1.PersistentVolume GenerateName: pvConfig.NamePrefix, Labels: pvConfig.Labels, Annotations: map[string]string{ - volumehelper.VolumeGidAnnotationKey: "777", + util.VolumeGidAnnotationKey: "777", }, }, Spec: v1.PersistentVolumeSpec{ diff --git a/test/e2e/storage/vsphere/BUILD b/test/e2e/storage/vsphere/BUILD index ce2d1cab8e..c06139f25e 100644 --- a/test/e2e/storage/vsphere/BUILD +++ b/test/e2e/storage/vsphere/BUILD @@ -37,7 +37,7 @@ go_library( ], importpath = "k8s.io/kubernetes/test/e2e/storage/vsphere", deps = [ - "//pkg/volume/util/volumehelper:go_default_library", + "//pkg/volume/util:go_default_library", "//test/e2e/framework:go_default_library", "//test/e2e/storage/utils:go_default_library", "//vendor/github.com/golang/glog:go_default_library", diff --git a/test/e2e/storage/vsphere/vsphere_utils.go b/test/e2e/storage/vsphere/vsphere_utils.go index 8cea902ca5..6f0118fcc5 100644 --- a/test/e2e/storage/vsphere/vsphere_utils.go +++ b/test/e2e/storage/vsphere/vsphere_utils.go @@ -37,7 +37,7 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" - "k8s.io/kubernetes/pkg/volume/util/volumehelper" + "k8s.io/kubernetes/pkg/volume/util" "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/storage/utils" @@ -175,7 +175,7 @@ func getVSpherePersistentVolumeSpec(volumePath string, persistentVolumeReclaimPo ObjectMeta: metav1.ObjectMeta{ GenerateName: pvConfig.NamePrefix, Annotations: map[string]string{ - volumehelper.VolumeGidAnnotationKey: "777", + util.VolumeGidAnnotationKey: "777", }, }, Spec: v1.PersistentVolumeSpec{ 
diff --git a/test/integration/volume/BUILD b/test/integration/volume/BUILD index 2b40a455cd..0f21d354ef 100644 --- a/test/integration/volume/BUILD +++ b/test/integration/volume/BUILD @@ -23,7 +23,7 @@ go_test( "//pkg/controller/volume/persistentvolume:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/testing:go_default_library", - "//pkg/volume/util/volumehelper:go_default_library", + "//pkg/volume/util:go_default_library", "//test/integration/framework:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/test/integration/volume/attach_detach_test.go b/test/integration/volume/attach_detach_test.go index b914ddfcc7..b4de32de05 100644 --- a/test/integration/volume/attach_detach_test.go +++ b/test/integration/volume/attach_detach_test.go @@ -34,7 +34,7 @@ import ( volumecache "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache" "k8s.io/kubernetes/pkg/volume" volumetest "k8s.io/kubernetes/pkg/volume/testing" - "k8s.io/kubernetes/pkg/volume/util/volumehelper" + "k8s.io/kubernetes/pkg/volume/util" "k8s.io/kubernetes/test/integration/framework" ) @@ -86,7 +86,7 @@ func TestPodDeletionWithDswp(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "node-sandbox", Annotations: map[string]string{ - volumehelper.ControllerManagedAttachAnnotation: "true", + util.ControllerManagedAttachAnnotation: "true", }, }, } @@ -152,7 +152,7 @@ func TestPodUpdateWithWithADC(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "node-sandbox", Annotations: map[string]string{ - volumehelper.ControllerManagedAttachAnnotation: "true", + util.ControllerManagedAttachAnnotation: "true", }, }, } @@ -219,8 +219,8 @@ func TestPodUpdateWithKeepTerminatedPodVolumes(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "node-sandbox", Annotations: map[string]string{ - volumehelper.ControllerManagedAttachAnnotation: "true", - volumehelper.KeepTerminatedPodVolumesAnnotation: "true", + 
util.ControllerManagedAttachAnnotation: "true", + util.KeepTerminatedPodVolumesAnnotation: "true", }, }, } @@ -383,7 +383,7 @@ func TestPodAddedByDswp(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "node-sandbox", Annotations: map[string]string{ - volumehelper.ControllerManagedAttachAnnotation: "true", + util.ControllerManagedAttachAnnotation: "true", }, }, }