Delete most volume drivers

k3s-v1.14.6
Darren Shepherd 2018-10-05 21:26:08 -07:00 committed by Erik Wilson
parent e4e3b74257
commit 1a01154162
76 changed files with 0 additions and 18066 deletions

cmd/kube-controller-manager/app/plugins.go

@@ -28,19 +28,11 @@ import (
// Volume plugins
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/csi"
"k8s.io/kubernetes/pkg/volume/fc"
"k8s.io/kubernetes/pkg/volume/flexvolume"
"k8s.io/kubernetes/pkg/volume/flocker"
"k8s.io/kubernetes/pkg/volume/glusterfs"
"k8s.io/kubernetes/pkg/volume/host_path"
"k8s.io/kubernetes/pkg/volume/iscsi"
"k8s.io/kubernetes/pkg/volume/local"
"k8s.io/kubernetes/pkg/volume/nfs"
"k8s.io/kubernetes/pkg/volume/portworx"
"k8s.io/kubernetes/pkg/volume/quobyte"
"k8s.io/kubernetes/pkg/volume/rbd"
"k8s.io/kubernetes/pkg/volume/scaleio"
"k8s.io/kubernetes/pkg/volume/storageos"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
utilfeature "k8s.io/apiserver/pkg/util/feature"
@@ -56,12 +48,7 @@ import (
func ProbeAttachableVolumePlugins() []volume.VolumePlugin {
allPlugins := []volume.VolumePlugin{}
allPlugins = append(allPlugins, portworx.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, scaleio.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, storageos.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, fc.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, iscsi.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, rbd.ProbeVolumePlugins()...)
if utilfeature.DefaultFeatureGate.Enabled(features.CSIPersistentVolume) {
allPlugins = append(allPlugins, csi.ProbeVolumePlugins()...)
}
@@ -79,12 +66,6 @@ func GetDynamicPluginProber(config kubectrlmgrconfig.VolumeConfiguration) volume
func ProbeExpandableVolumePlugins(config kubectrlmgrconfig.VolumeConfiguration) []volume.VolumePlugin {
allPlugins := []volume.VolumePlugin{}
allPlugins = append(allPlugins, portworx.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, glusterfs.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, rbd.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, scaleio.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, storageos.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, fc.ProbeVolumePlugins()...)
return allPlugins
}
@@ -123,16 +104,7 @@ func ProbeControllerVolumePlugins(config kubectrlmgrconfig.VolumeConfiguration)
klog.Fatalf("Could not create NFS recycler pod from file %s: %+v", config.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathNFS, err)
}
allPlugins = append(allPlugins, nfs.ProbeVolumePlugins(nfsConfig)...)
allPlugins = append(allPlugins, glusterfs.ProbeVolumePlugins()...)
// add rbd provisioner
allPlugins = append(allPlugins, rbd.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, quobyte.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, flocker.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, portworx.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, scaleio.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, local.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, storageos.ProbeVolumePlugins()...)
if utilfeature.DefaultFeatureGate.Enabled(features.CSIInlineVolume) {
allPlugins = append(allPlugins, csi.ProbeVolumePlugins()...)
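
Every deleted line above follows one registration pattern: each driver package exports a ProbeVolumePlugins() that returns a []volume.VolumePlugin, and the probe functions simply concatenate those slices, so removing a driver means deleting its import and its append line. A minimal self-contained sketch of that pattern (VolumePlugin and hostPathPlugin here are illustrative stand-ins, not the real k8s.io/kubernetes/pkg/volume API):

// Sketch of the registration pattern this commit prunes.
package main

import "fmt"

// VolumePlugin stands in for k8s.io/kubernetes/pkg/volume.VolumePlugin.
type VolumePlugin interface {
    GetPluginName() string
}

type hostPathPlugin struct{}

func (p *hostPathPlugin) GetPluginName() string { return "kubernetes.io/host-path" }

// ProbeVolumePlugins stands in for a driver package's exported probe function.
func ProbeVolumePlugins() []VolumePlugin {
    return []VolumePlugin{&hostPathPlugin{}}
}

func main() {
    allPlugins := []VolumePlugin{}
    // Each surviving driver contributes one append; a deleted driver simply
    // drops out of this chain along with its import.
    allPlugins = append(allPlugins, ProbeVolumePlugins()...)
    for _, p := range allPlugins {
        fmt.Println(p.GetPluginName())
    }
}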

cmd/kubelet/app/plugins.go

@@ -21,27 +21,17 @@ import (
"k8s.io/utils/exec"
// Volume plugins
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/cephfs"
"k8s.io/kubernetes/pkg/volume/configmap"
"k8s.io/kubernetes/pkg/volume/csi"
"k8s.io/kubernetes/pkg/volume/downwardapi"
"k8s.io/kubernetes/pkg/volume/emptydir"
"k8s.io/kubernetes/pkg/volume/fc"
"k8s.io/kubernetes/pkg/volume/flexvolume"
"k8s.io/kubernetes/pkg/volume/flocker"
"k8s.io/kubernetes/pkg/volume/git_repo"
"k8s.io/kubernetes/pkg/volume/glusterfs"
"k8s.io/kubernetes/pkg/volume/host_path"
"k8s.io/kubernetes/pkg/volume/iscsi"
"k8s.io/kubernetes/pkg/volume/local"
"k8s.io/kubernetes/pkg/volume/nfs"
"k8s.io/kubernetes/pkg/volume/portworx"
"k8s.io/kubernetes/pkg/volume/projected"
"k8s.io/kubernetes/pkg/volume/quobyte"
"k8s.io/kubernetes/pkg/volume/rbd"
"k8s.io/kubernetes/pkg/volume/scaleio"
"k8s.io/kubernetes/pkg/volume/secret"
"k8s.io/kubernetes/pkg/volume/storageos"
// features check
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/features"
@@ -58,24 +48,14 @@ func ProbeVolumePlugins() []volume.VolumePlugin {
// Kubelet does not currently need to configure volume plugins.
// If/when it does, see kube-controller-manager/app/plugins.go for example of using volume.VolumeConfig
allPlugins = append(allPlugins, emptydir.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, git_repo.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, host_path.ProbeVolumePlugins(volume.VolumeConfig{})...)
allPlugins = append(allPlugins, nfs.ProbeVolumePlugins(volume.VolumeConfig{})...)
allPlugins = append(allPlugins, secret.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, iscsi.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, glusterfs.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, rbd.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, quobyte.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, cephfs.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, downwardapi.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, fc.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, flocker.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, configmap.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, projected.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, portworx.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, scaleio.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, local.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, storageos.ProbeVolumePlugins()...)
if utilfeature.DefaultFeatureGate.Enabled(features.CSIPersistentVolume) {
allPlugins = append(allPlugins, csi.ProbeVolumePlugins()...)
}
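
The only conditional registrations are the CSI ones, guarded by feature gates such as CSIPersistentVolume. A toy sketch of that guard (the map-backed gate is an illustrative stand-in for utilfeature.DefaultFeatureGate, which the real binaries configure from --feature-gates):

package main

import "fmt"

// featureGates stands in for utilfeature.DefaultFeatureGate.
var featureGates = map[string]bool{"CSIPersistentVolume": true}

type VolumePlugin interface {
    GetPluginName() string
}

type csiPlugin struct{}

func (p *csiPlugin) GetPluginName() string { return "kubernetes.io/csi" }

func main() {
    allPlugins := []VolumePlugin{}
    // Mirrors the guarded append kept by this commit: CSI is registered
    // only when its gate is enabled.
    if featureGates["CSIPersistentVolume"] {
        allPlugins = append(allPlugins, &csiPlugin{})
    }
    fmt.Println(len(allPlugins), "plugin(s) registered")
}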

pkg/volume/cephfs/BUILD

@@ -1,53 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"cephfs.go",
"doc.go",
],
importpath = "k8s.io/kubernetes/pkg/volume/cephfs",
deps = [
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
"//vendor/k8s.io/utils/strings:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["cephfs_test.go"],
embed = [":go_default_library"],
deps = [
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/client-go/util/testing:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

pkg/volume/cephfs/OWNERS

@@ -1,13 +0,0 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- rootfs
- jsafrane
- saad-ali
reviewers:
- rootfs
- saad-ali
- jsafrane
- jingxu97
- msau42
- cofyc

pkg/volume/cephfs/cephfs.go

@@ -1,478 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cephfs
import (
"fmt"
"os"
"os/exec"
"path"
"runtime"
"strings"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
utilstrings "k8s.io/utils/strings"
)
// ProbeVolumePlugins is the primary entrypoint for volume plugins.
func ProbeVolumePlugins() []volume.VolumePlugin {
return []volume.VolumePlugin{&cephfsPlugin{nil}}
}
type cephfsPlugin struct {
host volume.VolumeHost
}
var _ volume.VolumePlugin = &cephfsPlugin{}
const (
cephfsPluginName = "kubernetes.io/cephfs"
)
func (plugin *cephfsPlugin) Init(host volume.VolumeHost) error {
plugin.host = host
return nil
}
func (plugin *cephfsPlugin) GetPluginName() string {
return cephfsPluginName
}
func (plugin *cephfsPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
mon, _, _, _, _, err := getVolumeSource(spec)
if err != nil {
return "", err
}
return fmt.Sprintf("%v", mon), nil
}
func (plugin *cephfsPlugin) CanSupport(spec *volume.Spec) bool {
return (spec.Volume != nil && spec.Volume.CephFS != nil) || (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.CephFS != nil)
}
func (plugin *cephfsPlugin) IsMigratedToCSI() bool {
return false
}
func (plugin *cephfsPlugin) RequiresRemount() bool {
return false
}
func (plugin *cephfsPlugin) SupportsMountOption() bool {
return true
}
func (plugin *cephfsPlugin) SupportsBulkVolumeVerification() bool {
return false
}
func (plugin *cephfsPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
return []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
v1.ReadOnlyMany,
v1.ReadWriteMany,
}
}
func (plugin *cephfsPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
secretName, secretNs, err := getSecretNameAndNamespace(spec, pod.Namespace)
if err != nil {
return nil, err
}
secret := ""
if len(secretName) > 0 && len(secretNs) > 0 {
// if a secret is provided, retrieve it
kubeClient := plugin.host.GetKubeClient()
if kubeClient == nil {
return nil, fmt.Errorf("Cannot get kube client")
}
secrets, err := kubeClient.CoreV1().Secrets(secretNs).Get(secretName, metav1.GetOptions{})
if err != nil {
err = fmt.Errorf("Couldn't get secret %v/%v err: %v", secretNs, secretName, err)
return nil, err
}
for name, data := range secrets.Data {
secret = string(data)
klog.V(4).Infof("found ceph secret info: %s", name)
}
}
return plugin.newMounterInternal(spec, pod.UID, plugin.host.GetMounter(plugin.GetPluginName()), secret)
}
func (plugin *cephfsPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, mounter mount.Interface, secret string) (volume.Mounter, error) {
mon, path, id, secretFile, readOnly, err := getVolumeSource(spec)
if err != nil {
return nil, err
}
if id == "" {
id = "admin"
}
if path == "" {
path = "/"
}
if !strings.HasPrefix(path, "/") {
path = "/" + path
}
if secretFile == "" {
secretFile = "/etc/ceph/" + id + ".secret"
}
return &cephfsMounter{
cephfs: &cephfs{
podUID: podUID,
volName: spec.Name(),
mon: mon,
path: path,
secret: secret,
id: id,
secretFile: secretFile,
readonly: readOnly,
mounter: mounter,
plugin: plugin,
mountOptions: util.MountOptionFromSpec(spec),
},
}, nil
}
func (plugin *cephfsPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
return plugin.newUnmounterInternal(volName, podUID, plugin.host.GetMounter(plugin.GetPluginName()))
}
func (plugin *cephfsPlugin) newUnmounterInternal(volName string, podUID types.UID, mounter mount.Interface) (volume.Unmounter, error) {
return &cephfsUnmounter{
cephfs: &cephfs{
podUID: podUID,
volName: volName,
mounter: mounter,
plugin: plugin},
}, nil
}
func (plugin *cephfsPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
cephfsVolume := &v1.Volume{
Name: volumeName,
VolumeSource: v1.VolumeSource{
CephFS: &v1.CephFSVolumeSource{
Monitors: []string{},
Path: mountPath,
},
},
}
return volume.NewSpecFromVolume(cephfsVolume), nil
}
// CephFS volumes represent a bare host file or directory mount of a CephFS export.
type cephfs struct {
volName string
podUID types.UID
mon []string
path string
id string
secret string
secretFile string
readonly bool
mounter mount.Interface
plugin *cephfsPlugin
volume.MetricsNil
mountOptions []string
}
type cephfsMounter struct {
*cephfs
}
var _ volume.Mounter = &cephfsMounter{}
func (cephfsVolume *cephfsMounter) GetAttributes() volume.Attributes {
return volume.Attributes{
ReadOnly: cephfsVolume.readonly,
Managed: false,
SupportsSELinux: false,
}
}
// Checks prior to mount operations to verify that the required components (binaries, etc.)
// to mount the volume are available on the underlying node.
// If not, it returns an error
func (cephfsVolume *cephfsMounter) CanMount() error {
return nil
}
// SetUp attaches the disk and bind mounts to the volume path.
func (cephfsVolume *cephfsMounter) SetUp(fsGroup *int64) error {
return cephfsVolume.SetUpAt(cephfsVolume.GetPath(), fsGroup)
}
// SetUpAt attaches the disk and bind mounts to the volume path.
func (cephfsVolume *cephfsMounter) SetUpAt(dir string, fsGroup *int64) error {
notMnt, err := cephfsVolume.mounter.IsLikelyNotMountPoint(dir)
klog.V(4).Infof("CephFS mount set up: %s %v %v", dir, !notMnt, err)
if err != nil && !os.IsNotExist(err) {
return err
}
if !notMnt {
return nil
}
if err := os.MkdirAll(dir, 0750); err != nil {
return err
}
// check whether a fuse mount is available; if not, default to the kernel mount.
if cephfsVolume.checkFuseMount() {
klog.V(4).Info("CephFS fuse mount.")
err = cephfsVolume.execFuseMount(dir)
// clean up the keyring path whether or not the fuse mount failed.
keyringPath := cephfsVolume.GetKeyringPath()
_, StatErr := os.Stat(keyringPath)
if !os.IsNotExist(StatErr) {
os.RemoveAll(keyringPath)
}
if err == nil {
// cephfs fuse mount succeeded.
return nil
}
// if the cephfs fuse mount failed, fall back to the kernel mount.
klog.V(2).Infof("CephFS fuse mount failed: %v, fallback to kernel mount.", err)
}
klog.V(4).Info("CephFS kernel mount.")
err = cephfsVolume.execMount(dir)
if err != nil {
// cleanup upon failure.
mount.CleanupMountPoint(dir, cephfsVolume.mounter, false)
return err
}
return nil
}
type cephfsUnmounter struct {
*cephfs
}
var _ volume.Unmounter = &cephfsUnmounter{}
// TearDown unmounts the bind mount
func (cephfsVolume *cephfsUnmounter) TearDown() error {
return cephfsVolume.TearDownAt(cephfsVolume.GetPath())
}
// TearDownAt unmounts the bind mount
func (cephfsVolume *cephfsUnmounter) TearDownAt(dir string) error {
return mount.CleanupMountPoint(dir, cephfsVolume.mounter, false)
}
// GetPath creates global mount path
func (cephfsVolume *cephfs) GetPath() string {
name := cephfsPluginName
return cephfsVolume.plugin.host.GetPodVolumeDir(cephfsVolume.podUID, utilstrings.EscapeQualifiedName(name), cephfsVolume.volName)
}
// GetKeyringPath creates cephfuse keyring path
func (cephfsVolume *cephfs) GetKeyringPath() string {
name := cephfsPluginName
volumeDir := cephfsVolume.plugin.host.GetPodVolumeDir(cephfsVolume.podUID, utilstrings.EscapeQualifiedName(name), cephfsVolume.volName)
volumeKeyringDir := volumeDir + "~keyring"
return volumeKeyringDir
}
func (cephfsVolume *cephfs) execMount(mountpoint string) error {
// cephfs mount option
cephOpt := ""
// override secretfile if secret is provided
if cephfsVolume.secret != "" {
cephOpt = "name=" + cephfsVolume.id + ",secret=" + cephfsVolume.secret
} else {
cephOpt = "name=" + cephfsVolume.id + ",secretfile=" + cephfsVolume.secretFile
}
// build option array
opt := []string{}
if cephfsVolume.readonly {
opt = append(opt, "ro")
}
opt = append(opt, cephOpt)
// build src like mon1:6789,mon2:6789,mon3:6789:/
hosts := cephfsVolume.mon
l := len(hosts)
// pass all monitors and let ceph randomize and fail over
i := 0
src := ""
for i = 0; i < l-1; i++ {
src += hosts[i] + ","
}
src += hosts[i] + ":" + cephfsVolume.path
opt = util.JoinMountOptions(cephfsVolume.mountOptions, opt)
if err := cephfsVolume.mounter.Mount(src, mountpoint, "ceph", opt); err != nil {
return fmt.Errorf("CephFS: mount failed: %v", err)
}
return nil
}
func (cephfsVolume *cephfsMounter) checkFuseMount() bool {
execute := cephfsVolume.plugin.host.GetExec(cephfsVolume.plugin.GetPluginName())
switch runtime.GOOS {
case "linux":
if _, err := execute.Run("/usr/bin/test", "-x", "/sbin/mount.fuse.ceph"); err == nil {
klog.V(4).Info("/sbin/mount.fuse.ceph exists, it should be fuse mount.")
return true
}
return false
}
return false
}
func (cephfsVolume *cephfs) execFuseMount(mountpoint string) error {
// cephfs keyring file
keyringFile := ""
// override secretfile if secret is provided
if cephfsVolume.secret != "" {
// TODO: cephfs fuse currently doesn't support secret option,
// remove keyring file create once secret option is supported.
klog.V(4).Info("cephfs mount begin using fuse.")
keyringPath := cephfsVolume.GetKeyringPath()
os.MkdirAll(keyringPath, 0750)
payload := make(map[string]util.FileProjection, 1)
var fileProjection util.FileProjection
keyring := fmt.Sprintf("[client.%s]\nkey = %s\n", cephfsVolume.id, cephfsVolume.secret)
fileProjection.Data = []byte(keyring)
fileProjection.Mode = int32(0644)
fileName := cephfsVolume.id + ".keyring"
payload[fileName] = fileProjection
writerContext := fmt.Sprintf("cephfuse:%v.keyring", cephfsVolume.id)
writer, err := util.NewAtomicWriter(keyringPath, writerContext)
if err != nil {
klog.Errorf("failed to create atomic writer: %v", err)
return err
}
err = writer.Write(payload)
if err != nil {
klog.Errorf("failed to write payload to dir: %v", err)
return err
}
keyringFile = path.Join(keyringPath, fileName)
} else {
keyringFile = cephfsVolume.secretFile
}
// build src like mon1:6789,mon2:6789,mon3:6789:/
hosts := cephfsVolume.mon
l := len(hosts)
// pass all monitors and let ceph randomize and fail over
i := 0
src := ""
for i = 0; i < l-1; i++ {
src += hosts[i] + ","
}
src += hosts[i]
mountArgs := []string{}
mountArgs = append(mountArgs, "-k")
mountArgs = append(mountArgs, keyringFile)
mountArgs = append(mountArgs, "-m")
mountArgs = append(mountArgs, src)
mountArgs = append(mountArgs, mountpoint)
mountArgs = append(mountArgs, "-r")
mountArgs = append(mountArgs, cephfsVolume.path)
mountArgs = append(mountArgs, "--id")
mountArgs = append(mountArgs, cephfsVolume.id)
// build option array
opt := []string{}
if cephfsVolume.readonly {
opt = append(opt, "ro")
}
opt = util.JoinMountOptions(cephfsVolume.mountOptions, opt)
if len(opt) > 0 {
mountArgs = append(mountArgs, "-o")
mountArgs = append(mountArgs, strings.Join(opt, ","))
}
klog.V(4).Infof("Mounting cmd ceph-fuse with arguments (%s)", mountArgs)
command := exec.Command("ceph-fuse", mountArgs...)
output, err := command.CombinedOutput()
if err != nil || !(strings.Contains(string(output), "starting fuse")) {
return fmt.Errorf("Ceph-fuse failed: %v\narguments: %s\nOutput: %s", err, mountArgs, string(output))
}
return nil
}
func getVolumeSource(spec *volume.Spec) ([]string, string, string, string, bool, error) {
if spec.Volume != nil && spec.Volume.CephFS != nil {
mon := spec.Volume.CephFS.Monitors
path := spec.Volume.CephFS.Path
user := spec.Volume.CephFS.User
secretFile := spec.Volume.CephFS.SecretFile
readOnly := spec.Volume.CephFS.ReadOnly
return mon, path, user, secretFile, readOnly, nil
} else if spec.PersistentVolume != nil &&
spec.PersistentVolume.Spec.CephFS != nil {
mon := spec.PersistentVolume.Spec.CephFS.Monitors
path := spec.PersistentVolume.Spec.CephFS.Path
user := spec.PersistentVolume.Spec.CephFS.User
secretFile := spec.PersistentVolume.Spec.CephFS.SecretFile
readOnly := spec.PersistentVolume.Spec.CephFS.ReadOnly
return mon, path, user, secretFile, readOnly, nil
}
return nil, "", "", "", false, fmt.Errorf("Spec does not reference a CephFS volume type")
}
func getSecretNameAndNamespace(spec *volume.Spec, defaultNamespace string) (string, string, error) {
if spec.Volume != nil && spec.Volume.CephFS != nil {
localSecretRef := spec.Volume.CephFS.SecretRef
if localSecretRef != nil {
return localSecretRef.Name, defaultNamespace, nil
}
return "", "", nil
} else if spec.PersistentVolume != nil &&
spec.PersistentVolume.Spec.CephFS != nil {
secretRef := spec.PersistentVolume.Spec.CephFS.SecretRef
secretNs := defaultNamespace
if secretRef != nil {
if len(secretRef.Namespace) != 0 {
secretNs = secretRef.Namespace
}
return secretRef.Name, secretNs, nil
}
return "", "", nil
}
return "", "", fmt.Errorf("Spec does not reference an CephFS volume type")
}
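
Both execMount and execFuseMount assemble the mount source by joining every monitor address with commas and appending the subtree path, so the ceph client can randomize and fail over between monitors. The hand-rolled index loop is equivalent to a strings.Join; a runnable sketch (buildCephSource is a hypothetical helper, not part of the deleted code):

package main

import (
    "fmt"
    "strings"
)

// buildCephSource reproduces the src string execMount builds by hand:
// all monitors joined with commas, then ":<path>" appended,
// e.g. mon1:6789,mon2:6789,mon3:6789:/.
func buildCephSource(monitors []string, path string) string {
    return strings.Join(monitors, ",") + ":" + path
}

func main() {
    fmt.Println(buildCephSource([]string{"mon1:6789", "mon2:6789", "mon3:6789"}, "/"))
}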

pkg/volume/cephfs/cephfs_test.go

@@ -1,247 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cephfs
import (
"os"
"path"
"testing"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
)
func TestCanSupport(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("cephTest")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/cephfs")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
if plug.GetPluginName() != "kubernetes.io/cephfs" {
t.Errorf("Wrong name: %s", plug.GetPluginName())
}
if plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{}}}) {
t.Errorf("Expected false")
}
if !plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{CephFS: &v1.CephFSVolumeSource{}}}}) {
t.Errorf("Expected true")
}
}
func TestPlugin(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("cephTest")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/cephfs")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
spec := &v1.Volume{
Name: "vol1",
VolumeSource: v1.VolumeSource{
CephFS: &v1.CephFSVolumeSource{
Monitors: []string{"a", "b"},
User: "user",
SecretRef: nil,
SecretFile: "/etc/ceph/user.secret",
},
},
}
mounter, err := plug.(*cephfsPlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &mount.FakeMounter{}, "secrets")
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
}
if mounter == nil {
t.Errorf("Got a nil Mounter")
}
volumePath := mounter.GetPath()
volpath := path.Join(tmpDir, "pods/poduid/volumes/kubernetes.io~cephfs/vol1")
if volumePath != volpath {
t.Errorf("Got unexpected path: %s", volumePath)
}
if err := mounter.SetUp(nil); err != nil {
t.Errorf("Expected success, got: %v", err)
}
if _, err := os.Stat(volumePath); err != nil {
if os.IsNotExist(err) {
t.Errorf("SetUp() failed, volume path not created: %s", volumePath)
} else {
t.Errorf("SetUp() failed: %v", err)
}
}
unmounter, err := plug.(*cephfsPlugin).newUnmounterInternal("vol1", types.UID("poduid"), &mount.FakeMounter{})
if err != nil {
t.Errorf("Failed to make a new Unmounter: %v", err)
}
if unmounter == nil {
t.Errorf("Got a nil Unmounter")
}
if err := unmounter.TearDown(); err != nil {
t.Errorf("Expected success, got: %v", err)
}
if _, err := os.Stat(volumePath); err == nil {
t.Errorf("TearDown() failed, volume path still exists: %s", volumePath)
} else if !os.IsNotExist(err) {
t.Errorf("TearDown() failed: %v", err)
}
}
func TestConstructVolumeSpec(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("cephTest")
if err != nil {
t.Fatalf("Can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/cephfs")
if err != nil {
t.Errorf("can't find cephfs plugin by name")
}
cephfsSpec, err := plug.(*cephfsPlugin).ConstructVolumeSpec("cephfsVolume", "/cephfsVolume/")
if cephfsSpec.Name() != "cephfsVolume" {
t.Errorf("Get wrong cephfs spec name, got: %s", cephfsSpec.Name())
}
}
type testcase struct {
name string
defaultNs string
spec *volume.Spec
// Expected return of the test
expectedName string
expectedNs string
expectedError error
}
func TestGetSecretNameAndNamespaceForPV(t *testing.T) {
tests := []testcase{
{
name: "persistent volume source",
defaultNs: "default",
spec: &volume.Spec{
PersistentVolume: &v1.PersistentVolume{
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
CephFS: &v1.CephFSPersistentVolumeSource{
Monitors: []string{"a", "b"},
User: "user",
SecretRef: &v1.SecretReference{
Name: "name",
Namespace: "ns",
},
SecretFile: "/etc/ceph/user.secret",
},
},
},
},
},
expectedName: "name",
expectedNs: "ns",
expectedError: nil,
},
{
name: "persistent volume source without namespace",
defaultNs: "default",
spec: &volume.Spec{
PersistentVolume: &v1.PersistentVolume{
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
CephFS: &v1.CephFSPersistentVolumeSource{
Monitors: []string{"a", "b"},
User: "user",
SecretRef: &v1.SecretReference{
Name: "name",
},
SecretFile: "/etc/ceph/user.secret",
},
},
},
},
},
expectedName: "name",
expectedNs: "default",
expectedError: nil,
},
{
name: "pod volume source",
defaultNs: "default",
spec: &volume.Spec{
Volume: &v1.Volume{
VolumeSource: v1.VolumeSource{
CephFS: &v1.CephFSVolumeSource{
Monitors: []string{"a", "b"},
User: "user",
SecretRef: &v1.LocalObjectReference{
Name: "name",
},
SecretFile: "/etc/ceph/user.secret",
},
},
},
},
expectedName: "name",
expectedNs: "default",
expectedError: nil,
},
}
for _, testcase := range tests {
resultName, resultNs, err := getSecretNameAndNamespace(testcase.spec, testcase.defaultNs)
if err != testcase.expectedError || resultName != testcase.expectedName || resultNs != testcase.expectedNs {
t.Errorf("%s failed: expected err=%v ns=%q name=%q, got %v/%q/%q", testcase.name, testcase.expectedError, testcase.expectedNs, testcase.expectedName,
err, resultNs, resultName)
}
}
}
func TestGetAccessModes(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("cephfs_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPersistentPluginByName("kubernetes.io/cephfs")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
modes := plug.GetAccessModes()
for _, v := range modes {
if !volumetest.ContainsAccessMode(modes, v) {
t.Errorf("Expected AccessModeTypes: %s", v)
}
}
}

pkg/volume/cephfs/doc.go

@@ -1,19 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package cephfs contains the internal representation of Ceph file system
// (CephFS) volumes.
package cephfs // import "k8s.io/kubernetes/pkg/volume/cephfs"

pkg/volume/fc/BUILD

@@ -1,65 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"attacher.go",
"disk_manager.go",
"doc.go",
"fc.go",
"fc_util.go",
],
importpath = "k8s.io/kubernetes/pkg/volume/fc",
deps = [
"//pkg/features:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/volumepathhandler:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
"//vendor/k8s.io/utils/strings:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"fc_test.go",
"fc_util_test.go",
],
embed = [":go_default_library"],
deps = [
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/client-go/util/testing:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

pkg/volume/fc/OWNERS

@@ -1,12 +0,0 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- rootfs
- saad-ali
reviewers:
- saad-ali
- jsafrane
- rootfs
- jingxu97
- msau42
- mtanino

pkg/volume/fc/attacher.go

@@ -1,240 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fc
import (
"fmt"
"os"
"strconv"
"strings"
"time"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
)
type fcAttacher struct {
host volume.VolumeHost
manager diskManager
}
var _ volume.Attacher = &fcAttacher{}
var _ volume.DeviceMounter = &fcAttacher{}
var _ volume.AttachableVolumePlugin = &fcPlugin{}
var _ volume.DeviceMountableVolumePlugin = &fcPlugin{}
func (plugin *fcPlugin) NewAttacher() (volume.Attacher, error) {
return &fcAttacher{
host: plugin.host,
manager: &fcUtil{},
}, nil
}
func (plugin *fcPlugin) NewDeviceMounter() (volume.DeviceMounter, error) {
return plugin.NewAttacher()
}
func (plugin *fcPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
mounter := plugin.host.GetMounter(plugin.GetPluginName())
return mounter.GetMountRefs(deviceMountPath)
}
func (attacher *fcAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
return "", nil
}
func (attacher *fcAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) {
volumesAttachedCheck := make(map[*volume.Spec]bool)
for _, spec := range specs {
volumesAttachedCheck[spec] = true
}
return volumesAttachedCheck, nil
}
func (attacher *fcAttacher) WaitForAttach(spec *volume.Spec, devicePath string, _ *v1.Pod, timeout time.Duration) (string, error) {
mounter, err := volumeSpecToMounter(spec, attacher.host)
if err != nil {
klog.Warningf("failed to get fc mounter: %v", err)
return "", err
}
return attacher.manager.AttachDisk(*mounter)
}
func (attacher *fcAttacher) GetDeviceMountPath(
spec *volume.Spec) (string, error) {
mounter, err := volumeSpecToMounter(spec, attacher.host)
if err != nil {
klog.Warningf("failed to get fc mounter: %v", err)
return "", err
}
return attacher.manager.MakeGlobalPDName(*mounter.fcDisk), nil
}
func (attacher *fcAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
mounter := attacher.host.GetMounter(fcPluginName)
notMnt, err := mounter.IsLikelyNotMountPoint(deviceMountPath)
if err != nil {
if os.IsNotExist(err) {
if err := os.MkdirAll(deviceMountPath, 0750); err != nil {
return err
}
notMnt = true
} else {
return err
}
}
volumeSource, readOnly, err := getVolumeSource(spec)
if err != nil {
return err
}
options := []string{}
if readOnly {
options = append(options, "ro")
}
if notMnt {
diskMounter := &mount.SafeFormatAndMount{Interface: mounter, Exec: attacher.host.GetExec(fcPluginName)}
mountOptions := volumeutil.MountOptionFromSpec(spec, options...)
err = diskMounter.FormatAndMount(devicePath, deviceMountPath, volumeSource.FSType, mountOptions)
if err != nil {
os.Remove(deviceMountPath)
return err
}
}
return nil
}
type fcDetacher struct {
mounter mount.Interface
manager diskManager
}
var _ volume.Detacher = &fcDetacher{}
var _ volume.DeviceUnmounter = &fcDetacher{}
func (plugin *fcPlugin) NewDetacher() (volume.Detacher, error) {
return &fcDetacher{
mounter: plugin.host.GetMounter(plugin.GetPluginName()),
manager: &fcUtil{},
}, nil
}
func (plugin *fcPlugin) NewDeviceUnmounter() (volume.DeviceUnmounter, error) {
return plugin.NewDetacher()
}
func (detacher *fcDetacher) Detach(volumeName string, nodeName types.NodeName) error {
return nil
}
func (detacher *fcDetacher) UnmountDevice(deviceMountPath string) error {
// Specify device name for DetachDisk later
devName, _, err := mount.GetDeviceNameFromMount(detacher.mounter, deviceMountPath)
if err != nil {
klog.Errorf("fc: failed to get device from mnt: %s\nError: %v", deviceMountPath, err)
return err
}
// Unmount deviceMountPath (= globalPDPath)
err = mount.CleanupMountPoint(deviceMountPath, detacher.mounter, false)
if err != nil {
return fmt.Errorf("fc: failed to unmount: %s\nError: %v", deviceMountPath, err)
}
unMounter := volumeSpecToUnmounter(detacher.mounter)
err = detacher.manager.DetachDisk(*unMounter, devName)
if err != nil {
return fmt.Errorf("fc: failed to detach disk: %s\nError: %v", devName, err)
}
klog.V(4).Infof("fc: successfully detached disk: %s", devName)
return nil
}
func (plugin *fcPlugin) CanAttach(spec *volume.Spec) bool {
return true
}
func volumeSpecToMounter(spec *volume.Spec, host volume.VolumeHost) (*fcDiskMounter, error) {
fc, readOnly, err := getVolumeSource(spec)
if err != nil {
return nil, err
}
var lun string
var wwids []string
if fc.Lun != nil && len(fc.TargetWWNs) != 0 {
lun = strconv.Itoa(int(*fc.Lun))
} else if len(fc.WWIDs) != 0 {
for _, wwid := range fc.WWIDs {
wwids = append(wwids, strings.Replace(wwid, " ", "_", -1))
}
} else {
return nil, fmt.Errorf("fc: no fc disk information found. failed to make a new mounter")
}
fcDisk := &fcDisk{
plugin: &fcPlugin{
host: host,
},
wwns: fc.TargetWWNs,
lun: lun,
wwids: wwids,
io: &osIOHandler{},
}
// TODO: remove feature gate check after no longer needed
if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
volumeMode, err := volumeutil.GetVolumeMode(spec)
if err != nil {
return nil, err
}
klog.V(5).Infof("fc: volumeSpecToMounter volumeMode %s", volumeMode)
return &fcDiskMounter{
fcDisk: fcDisk,
fsType: fc.FSType,
volumeMode: volumeMode,
readOnly: readOnly,
mounter: volumeutil.NewSafeFormatAndMountFromHost(fcPluginName, host),
deviceUtil: volumeutil.NewDeviceHandler(volumeutil.NewIOHandler()),
}, nil
}
return &fcDiskMounter{
fcDisk: fcDisk,
fsType: fc.FSType,
readOnly: readOnly,
mounter: volumeutil.NewSafeFormatAndMountFromHost(fcPluginName, host),
deviceUtil: volumeutil.NewDeviceHandler(volumeutil.NewIOHandler()),
}, nil
}
func volumeSpecToUnmounter(mounter mount.Interface) *fcDiskUnmounter {
return &fcDiskUnmounter{
fcDisk: &fcDisk{
io: &osIOHandler{},
},
mounter: mounter,
deviceUtil: volumeutil.NewDeviceHandler(volumeutil.NewIOHandler()),
}
}

pkg/volume/fc/disk_manager.go

@@ -1,96 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fc
import (
"os"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
)
// Abstract interface to disk operations.
type diskManager interface {
MakeGlobalPDName(disk fcDisk) string
MakeGlobalVDPDName(disk fcDisk) string
// Attaches the disk to the kubelet's host machine.
AttachDisk(b fcDiskMounter) (string, error)
// Detaches the disk from the kubelet's host machine.
DetachDisk(disk fcDiskUnmounter, devicePath string) error
// Detaches the block disk from the kubelet's host machine.
DetachBlockFCDisk(disk fcDiskUnmapper, mntPath, devicePath string) error
}
// utility to mount a disk based filesystem
func diskSetUp(manager diskManager, b fcDiskMounter, volPath string, mounter mount.Interface, fsGroup *int64) error {
globalPDPath := manager.MakeGlobalPDName(*b.fcDisk)
noMnt, err := mounter.IsLikelyNotMountPoint(volPath)
if err != nil && !os.IsNotExist(err) {
klog.Errorf("cannot validate mountpoint: %s", volPath)
return err
}
if !noMnt {
return nil
}
if err := os.MkdirAll(volPath, 0750); err != nil {
klog.Errorf("failed to mkdir:%s", volPath)
return err
}
// Perform a bind mount to the full path to allow duplicate mounts of the same disk.
options := []string{"bind"}
if b.readOnly {
options = append(options, "ro")
}
mountOptions := util.JoinMountOptions(options, b.mountOptions)
err = mounter.Mount(globalPDPath, volPath, "", mountOptions)
if err != nil {
klog.Errorf("Failed to bind mount: source:%s, target:%s, err:%v", globalPDPath, volPath, err)
noMnt, mntErr := b.mounter.IsLikelyNotMountPoint(volPath)
if mntErr != nil {
klog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
return err
}
if !noMnt {
if mntErr = b.mounter.Unmount(volPath); mntErr != nil {
klog.Errorf("Failed to unmount: %v", mntErr)
return err
}
noMnt, mntErr = b.mounter.IsLikelyNotMountPoint(volPath)
if mntErr != nil {
klog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
return err
}
if !noMnt {
// will most likely retry on next sync loop.
klog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", volPath)
return err
}
}
os.Remove(volPath)
return err
}
if !b.readOnly {
volume.SetVolumeOwnership(&b, fsGroup)
}
return nil
}
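
diskSetUp implements the usual mount-once-globally, bind-per-pod scheme: the device is formatted and mounted at a single global path, then bind mounted (read-only if requested) into each pod's volume directory, so several pods can share one attached disk. A sketch of what the mount invocation amounts to as a plain mount(8) command line (the bindMountArgs helper and both example paths are illustrative):

package main

import "fmt"

// bindMountArgs shows the shape of the bind mount diskSetUp asks the
// mounter interface to perform.
func bindMountArgs(globalPDPath, podVolPath string, readOnly bool) []string {
    opts := "bind"
    if readOnly {
        opts += ",ro"
    }
    return []string{"mount", "-o", opts, globalPDPath, podVolPath}
}

func main() {
    fmt.Println(bindMountArgs(
        "/var/lib/kubelet/plugins/kubernetes.io/fc/50060e801049cfd1-lun-0",
        "/var/lib/kubelet/pods/poduid/volumes/kubernetes.io~fc/vol1",
        true))
}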

pkg/volume/fc/doc.go

@@ -1,19 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package fc contains the internal representation of
// Fibre Channel (fc) volumes.
package fc // import "k8s.io/kubernetes/pkg/volume/fc"

pkg/volume/fc/fc.go

@@ -1,498 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fc
import (
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
utilstrings "k8s.io/utils/strings"
)
// ProbeVolumePlugins is the primary entrypoint for volume plugins.
func ProbeVolumePlugins() []volume.VolumePlugin {
return []volume.VolumePlugin{&fcPlugin{nil}}
}
type fcPlugin struct {
host volume.VolumeHost
}
var _ volume.VolumePlugin = &fcPlugin{}
var _ volume.PersistentVolumePlugin = &fcPlugin{}
var _ volume.BlockVolumePlugin = &fcPlugin{}
const (
fcPluginName = "kubernetes.io/fc"
)
func (plugin *fcPlugin) Init(host volume.VolumeHost) error {
plugin.host = host
return nil
}
func (plugin *fcPlugin) GetPluginName() string {
return fcPluginName
}
func (plugin *fcPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
volumeSource, _, err := getVolumeSource(spec)
if err != nil {
return "", err
}
// API server validates these parameters beforehand but attach/detach
// controller creates volumespec without validation. They may be nil
// or zero length. We should check again to avoid unexpected conditions.
if len(volumeSource.TargetWWNs) != 0 && volumeSource.Lun != nil {
// TargetWWNs are the FibreChannel target worldwide names
return fmt.Sprintf("%v:%v", volumeSource.TargetWWNs, *volumeSource.Lun), nil
} else if len(volumeSource.WWIDs) != 0 {
// WWIDs are the FibreChannel World Wide Identifiers
return fmt.Sprintf("%v", volumeSource.WWIDs), nil
}
return "", err
}
func (plugin *fcPlugin) CanSupport(spec *volume.Spec) bool {
return (spec.Volume != nil && spec.Volume.FC != nil) || (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.FC != nil)
}
func (plugin *fcPlugin) IsMigratedToCSI() bool {
return false
}
func (plugin *fcPlugin) RequiresRemount() bool {
return false
}
func (plugin *fcPlugin) SupportsMountOption() bool {
return false
}
func (plugin *fcPlugin) SupportsBulkVolumeVerification() bool {
return false
}
func (plugin *fcPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
return []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
v1.ReadOnlyMany,
}
}
func (plugin *fcPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
// Inject real implementations here, test through the internal function.
return plugin.newMounterInternal(spec, pod.UID, &fcUtil{}, plugin.host.GetMounter(plugin.GetPluginName()), plugin.host.GetExec(plugin.GetPluginName()))
}
func (plugin *fcPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface, exec mount.Exec) (volume.Mounter, error) {
// fc volumes used directly in a pod have a ReadOnly flag set by the pod author.
// fc volumes used as a PersistentVolume get the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV
fc, readOnly, err := getVolumeSource(spec)
if err != nil {
return nil, err
}
wwns, lun, wwids, err := getWwnsLunWwids(fc)
if err != nil {
return nil, fmt.Errorf("fc: no fc disk information found. failed to make a new mounter")
}
fcDisk := &fcDisk{
podUID: podUID,
volName: spec.Name(),
wwns: wwns,
lun: lun,
wwids: wwids,
manager: manager,
io: &osIOHandler{},
plugin: plugin,
}
// TODO: remove feature gate check after no longer needed
if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
volumeMode, err := util.GetVolumeMode(spec)
if err != nil {
return nil, err
}
klog.V(5).Infof("fc: newMounterInternal volumeMode %s", volumeMode)
return &fcDiskMounter{
fcDisk: fcDisk,
fsType: fc.FSType,
volumeMode: volumeMode,
readOnly: readOnly,
mounter: &mount.SafeFormatAndMount{Interface: mounter, Exec: exec},
deviceUtil: util.NewDeviceHandler(util.NewIOHandler()),
mountOptions: []string{},
}, nil
}
return &fcDiskMounter{
fcDisk: fcDisk,
fsType: fc.FSType,
readOnly: readOnly,
mounter: &mount.SafeFormatAndMount{Interface: mounter, Exec: exec},
deviceUtil: util.NewDeviceHandler(util.NewIOHandler()),
mountOptions: util.MountOptionFromSpec(spec),
}, nil
}
func (plugin *fcPlugin) NewBlockVolumeMapper(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.BlockVolumeMapper, error) {
// If this is called via GenerateUnmapDeviceFunc(), pod is nil.
// Pass empty string as dummy uid since uid isn't used in the case.
var uid types.UID
if pod != nil {
uid = pod.UID
}
return plugin.newBlockVolumeMapperInternal(spec, uid, &fcUtil{}, plugin.host.GetMounter(plugin.GetPluginName()), plugin.host.GetExec(plugin.GetPluginName()))
}
func (plugin *fcPlugin) newBlockVolumeMapperInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface, exec mount.Exec) (volume.BlockVolumeMapper, error) {
fc, readOnly, err := getVolumeSource(spec)
if err != nil {
return nil, err
}
wwns, lun, wwids, err := getWwnsLunWwids(fc)
if err != nil {
return nil, fmt.Errorf("fc: no fc disk information found. failed to make a new mapper")
}
return &fcDiskMapper{
fcDisk: &fcDisk{
podUID: podUID,
volName: spec.Name(),
wwns: wwns,
lun: lun,
wwids: wwids,
manager: manager,
io: &osIOHandler{},
plugin: plugin},
readOnly: readOnly,
mounter: &mount.SafeFormatAndMount{Interface: mounter, Exec: exec},
deviceUtil: util.NewDeviceHandler(util.NewIOHandler()),
}, nil
}
func (plugin *fcPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
// Inject real implementations here, test through the internal function.
return plugin.newUnmounterInternal(volName, podUID, &fcUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
}
func (plugin *fcPlugin) newUnmounterInternal(volName string, podUID types.UID, manager diskManager, mounter mount.Interface) (volume.Unmounter, error) {
return &fcDiskUnmounter{
fcDisk: &fcDisk{
podUID: podUID,
volName: volName,
manager: manager,
plugin: plugin,
io: &osIOHandler{},
},
mounter: mounter,
deviceUtil: util.NewDeviceHandler(util.NewIOHandler()),
}, nil
}
func (plugin *fcPlugin) NewBlockVolumeUnmapper(volName string, podUID types.UID) (volume.BlockVolumeUnmapper, error) {
return plugin.newUnmapperInternal(volName, podUID, &fcUtil{})
}
func (plugin *fcPlugin) newUnmapperInternal(volName string, podUID types.UID, manager diskManager) (volume.BlockVolumeUnmapper, error) {
return &fcDiskUnmapper{
fcDisk: &fcDisk{
podUID: podUID,
volName: volName,
manager: manager,
plugin: plugin,
io: &osIOHandler{},
},
deviceUtil: util.NewDeviceHandler(util.NewIOHandler()),
}, nil
}
func (plugin *fcPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
// Find globalPDPath from pod volume directory (mountPath)
// examples:
// mountPath: pods/{podUid}/volumes/kubernetes.io~fc/{volumeName}
// globalPDPath : plugins/kubernetes.io/fc/50060e801049cfd1-lun-0
var globalPDPath string
mounter := plugin.host.GetMounter(plugin.GetPluginName())
paths, err := mounter.GetMountRefs(mountPath)
if err != nil {
return nil, err
}
for _, path := range paths {
if strings.Contains(path, plugin.host.GetPluginDir(fcPluginName)) {
globalPDPath = path
break
}
}
// Couldn't fetch globalPDPath
if len(globalPDPath) == 0 {
return nil, fmt.Errorf("couldn't fetch globalPDPath. failed to obtain volume spec")
}
wwns, lun, wwids, err := parsePDName(globalPDPath)
if err != nil {
return nil, fmt.Errorf("failed to retrieve volume plugin information from globalPDPath: %s", err)
}
// Create volume from wwn+lun or wwid
fcVolume := &v1.Volume{
Name: volumeName,
VolumeSource: v1.VolumeSource{
FC: &v1.FCVolumeSource{WWIDs: wwids, Lun: &lun, TargetWWNs: wwns},
},
}
klog.V(5).Infof("ConstructVolumeSpec: TargetWWNs: %v, Lun: %v, WWIDs: %v",
fcVolume.VolumeSource.FC.TargetWWNs, *fcVolume.VolumeSource.FC.Lun, fcVolume.VolumeSource.FC.WWIDs)
return volume.NewSpecFromVolume(fcVolume), nil
}
// ConstructBlockVolumeSpec creates a new volume.Spec with the following steps:
// - Searches for a file named {pod uuid} under the volume plugin directory.
// - If a file is found, then retrieves volumePluginDependentPath from globalMapPathUUID.
// - Once volumePluginDependentPath is obtained, stores the volume information in a VolumeSource
// examples:
// mapPath: pods/{podUid}/{DefaultKubeletVolumeDevicesDirName}/{escapeQualifiedPluginName}/{volumeName}
// globalMapPathUUID : plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath}/{pod uuid}
func (plugin *fcPlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeName, mapPath string) (*volume.Spec, error) {
pluginDir := plugin.host.GetVolumeDevicePluginDir(fcPluginName)
blkutil := volumepathhandler.NewBlockVolumePathHandler()
globalMapPathUUID, err := blkutil.FindGlobalMapPathUUIDFromPod(pluginDir, mapPath, podUID)
if err != nil {
return nil, err
}
klog.V(5).Infof("globalMapPathUUID: %v, err: %v", globalMapPathUUID, err)
// Retrieve globalPDPath from globalMapPathUUID
// globalMapPathUUID examples:
// wwn+lun: plugins/kubernetes.io/fc/volumeDevices/50060e801049cfd1-lun-0/{pod uuid}
// wwid: plugins/kubernetes.io/fc/volumeDevices/3600508b400105e210000900000490000/{pod uuid}
globalPDPath := filepath.Dir(globalMapPathUUID)
// Create volume from wwn+lun or wwid
wwns, lun, wwids, err := parsePDName(globalPDPath)
if err != nil {
return nil, fmt.Errorf("failed to retrieve volume plugin information from globalPDPath: %s", err)
}
fcPV := createPersistentVolumeFromFCVolumeSource(volumeName,
v1.FCVolumeSource{TargetWWNs: wwns, Lun: &lun, WWIDs: wwids})
klog.V(5).Infof("ConstructBlockVolumeSpec: TargetWWNs: %v, Lun: %v, WWIDs: %v",
fcPV.Spec.PersistentVolumeSource.FC.TargetWWNs,
*fcPV.Spec.PersistentVolumeSource.FC.Lun,
fcPV.Spec.PersistentVolumeSource.FC.WWIDs)
return volume.NewSpecFromPersistentVolume(fcPV, false), nil
}
type fcDisk struct {
volName string
podUID types.UID
portal string
wwns []string
lun string
wwids []string
plugin *fcPlugin
// Utility interface that provides API calls to the provider to attach/detach disks.
manager diskManager
// io handler interface
io ioHandler
volume.MetricsNil
}
func (fc *fcDisk) GetPath() string {
// safe to use PodVolumeDir now: volume teardown occurs before pod is cleaned up
return fc.plugin.host.GetPodVolumeDir(fc.podUID, utilstrings.EscapeQualifiedName(fcPluginName), fc.volName)
}
func (fc *fcDisk) fcGlobalMapPath(spec *volume.Spec) (string, error) {
mounter, err := volumeSpecToMounter(spec, fc.plugin.host)
if err != nil {
klog.Warningf("failed to get fc mounter: %v", err)
return "", err
}
return fc.manager.MakeGlobalVDPDName(*mounter.fcDisk), nil
}
func (fc *fcDisk) fcPodDeviceMapPath() (string, string) {
return fc.plugin.host.GetPodVolumeDeviceDir(fc.podUID, utilstrings.EscapeQualifiedName(fcPluginName)), fc.volName
}
type fcDiskMounter struct {
*fcDisk
readOnly bool
fsType string
volumeMode v1.PersistentVolumeMode
mounter *mount.SafeFormatAndMount
deviceUtil util.DeviceUtil
mountOptions []string
}
var _ volume.Mounter = &fcDiskMounter{}
func (b *fcDiskMounter) GetAttributes() volume.Attributes {
return volume.Attributes{
ReadOnly: b.readOnly,
Managed: !b.readOnly,
SupportsSELinux: true,
}
}
// Checks prior to mount operations to verify that the required components (binaries, etc.)
// to mount the volume are available on the underlying node.
// If not, it returns an error
func (b *fcDiskMounter) CanMount() error {
return nil
}
func (b *fcDiskMounter) SetUp(fsGroup *int64) error {
return b.SetUpAt(b.GetPath(), fsGroup)
}
func (b *fcDiskMounter) SetUpAt(dir string, fsGroup *int64) error {
// diskSetUp checks mountpoints and prevents repeated calls
err := diskSetUp(b.manager, *b, dir, b.mounter, fsGroup)
if err != nil {
klog.Errorf("fc: failed to setup")
}
return err
}
type fcDiskUnmounter struct {
*fcDisk
mounter mount.Interface
deviceUtil util.DeviceUtil
}
var _ volume.Unmounter = &fcDiskUnmounter{}
// Unmounts the bind mount, and detaches the disk only if the disk
// resource was the last reference to that disk on the kubelet.
func (c *fcDiskUnmounter) TearDown() error {
return c.TearDownAt(c.GetPath())
}
func (c *fcDiskUnmounter) TearDownAt(dir string) error {
return mount.CleanupMountPoint(dir, c.mounter, false)
}
// Block Volumes Support
type fcDiskMapper struct {
*fcDisk
readOnly bool
mounter mount.Interface
deviceUtil util.DeviceUtil
}
var _ volume.BlockVolumeMapper = &fcDiskMapper{}
func (b *fcDiskMapper) SetUpDevice() (string, error) {
return "", nil
}
func (b *fcDiskMapper) MapDevice(devicePath, globalMapPath, volumeMapPath, volumeMapName string, podUID types.UID) error {
return util.MapBlockVolume(devicePath, globalMapPath, volumeMapPath, volumeMapName, podUID)
}
type fcDiskUnmapper struct {
*fcDisk
deviceUtil util.DeviceUtil
}
var _ volume.BlockVolumeUnmapper = &fcDiskUnmapper{}
func (c *fcDiskUnmapper) TearDownDevice(mapPath, devicePath string) error {
err := c.manager.DetachBlockFCDisk(*c, mapPath, devicePath)
if err != nil {
return fmt.Errorf("fc: failed to detach disk: %s\nError: %v", mapPath, err)
}
klog.V(4).Infof("fc: %s is unmounted, deleting the directory", mapPath)
if err = os.RemoveAll(mapPath); err != nil {
return fmt.Errorf("fc: failed to delete the directory: %s\nError: %v", mapPath, err)
}
klog.V(4).Infof("fc: successfully detached disk: %s", mapPath)
return nil
}
// GetGlobalMapPath returns global map path and error
// path: plugins/kubernetes.io/{PluginName}/volumeDevices/{WWID}/{podUid}
func (fc *fcDisk) GetGlobalMapPath(spec *volume.Spec) (string, error) {
return fc.fcGlobalMapPath(spec)
}
// GetPodDeviceMapPath returns pod device map path and volume name
// path: pods/{podUid}/volumeDevices/kubernetes.io~fc
// volumeName: pv0001
func (fc *fcDisk) GetPodDeviceMapPath() (string, string) {
return fc.fcPodDeviceMapPath()
}
func getVolumeSource(spec *volume.Spec) (*v1.FCVolumeSource, bool, error) {
// fc volumes used directly in a pod have a ReadOnly flag set by the pod author.
// fc volumes used as a PersistentVolume get the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV
if spec.Volume != nil && spec.Volume.FC != nil {
return spec.Volume.FC, spec.Volume.FC.ReadOnly, nil
} else if spec.PersistentVolume != nil &&
spec.PersistentVolume.Spec.FC != nil {
return spec.PersistentVolume.Spec.FC, spec.ReadOnly, nil
}
return nil, false, fmt.Errorf("Spec does not reference a FibreChannel volume type")
}
func createPersistentVolumeFromFCVolumeSource(volumeName string, fc v1.FCVolumeSource) *v1.PersistentVolume {
block := v1.PersistentVolumeBlock
return &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: volumeName,
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
FC: &fc,
},
VolumeMode: &block,
},
}
}
func getWwnsLunWwids(fc *v1.FCVolumeSource) ([]string, string, []string, error) {
var lun string
var wwids []string
if fc.Lun != nil && len(fc.TargetWWNs) != 0 {
lun = strconv.Itoa(int(*fc.Lun))
return fc.TargetWWNs, lun, wwids, nil
}
if len(fc.WWIDs) != 0 {
for _, wwid := range fc.WWIDs {
wwids = append(wwids, strings.Replace(wwid, " ", "_", -1))
}
return fc.TargetWWNs, lun, wwids, nil
}
return nil, "", nil, fmt.Errorf("fc: no fc disk information found")
}
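
GetVolumeName and getWwnsLunWwids together define an FC volume's identity: the target WWNs plus LUN when both are present, otherwise the WWIDs with spaces normalized to underscores. A self-contained sketch combining the two branches (fcVolumeName is a hypothetical helper, not in the deleted file):

package main

import (
    "fmt"
    "strings"
)

// fcVolumeName merges GetVolumeName's two naming branches with the WWID
// normalization from getWwnsLunWwids.
func fcVolumeName(targetWWNs []string, lun *int32, wwids []string) (string, error) {
    if lun != nil && len(targetWWNs) != 0 {
        return fmt.Sprintf("%v:%v", targetWWNs, *lun), nil
    }
    if len(wwids) != 0 {
        norm := make([]string, 0, len(wwids))
        for _, w := range wwids {
            norm = append(norm, strings.Replace(w, " ", "_", -1))
        }
        return fmt.Sprintf("%v", norm), nil
    }
    return "", fmt.Errorf("fc: no fc disk information found")
}

func main() {
    lun := int32(0)
    name, _ := fcVolumeName([]string{"50060e801049cfd1"}, &lun, nil)
    fmt.Println(name) // [50060e801049cfd1]:0
}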

pkg/volume/fc/fc_test.go

@@ -1,512 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fc
import (
"fmt"
"os"
"runtime"
"strconv"
"strings"
"testing"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes/fake"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
)
func TestCanSupport(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("fc_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/fc")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
if plug.GetPluginName() != "kubernetes.io/fc" {
t.Errorf("Wrong name: %s", plug.GetPluginName())
}
if plug.CanSupport(&volume.Spec{}) {
t.Errorf("Expected false")
}
if plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{}}}) {
t.Errorf("Expected false")
}
if !plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{FC: &v1.FCVolumeSource{}}}}) {
t.Errorf("Expected true")
}
if plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{}}}) {
t.Errorf("Expected false")
}
if plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{}}}}) {
t.Errorf("Expected false")
}
if !plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{FC: &v1.FCVolumeSource{}}}}}) {
t.Errorf("Expected true")
}
}
func TestGetAccessModes(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("fc_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPersistentPluginByName("kubernetes.io/fc")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
if !volumetest.ContainsAccessMode(plug.GetAccessModes(), v1.ReadWriteOnce) || !volumetest.ContainsAccessMode(plug.GetAccessModes(), v1.ReadOnlyMany) {
t.Errorf("Expected two AccessModeTypes: %s and %s", v1.ReadWriteOnce, v1.ReadOnlyMany)
}
}
type fakeDiskManager struct {
tmpDir string
attachCalled bool
detachCalled bool
}
func newFakeDiskManager() *fakeDiskManager {
return &fakeDiskManager{
tmpDir: utiltesting.MkTmpdirOrDie("fc_test"),
}
}
func (fake *fakeDiskManager) Cleanup() {
os.RemoveAll(fake.tmpDir)
}
func (fake *fakeDiskManager) MakeGlobalPDName(disk fcDisk) string {
return fake.tmpDir
}
func (fake *fakeDiskManager) MakeGlobalVDPDName(disk fcDisk) string {
return fake.tmpDir
}
func (fake *fakeDiskManager) AttachDisk(b fcDiskMounter) (string, error) {
globalPath := b.manager.MakeGlobalPDName(*b.fcDisk)
err := os.MkdirAll(globalPath, 0750)
if err != nil {
return "", err
}
// Simulate the global mount so that the fakeMounter returns the
// expected number of mounts for the attached disk.
b.mounter.Mount(globalPath, globalPath, b.fsType, nil)
fake.attachCalled = true
return "", nil
}
func (fake *fakeDiskManager) DetachDisk(c fcDiskUnmounter, mntPath string) error {
globalPath := c.manager.MakeGlobalPDName(*c.fcDisk)
err := os.RemoveAll(globalPath)
if err != nil {
return err
}
fake.detachCalled = true
return nil
}
func (fake *fakeDiskManager) DetachBlockFCDisk(c fcDiskUnmapper, mapPath, devicePath string) error {
err := os.RemoveAll(mapPath)
if err != nil {
return err
}
fake.detachCalled = true
return nil
}
func doTestPlugin(t *testing.T, spec *volume.Spec) {
tmpDir, err := utiltesting.MkTmpdir("fc_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/fc")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
fakeManager := newFakeDiskManager()
defer fakeManager.Cleanup()
fakeMounter := &mount.FakeMounter{}
fakeExec := mount.NewFakeExec(nil)
mounter, err := plug.(*fcPlugin).newMounterInternal(spec, types.UID("poduid"), fakeManager, fakeMounter, fakeExec)
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
}
if mounter == nil {
t.Errorf("Got a nil Mounter: %v", err)
}
path := mounter.GetPath()
expectedPath := fmt.Sprintf("%s/pods/poduid/volumes/kubernetes.io~fc/vol1", tmpDir)
if path != expectedPath {
t.Errorf("Unexpected path, expected %q, got: %q", expectedPath, path)
}
if err := mounter.SetUp(nil); err != nil {
t.Errorf("Expected success, got: %v", err)
}
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) {
t.Errorf("SetUp() failed, volume path not created: %s", path)
} else {
t.Errorf("SetUp() failed: %v", err)
}
}
fakeManager2 := newFakeDiskManager()
defer fakeManager2.Cleanup()
unmounter, err := plug.(*fcPlugin).newUnmounterInternal("vol1", types.UID("poduid"), fakeManager2, fakeMounter)
if err != nil {
t.Errorf("Failed to make a new Unmounter: %v", err)
}
if unmounter == nil {
t.Errorf("Got a nil Unmounter: %v", err)
}
if err := unmounter.TearDown(); err != nil {
t.Errorf("Expected success, got: %v", err)
}
if _, err := os.Stat(path); err == nil {
t.Errorf("TearDown() failed, volume path still exists: %s", path)
} else if !os.IsNotExist(err) {
t.Errorf("TearDown() failed: %v", err)
}
}
func doTestPluginNilMounter(t *testing.T, spec *volume.Spec) {
tmpDir, err := utiltesting.MkTmpdir("fc_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/fc")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
fakeManager := newFakeDiskManager()
defer fakeManager.Cleanup()
fakeMounter := &mount.FakeMounter{}
fakeExec := mount.NewFakeExec(nil)
mounter, err := plug.(*fcPlugin).newMounterInternal(spec, types.UID("poduid"), fakeManager, fakeMounter, fakeExec)
if err == nil {
t.Errorf("Expected newMounterInternal to fail, but it returned no error")
}
if mounter != nil {
t.Errorf("Expected a nil Mounter, got: %v", mounter)
}
}
func TestPluginVolume(t *testing.T) {
lun := int32(0)
vol := &v1.Volume{
Name: "vol1",
VolumeSource: v1.VolumeSource{
FC: &v1.FCVolumeSource{
TargetWWNs: []string{"500a0981891b8dc5"},
FSType: "ext4",
Lun: &lun,
},
},
}
doTestPlugin(t, volume.NewSpecFromVolume(vol))
}
func TestPluginPersistentVolume(t *testing.T) {
lun := int32(0)
fs := v1.PersistentVolumeFilesystem
vol := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "vol1",
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
FC: &v1.FCVolumeSource{
TargetWWNs: []string{"500a0981891b8dc5"},
FSType: "ext4",
Lun: &lun,
},
},
VolumeMode: &fs,
},
}
doTestPlugin(t, volume.NewSpecFromPersistentVolume(vol, false))
}
func TestPluginVolumeWWIDs(t *testing.T) {
vol := &v1.Volume{
Name: "vol1",
VolumeSource: v1.VolumeSource{
FC: &v1.FCVolumeSource{
WWIDs: []string{"3600508b400105e210000900000490000"},
FSType: "ext4",
},
},
}
doTestPlugin(t, volume.NewSpecFromVolume(vol))
}
func TestPluginPersistentVolumeWWIDs(t *testing.T) {
fs := v1.PersistentVolumeFilesystem
vol := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "vol1",
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
FC: &v1.FCVolumeSource{
WWIDs: []string{"3600508b400105e21 000900000490000"},
FSType: "ext4",
},
},
VolumeMode: &fs,
},
}
doTestPlugin(t, volume.NewSpecFromPersistentVolume(vol, false))
}
func TestPluginVolumeNoDiskInfo(t *testing.T) {
vol := &v1.Volume{
Name: "vol1",
VolumeSource: v1.VolumeSource{
FC: &v1.FCVolumeSource{
FSType: "ext4",
},
},
}
doTestPluginNilMounter(t, volume.NewSpecFromVolume(vol))
}
func TestPluginPersistentVolumeNoDiskInfo(t *testing.T) {
fs := v1.PersistentVolumeFilesystem
vol := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "vol1",
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
FC: &v1.FCVolumeSource{
FSType: "ext4",
},
},
VolumeMode: &fs,
},
}
doTestPluginNilMounter(t, volume.NewSpecFromPersistentVolume(vol, false))
}
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("fc_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
lun := int32(0)
fs := v1.PersistentVolumeFilesystem
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "pvA",
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
FC: &v1.FCVolumeSource{
TargetWWNs: []string{"some_wwn"},
FSType: "ext4",
Lun: &lun,
},
},
ClaimRef: &v1.ObjectReference{
Name: "claimA",
},
VolumeMode: &fs,
},
}
claim := &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: "claimA",
Namespace: "nsA",
},
Spec: v1.PersistentVolumeClaimSpec{
VolumeName: "pvA",
VolumeMode: &fs,
},
Status: v1.PersistentVolumeClaimStatus{
Phase: v1.ClaimBound,
},
}
client := fake.NewSimpleClientset(pv, claim)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, client, nil))
plug, _ := plugMgr.FindPluginByName(fcPluginName)
// readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
spec := volume.NewSpecFromPersistentVolume(pv, true)
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})
if mounter == nil {
t.Fatalf("Got a nil Mounter")
}
if !mounter.GetAttributes().ReadOnly {
t.Errorf("Expected true for mounter.GetAttributes().ReadOnly")
}
}
func Test_getWwnsLun(t *testing.T) {
num := int32(0)
fc := &v1.FCVolumeSource{
TargetWWNs: []string{"500a0981891b8dc5"},
FSType: "ext4",
Lun: &num,
}
wwn, lun, _, err := getWwnsLunWwids(fc)
// expect the wwns and lun to be parsed without error
if (len(wwn) == 0 && lun != "0") || err != nil {
t.Errorf("no fc disk found")
}
}
func Test_getWwids(t *testing.T) {
fc := &v1.FCVolumeSource{
FSType: "ext4",
WWIDs: []string{"3600508b400105e210000900000490000"},
}
_, _, wwid, err := getWwnsLunWwids(fc)
// expect the wwids to be parsed without error
if len(wwid) == 0 || err != nil {
t.Errorf("no fc disk found")
}
}
func Test_getWwnsLunWwidsError(t *testing.T) {
fc := &v1.FCVolumeSource{
FSType: "ext4",
}
wwn, lun, wwid, err := getWwnsLunWwids(fc)
// expect no wwns, lun, or wwids, and a non-nil error
if (len(wwn) != 0 && lun != "" && len(wwid) != 0) || err == nil {
t.Errorf("unexpected fc disk found")
}
}
func Test_ConstructVolumeSpec(t *testing.T) {
if runtime.GOOS == "darwin" {
t.Skipf("Test_ConstructVolumeSpec is not supported on GOOS=%s", runtime.GOOS)
}
fm := &mount.FakeMounter{
MountPoints: []mount.MountPoint{
{Device: "/dev/sdb", Path: "/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~fc/fc-in-pod1"},
{Device: "/dev/sdb", Path: "/var/lib/kubelet/plugins/kubernetes.io/fc/50060e801049cfd1-lun-0"},
{Device: "/dev/sdc", Path: "/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~fc/fc-in-pod2"},
{Device: "/dev/sdc", Path: "/var/lib/kubelet/plugins/kubernetes.io/fc/volumeDevices/3600508b400105e210000900000490000"},
},
}
mountPaths := []string{
"/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~fc/fc-in-pod1",
"/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~fc/fc-in-pod2",
}
for _, path := range mountPaths {
refs, err := fm.GetMountRefs(path)
if err != nil {
t.Errorf("couldn't get mountrefs. err: %v", err)
}
var globalPDPath string
for _, ref := range refs {
if strings.Contains(ref, "kubernetes.io/fc") {
globalPDPath = ref
break
}
}
if len(globalPDPath) == 0 {
t.Errorf("couldn't fetch mountrefs")
}
arr := strings.Split(globalPDPath, "/")
if len(arr) < 1 {
t.Errorf("failed to retrieve volume plugin information from globalPDPath: %v", globalPDPath)
}
volumeInfo := arr[len(arr)-1]
if strings.Contains(volumeInfo, "-lun-") {
wwnLun := strings.Split(volumeInfo, "-lun-")
if len(wwnLun) < 2 {
t.Errorf("failed to retrieve TargetWWN and Lun. volumeInfo is invalid: %v", volumeInfo)
}
lun, _ := strconv.Atoi(wwnLun[1])
lun32 := int32(lun)
if wwnLun[0] != "50060e801049cfd1" || lun32 != 0 {
t.Errorf("failed to retrieve TargetWWN and Lun")
}
} else {
if volumeInfo != "3600508b400105e210000900000490000" {
t.Errorf("failed to retrieve WWIDs")
}
}
}
}
func Test_ConstructVolumeSpecNoRefs(t *testing.T) {
fm := &mount.FakeMounter{
MountPoints: []mount.MountPoint{
{Device: "/dev/sdd", Path: "/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~fc/fc-in-pod1"},
},
}
mountPaths := []string{
"/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~fc/fc-in-pod1",
}
for _, path := range mountPaths {
refs, _ := fm.GetMountRefs(path)
var globalPDPath string
for _, ref := range refs {
if strings.Contains(ref, "kubernetes.io/fc") {
globalPDPath = ref
break
}
}
if len(globalPDPath) != 0 {
t.Errorf("invalid globalPDPath")
}
}
}


@ -1,414 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fc
import (
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"k8s.io/api/core/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
)
type ioHandler interface {
ReadDir(dirname string) ([]os.FileInfo, error)
Lstat(name string) (os.FileInfo, error)
EvalSymlinks(path string) (string, error)
WriteFile(filename string, data []byte, perm os.FileMode) error
}
type osIOHandler struct{}
const (
byPath = "/dev/disk/by-path/"
byID = "/dev/disk/by-id/"
)
func (handler *osIOHandler) ReadDir(dirname string) ([]os.FileInfo, error) {
return ioutil.ReadDir(dirname)
}
func (handler *osIOHandler) Lstat(name string) (os.FileInfo, error) {
return os.Lstat(name)
}
func (handler *osIOHandler) EvalSymlinks(path string) (string, error) {
return filepath.EvalSymlinks(path)
}
func (handler *osIOHandler) WriteFile(filename string, data []byte, perm os.FileMode) error {
return ioutil.WriteFile(filename, data, perm)
}
// given a wwn and lun, find the device and associated devicemapper parent
func findDisk(wwn, lun string, io ioHandler, deviceUtil volumeutil.DeviceUtil) (string, string) {
fcPath := "-fc-0x" + wwn + "-lun-" + lun
devPath := byPath
if dirs, err := io.ReadDir(devPath); err == nil {
for _, f := range dirs {
name := f.Name()
if strings.Contains(name, fcPath) {
if disk, err1 := io.EvalSymlinks(devPath + name); err1 == nil {
dm := deviceUtil.FindMultipathDeviceForDevice(disk)
klog.Infof("fc: find disk: %v, dm: %v", disk, dm)
return disk, dm
}
}
}
}
return "", ""
}
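// Illustrative example (added): a by-path entry such as
// "pci-0000:41:00.0-fc-0x500a0981891b8dc5-lun-0" contains the "-fc-0x<wwn>-lun-<lun>"
// substring that findDisk matches, so findDisk("500a0981891b8dc5", "0", ...) resolves that
// symlink to the underlying /dev/sdX device and, when one exists, its multipath dm parent.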
// given a wwid, find the device and associated devicemapper parent
func findDiskWWIDs(wwid string, io ioHandler, deviceUtil volumeutil.DeviceUtil) (string, string) {
// Example wwid format:
// 3600508b400105e210000900000490000
// <VENDOR NAME> <IDENTIFIER NUMBER>
// Example of symlink under by-id:
// /dev/disk/by-id/scsi-3600508b400105e210000900000490000
// /dev/disk/by-id/scsi-<VENDOR NAME>_<IDENTIFIER NUMBER>
// The wwid may contain whitespace, which is replaced with an
// underscore when the wwid is exposed under /dev/disk/by-id.
fcPath := "scsi-" + wwid
devID := byID
if dirs, err := io.ReadDir(devID); err == nil {
for _, f := range dirs {
name := f.Name()
if name == fcPath {
disk, err := io.EvalSymlinks(devID + name)
if err != nil {
klog.V(2).Infof("fc: failed to find a corresponding disk from symlink[%s], error %v", devID+name, err)
return "", ""
}
dm := deviceUtil.FindMultipathDeviceForDevice(disk)
klog.Infof("fc: find disk: %v, dm: %v", disk, dm)
return disk, dm
}
}
}
klog.V(2).Infof("fc: failed to find a disk [%s]", devID+fcPath)
return "", ""
}
// Removes a scsi device based upon /dev/sdX name
func removeFromScsiSubsystem(deviceName string, io ioHandler) {
fileName := "/sys/block/" + deviceName + "/device/delete"
klog.V(4).Infof("fc: remove device from scsi-subsystem: path: %s", fileName)
data := []byte("1")
io.WriteFile(fileName, data, 0666)
}
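// Example (added for illustration): for deviceName "sdb" this writes "1" to
// /sys/block/sdb/device/delete, asking the kernel to remove /dev/sdb from the node.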
// rescan scsi bus
func scsiHostRescan(io ioHandler) {
scsiPath := "/sys/class/scsi_host/"
if dirs, err := io.ReadDir(scsiPath); err == nil {
for _, f := range dirs {
name := scsiPath + f.Name() + "/scan"
data := []byte("- - -")
io.WriteFile(name, data, 0666)
}
}
}
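// Note (added for illustration): "- - -" is the wildcard channel/target/lun triple, so
// writing it to every /sys/class/scsi_host/hostN/scan file forces a full rescan of each
// SCSI host adapter.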
// make a directory like /var/lib/kubelet/plugins/kubernetes.io/fc/target1-target2-lun-0
func makePDNameInternal(host volume.VolumeHost, wwns []string, lun string, wwids []string) string {
if len(wwns) != 0 {
w := strings.Join(wwns, "-")
return path.Join(host.GetPluginDir(fcPluginName), w+"-lun-"+lun)
}
return path.Join(host.GetPluginDir(fcPluginName), strings.Join(wwids, "-"))
}
// make a directory like /var/lib/kubelet/plugins/kubernetes.io/fc/volumeDevices/target-lun-0
func makeVDPDNameInternal(host volume.VolumeHost, wwns []string, lun string, wwids []string) string {
if len(wwns) != 0 {
w := strings.Join(wwns, "-")
return path.Join(host.GetVolumeDevicePluginDir(fcPluginName), w+"-lun-"+lun)
}
return path.Join(host.GetVolumeDevicePluginDir(fcPluginName), strings.Join(wwids, "-"))
}
func parsePDName(path string) (wwns []string, lun int32, wwids []string, err error) {
// parse directory name created by makePDNameInternal or makeVDPDNameInternal
dirname := filepath.Base(path)
components := strings.Split(dirname, "-")
l := len(components)
if l == 1 {
// No '-', so it must be a single WWID
return nil, 0, components, nil
}
if components[l-2] == "lun" {
// it has -lun-, so it's a list of WWNs with the lun number as the last component
if l == 2 {
return nil, 0, nil, fmt.Errorf("no wwn in: %s", dirname)
}
lun, err := strconv.Atoi(components[l-1])
if err != nil {
return nil, 0, nil, err
}
return components[:l-2], int32(lun), nil, nil
}
// no -lun-, so it's just a list of WWIDs
return nil, 0, components, nil
}
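// Worked examples (added for illustration, mirroring the package tests):
// parsePDName(".../50050768030539b6-lun-0") -> wwns ["50050768030539b6"], lun 0;
// parsePDName(".../50050768030539b6-50050768030539b7-lun-0") -> both WWNs, lun 0;
// parsePDName(".../60050763008084e6e0000000000001ae") -> wwids ["60050763008084e6e0000000000001ae"].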
type fcUtil struct{}
func (util *fcUtil) MakeGlobalPDName(fc fcDisk) string {
return makePDNameInternal(fc.plugin.host, fc.wwns, fc.lun, fc.wwids)
}
// Global volume device plugin dir
func (util *fcUtil) MakeGlobalVDPDName(fc fcDisk) string {
return makeVDPDNameInternal(fc.plugin.host, fc.wwns, fc.lun, fc.wwids)
}
func searchDisk(b fcDiskMounter) (string, error) {
var diskIDs []string
var disk string
var dm string
io := b.io
wwids := b.wwids
wwns := b.wwns
lun := b.lun
if len(wwns) != 0 {
diskIDs = wwns
} else {
diskIDs = wwids
}
rescanned := false
// two-phase search:
// first phase: search the existing device paths and, if a multipath dm is found, exit the loop;
// otherwise, in the second phase, rescan the scsi bus, search again, and return any findings
for {
for _, diskID := range diskIDs {
if len(wwns) != 0 {
disk, dm = findDisk(diskID, lun, io, b.deviceUtil)
} else {
disk, dm = findDiskWWIDs(diskID, io, b.deviceUtil)
}
// if a multipath device is found, break
if dm != "" {
break
}
}
// if a dm is found, exit the loop
if rescanned || dm != "" {
break
}
// rescan the scsi bus and search again
scsiHostRescan(io)
rescanned = true
}
// if no disk matches input wwn and lun, exit
if disk == "" && dm == "" {
return "", fmt.Errorf("no fc disk found")
}
// if multipath devicemapper device is found, use it; otherwise use raw disk
if dm != "" {
return dm, nil
}
return disk, nil
}
func (util *fcUtil) AttachDisk(b fcDiskMounter) (string, error) {
devicePath, err := searchDisk(b)
if err != nil {
return "", err
}
// TODO: remove feature gate check after no longer needed
if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
// If the volumeMode is 'Block', plugin don't have to format the volume.
// The globalPDPath will be created by operationexecutor. Just return devicePath here.
klog.V(5).Infof("fc: AttachDisk volumeMode: %s, devicePath: %s", b.volumeMode, devicePath)
if b.volumeMode == v1.PersistentVolumeBlock {
return devicePath, nil
}
}
// mount it
globalPDPath := util.MakeGlobalPDName(*b.fcDisk)
if err := os.MkdirAll(globalPDPath, 0750); err != nil {
return devicePath, fmt.Errorf("fc: failed to mkdir %s, error %v", globalPDPath, err)
}
noMnt, err := b.mounter.IsLikelyNotMountPoint(globalPDPath)
if err != nil {
return devicePath, fmt.Errorf("Heuristic determination of mount point failed: %v", err)
}
if !noMnt {
klog.Infof("fc: %s already mounted", globalPDPath)
return devicePath, nil
}
err = b.mounter.FormatAndMount(devicePath, globalPDPath, b.fsType, nil)
if err != nil {
return devicePath, fmt.Errorf("fc: failed to mount fc volume %s [%s] to %s, error %v", devicePath, b.fsType, globalPDPath, err)
}
return devicePath, err
}
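// Flow sketch (added for illustration, assuming the standard /dev/disk layout): for
// wwns=["50060e801049cfd1"] and lun "0", AttachDisk resolves the by-path symlink to a
// /dev/sdX device (or its multipath dm parent), returns that path directly for block-mode
// volumes, and otherwise FormatAndMounts it at plugins/kubernetes.io/fc/50060e801049cfd1-lun-0.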
// DetachDisk removes scsi device file such as /dev/sdX from the node.
func (util *fcUtil) DetachDisk(c fcDiskUnmounter, devicePath string) error {
var devices []string
// devicePath might be like /dev/mapper/mpathX. Find destination.
dstPath, err := c.io.EvalSymlinks(devicePath)
if err != nil {
return err
}
// Find slave
if strings.HasPrefix(dstPath, "/dev/dm-") {
devices = c.deviceUtil.FindSlaveDevicesOnMultipath(dstPath)
} else {
// Add single devicepath to devices
devices = append(devices, dstPath)
}
klog.V(4).Infof("fc: DetachDisk devicePath: %v, dstPath: %v, devices: %v", devicePath, dstPath, devices)
var lastErr error
for _, device := range devices {
err := util.detachFCDisk(c.io, device)
if err != nil {
klog.Errorf("fc: detachFCDisk failed. device: %v err: %v", device, err)
lastErr = fmt.Errorf("fc: detachFCDisk failed. device: %v err: %v", device, err)
}
}
if lastErr != nil {
klog.Errorf("fc: last error occurred during detach disk:\n%v", lastErr)
return lastErr
}
return nil
}
// detachFCDisk removes scsi device file such as /dev/sdX from the node.
func (util *fcUtil) detachFCDisk(io ioHandler, devicePath string) error {
// Remove scsi device from the node.
if !strings.HasPrefix(devicePath, "/dev/") {
return fmt.Errorf("fc detach disk: invalid device name: %s", devicePath)
}
arr := strings.Split(devicePath, "/")
dev := arr[len(arr)-1]
removeFromScsiSubsystem(dev, io)
return nil
}
// DetachBlockFCDisk detaches a volume from kubelet node, removes scsi device file
// such as /dev/sdX from the node, and then removes loopback for the scsi device.
func (util *fcUtil) DetachBlockFCDisk(c fcDiskUnmapper, mapPath, devicePath string) error {
// Check if devicePath is valid
if len(devicePath) != 0 {
if pathExists, pathErr := checkPathExists(devicePath); !pathExists || pathErr != nil {
return pathErr
}
} else {
// TODO: FC plugin can't obtain the devicePath from kubelet because devicePath
// in volume object isn't updated when volume is attached to kubelet node.
klog.Infof("fc: devicePath is empty. Try to retrieve FC configuration from global map path: %v", mapPath)
}
// Check if global map path is valid
// global map path examples:
// wwn+lun: plugins/kubernetes.io/fc/volumeDevices/50060e801049cfd1-lun-0/
// wwid: plugins/kubernetes.io/fc/volumeDevices/3600508b400105e210000900000490000/
if pathExists, pathErr := checkPathExists(mapPath); !pathExists || pathErr != nil {
return pathErr
}
// Retrieve volume plugin dependent path like '50060e801049cfd1-lun-0' from global map path
arr := strings.Split(mapPath, "/")
if len(arr) < 1 {
return fmt.Errorf("Fail to retrieve volume plugin information from global map path: %v", mapPath)
}
volumeInfo := arr[len(arr)-1]
// Search for a symbolic link matching volumeInfo under /dev/disk/by-path or /dev/disk/by-id,
// then find the destination device path from the link
searchPath := byID
if strings.Contains(volumeInfo, "-lun-") {
searchPath = byPath
}
fis, err := ioutil.ReadDir(searchPath)
if err != nil {
return err
}
for _, fi := range fis {
if strings.Contains(fi.Name(), volumeInfo) {
devicePath = path.Join(searchPath, fi.Name())
klog.V(5).Infof("fc: updated devicePath: %s", devicePath)
break
}
}
if len(devicePath) == 0 {
return fmt.Errorf("fc: failed to find corresponding device from searchPath: %v", searchPath)
}
dstPath, err := c.io.EvalSymlinks(devicePath)
if err != nil {
return err
}
klog.V(4).Infof("fc: find destination device path from symlink: %v", dstPath)
var devices []string
dm := c.deviceUtil.FindMultipathDeviceForDevice(dstPath)
if len(dm) != 0 {
dstPath = dm
}
// Detach volume from kubelet node
if len(dm) != 0 {
// Find all devices which are managed by multipath
devices = c.deviceUtil.FindSlaveDevicesOnMultipath(dm)
} else {
// Add single device path to devices
devices = append(devices, dstPath)
}
var lastErr error
for _, device := range devices {
err = util.detachFCDisk(c.io, device)
if err != nil {
klog.Errorf("fc: detachFCDisk failed. device: %v err: %v", device, err)
lastErr = fmt.Errorf("fc: detachFCDisk failed. device: %v err: %v", device, err)
}
}
if lastErr != nil {
klog.Errorf("fc: last error occurred during detach disk:\n%v", lastErr)
return lastErr
}
return nil
}
func checkPathExists(path string) (bool, error) {
if pathExists, pathErr := mount.PathExists(path); pathErr != nil {
return pathExists, fmt.Errorf("Error checking if path exists: %v", pathErr)
} else if !pathExists {
klog.Warningf("Warning: Unmap skipped because path does not exist: %v", path)
return pathExists, nil
}
return true, nil
}


@ -1,184 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fc
import (
"os"
"reflect"
"testing"
"time"
"k8s.io/kubernetes/pkg/volume/util"
)
type fakeFileInfo struct {
name string
}
func (fi *fakeFileInfo) Name() string {
return fi.name
}
func (fi *fakeFileInfo) Size() int64 {
return 0
}
func (fi *fakeFileInfo) Mode() os.FileMode {
return 777
}
func (fi *fakeFileInfo) ModTime() time.Time {
return time.Now()
}
func (fi *fakeFileInfo) IsDir() bool {
return false
}
func (fi *fakeFileInfo) Sys() interface{} {
return nil
}
type fakeIOHandler struct{}
func (handler *fakeIOHandler) ReadDir(dirname string) ([]os.FileInfo, error) {
switch dirname {
case "/dev/disk/by-path/":
f := &fakeFileInfo{
name: "pci-0000:41:00.0-fc-0x500a0981891b8dc5-lun-0",
}
return []os.FileInfo{f}, nil
case "/sys/block/":
f := &fakeFileInfo{
name: "dm-1",
}
return []os.FileInfo{f}, nil
case "/dev/disk/by-id/":
f := &fakeFileInfo{
name: "scsi-3600508b400105e210000900000490000",
}
return []os.FileInfo{f}, nil
}
return nil, nil
}
func (handler *fakeIOHandler) Lstat(name string) (os.FileInfo, error) {
return nil, nil
}
func (handler *fakeIOHandler) EvalSymlinks(path string) (string, error) {
return "/dev/sda", nil
}
func (handler *fakeIOHandler) WriteFile(filename string, data []byte, perm os.FileMode) error {
return nil
}
func TestSearchDisk(t *testing.T) {
fakeMounter := fcDiskMounter{
fcDisk: &fcDisk{
wwns: []string{"500a0981891b8dc5"},
lun: "0",
io: &fakeIOHandler{},
},
deviceUtil: util.NewDeviceHandler(util.NewIOHandler()),
}
devicePath, err := searchDisk(fakeMounter)
// expect the fake disk matching the wwn and lun to be found
if devicePath == "" || err != nil {
t.Errorf("no fc disk found")
}
}
func TestSearchDiskWWID(t *testing.T) {
fakeMounter := fcDiskMounter{
fcDisk: &fcDisk{
wwids: []string{"3600508b400105e210000900000490000"},
io: &fakeIOHandler{},
},
deviceUtil: util.NewDeviceHandler(util.NewIOHandler()),
}
devicePath, err := searchDisk(fakeMounter)
// expect the fake disk matching the wwid to be found
if devicePath == "" || err != nil {
t.Errorf("no fc disk found")
}
}
func TestParsePDName(t *testing.T) {
tests := []struct {
name string
path string
wwns []string
lun int32
wwids []string
expectError bool
}{
{
name: "single WWID",
path: "/var/lib/kubelet/plugins/kubernetes.io/fc/60050763008084e6e0000000000001ae",
wwids: []string{"60050763008084e6e0000000000001ae"},
},
{
name: "multiple WWID",
path: "/var/lib/kubelet/plugins/kubernetes.io/fc/60050763008084e6e0000000000001ae-60050763008084e6e0000000000001af",
wwids: []string{"60050763008084e6e0000000000001ae", "60050763008084e6e0000000000001af"},
},
{
name: "single WWN",
path: "/var/lib/kubelet/plugins/kubernetes.io/fc/50050768030539b6-lun-0",
wwns: []string{"50050768030539b6"},
lun: 0,
},
{
name: "multiple WWNs",
path: "/var/lib/kubelet/plugins/kubernetes.io/fc/50050768030539b6-50050768030539b7-lun-0",
wwns: []string{"50050768030539b6", "50050768030539b7"},
lun: 0,
},
{
name: "no WWNs",
path: "/var/lib/kubelet/plugins/kubernetes.io/fc/lun-0",
expectError: true,
},
{
name: "invalid lun",
path: "/var/lib/kubelet/plugins/kubernetes.io/fc/50050768030539b6-lun-x",
expectError: true,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
wwns, lun, wwids, err := parsePDName(test.path)
if test.expectError && err == nil {
t.Errorf("expected error but got none")
}
if !test.expectError && err != nil {
t.Errorf("got unexpected error: %s", err)
}
if !reflect.DeepEqual(wwns, test.wwns) {
t.Errorf("expected WWNs %+v, got %+v", test.wwns, wwns)
}
if lun != test.lun {
t.Errorf("expected lun %d, got %d", test.lun, lun)
}
if !reflect.DeepEqual(wwids, test.wwids) {
t.Errorf("expected WWIDs %+v, got %+v", test.wwids, wwids)
}
})
}
}


@ -1,67 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"flocker.go",
"flocker_util.go",
"flocker_volume.go",
],
importpath = "k8s.io/kubernetes/pkg/volume/flocker",
deps = [
"//pkg/util/env:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library",
"//staging/src/k8s.io/cloud-provider/volume/helpers:go_default_library",
"//vendor/github.com/clusterhq/flocker-go:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
"//vendor/k8s.io/utils/strings:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"flocker_test.go",
"flocker_util_test.go",
"flocker_volume_test.go",
],
embed = [":go_default_library"],
deps = [
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/client-go/util/testing:go_default_library",
"//vendor/github.com/clusterhq/flocker-go:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)


@ -1,13 +0,0 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- rootfs
- saad-ali
reviewers:
- agonzalezro
- simonswine
- saad-ali
- jsafrane
- rootfs
- jingxu97
- msau42


@ -1,18 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package flocker contains the internal representation of Flocker volumes
package flocker // import "k8s.io/kubernetes/pkg/volume/flocker"


@ -1,468 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flocker
import (
"fmt"
"os"
"path"
"time"
flockerapi "github.com/clusterhq/flocker-go"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/util/env"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
utilstrings "k8s.io/utils/strings"
)
// ProbeVolumePlugins is the primary entrypoint for volume plugins.
func ProbeVolumePlugins() []volume.VolumePlugin {
return []volume.VolumePlugin{&flockerPlugin{nil}}
}
type flockerPlugin struct {
host volume.VolumeHost
}
type flockerVolume struct {
volName string
podUID types.UID
// dataset metadata name (deprecated)
datasetName string
// dataset uuid
datasetUUID string
//pod *v1.Pod
flockerClient flockerapi.Clientable
manager volumeManager
plugin *flockerPlugin
mounter mount.Interface
volume.MetricsProvider
}
var _ volume.VolumePlugin = &flockerPlugin{}
var _ volume.PersistentVolumePlugin = &flockerPlugin{}
var _ volume.DeletableVolumePlugin = &flockerPlugin{}
var _ volume.ProvisionableVolumePlugin = &flockerPlugin{}
const (
flockerPluginName = "kubernetes.io/flocker"
defaultHost = "localhost"
defaultPort = 4523
defaultCACertFile = "/etc/flocker/cluster.crt"
defaultClientKeyFile = "/etc/flocker/apiuser.key"
defaultClientCertFile = "/etc/flocker/apiuser.crt"
defaultMountPath = "/flocker"
timeoutWaitingForVolume = 2 * time.Minute
tickerWaitingForVolume = 5 * time.Second
)
func getPath(uid types.UID, volName string, host volume.VolumeHost) string {
return host.GetPodVolumeDir(uid, utilstrings.EscapeQualifiedName(flockerPluginName), volName)
}
func makeGlobalFlockerPath(datasetUUID string) string {
return path.Join(defaultMountPath, datasetUUID)
}
func (p *flockerPlugin) Init(host volume.VolumeHost) error {
p.host = host
return nil
}
func (p *flockerPlugin) GetPluginName() string {
return flockerPluginName
}
func (p *flockerPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
volumeSource, _, err := getVolumeSource(spec)
if err != nil {
return "", err
}
return volumeSource.DatasetName, nil
}
func (p *flockerPlugin) CanSupport(spec *volume.Spec) bool {
return (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.Flocker != nil) ||
(spec.Volume != nil && spec.Volume.Flocker != nil)
}
func (p *flockerPlugin) IsMigratedToCSI() bool {
return false
}
func (p *flockerPlugin) RequiresRemount() bool {
return false
}
func (p *flockerPlugin) SupportsMountOption() bool {
return false
}
func (p *flockerPlugin) SupportsBulkVolumeVerification() bool {
return false
}
func (p *flockerPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
return []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
}
}
func (p *flockerPlugin) getFlockerVolumeSource(spec *volume.Spec) (*v1.FlockerVolumeSource, bool) {
// As far as we know this will always be read/write, but the flag is kept in case it is needed in the future
readOnly := false
if spec.Volume != nil && spec.Volume.Flocker != nil {
return spec.Volume.Flocker, readOnly
}
return spec.PersistentVolume.Spec.Flocker, readOnly
}
func (p *flockerPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
// Inject real implementations here, test through the internal function.
return p.newMounterInternal(spec, pod.UID, &flockerUtil{}, p.host.GetMounter(p.GetPluginName()))
}
func (p *flockerPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager volumeManager, mounter mount.Interface) (volume.Mounter, error) {
volumeSource, readOnly, err := getVolumeSource(spec)
if err != nil {
return nil, err
}
datasetName := volumeSource.DatasetName
datasetUUID := volumeSource.DatasetUUID
return &flockerVolumeMounter{
flockerVolume: &flockerVolume{
podUID: podUID,
volName: spec.Name(),
datasetName: datasetName,
datasetUUID: datasetUUID,
mounter: mounter,
manager: manager,
plugin: p,
MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, spec.Name(), p.host)),
},
readOnly: readOnly}, nil
}
func (p *flockerPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
// Inject real implementations here, test through the internal function.
return p.newUnmounterInternal(volName, podUID, &flockerUtil{}, p.host.GetMounter(p.GetPluginName()))
}
func (p *flockerPlugin) newUnmounterInternal(volName string, podUID types.UID, manager volumeManager, mounter mount.Interface) (volume.Unmounter, error) {
return &flockerVolumeUnmounter{&flockerVolume{
podUID: podUID,
volName: volName,
manager: manager,
mounter: mounter,
plugin: p,
MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, volName, p.host)),
}}, nil
}
func (p *flockerPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
flockerVolume := &v1.Volume{
Name: volumeName,
VolumeSource: v1.VolumeSource{
Flocker: &v1.FlockerVolumeSource{
DatasetName: volumeName,
},
},
}
return volume.NewSpecFromVolume(flockerVolume), nil
}
func (b *flockerVolume) GetDatasetUUID() (datasetUUID string, err error) {
// return UUID if set
if len(b.datasetUUID) > 0 {
return b.datasetUUID, nil
}
if b.flockerClient == nil {
return "", fmt.Errorf("Flocker client is not initialized")
}
// lookup in flocker API otherwise
return b.flockerClient.GetDatasetID(b.datasetName)
}
type flockerVolumeMounter struct {
*flockerVolume
readOnly bool
}
func (b *flockerVolumeMounter) GetAttributes() volume.Attributes {
return volume.Attributes{
ReadOnly: b.readOnly,
Managed: false,
SupportsSELinux: false,
}
}
// Checks prior to mount operations to verify that the required components (binaries, etc.)
// to mount the volume are available on the underlying node.
// If not, it returns an error
func (b *flockerVolumeMounter) CanMount() error {
return nil
}
func (b *flockerVolumeMounter) GetPath() string {
return getPath(b.podUID, b.volName, b.plugin.host)
}
// SetUp bind mounts the disk global mount to the volume path.
func (b *flockerVolumeMounter) SetUp(fsGroup *int64) error {
return b.SetUpAt(b.GetPath(), fsGroup)
}
// newFlockerClient uses environment variables and pod attributes to return a
// flocker client capable of talking with the Flocker control service.
func (p *flockerPlugin) newFlockerClient(hostIP string) (*flockerapi.Client, error) {
host := env.GetEnvAsStringOrFallback("FLOCKER_CONTROL_SERVICE_HOST", defaultHost)
port, err := env.GetEnvAsIntOrFallback("FLOCKER_CONTROL_SERVICE_PORT", defaultPort)
if err != nil {
return nil, err
}
caCertPath := env.GetEnvAsStringOrFallback("FLOCKER_CONTROL_SERVICE_CA_FILE", defaultCACertFile)
keyPath := env.GetEnvAsStringOrFallback("FLOCKER_CONTROL_SERVICE_CLIENT_KEY_FILE", defaultClientKeyFile)
certPath := env.GetEnvAsStringOrFallback("FLOCKER_CONTROL_SERVICE_CLIENT_CERT_FILE", defaultClientCertFile)
c, err := flockerapi.NewClient(host, port, hostIP, caCertPath, keyPath, certPath)
return c, err
}
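// Example configuration (added; host names are illustrative): with
// FLOCKER_CONTROL_SERVICE_HOST=flocker.example.com and FLOCKER_CONTROL_SERVICE_PORT=4523
// exported, newFlockerClient("10.0.0.5") dials the control service at
// flocker.example.com:4523 using the default certificate paths under /etc/flocker.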
func (b *flockerVolumeMounter) newFlockerClient() (*flockerapi.Client, error) {
hostIP, err := b.plugin.host.GetHostIP()
if err != nil {
return nil, err
}
return b.plugin.newFlockerClient(hostIP.String())
}
/*
SetUpAt will setup a Flocker volume following this flow of calls to the Flocker
control service:
1. Get the dataset id for the given volume name/dir
2. It should already exist; if it does not, the user needs to create it manually
3. Check the dataset's current Primary UUID
4. If it doesn't match this node's Primary UUID, then we will
need to update the Primary UUID for this volume.
5. Wait until the Primary UUID is updated, or time out.
*/
func (b *flockerVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
var err error
if b.flockerClient == nil {
b.flockerClient, err = b.newFlockerClient()
if err != nil {
return err
}
}
datasetUUID, err := b.GetDatasetUUID()
if err != nil {
return fmt.Errorf("The datasetUUID for volume with datasetName='%s' can not be found using flocker: %s", b.datasetName, err)
}
datasetState, err := b.flockerClient.GetDatasetState(datasetUUID)
if err != nil {
return fmt.Errorf("The datasetState for volume with datasetUUID='%s' could not determinted uusing flocker: %s", datasetUUID, err)
}
primaryUUID, err := b.flockerClient.GetPrimaryUUID()
if err != nil {
return err
}
if datasetState.Primary != primaryUUID {
if err := b.updateDatasetPrimary(datasetUUID, primaryUUID); err != nil {
return err
}
_, err := b.flockerClient.GetDatasetState(datasetUUID)
if err != nil {
return fmt.Errorf("The volume with datasetUUID='%s' migrated unsuccessfully", datasetUUID)
}
}
// TODO: handle failed mounts here.
notMnt, err := b.mounter.IsLikelyNotMountPoint(dir)
klog.V(4).Infof("flockerVolume set up: %s %v %v, datasetUUID %v readOnly %v", dir, !notMnt, err, datasetUUID, b.readOnly)
if err != nil && !os.IsNotExist(err) {
klog.Errorf("cannot validate mount point: %s %v", dir, err)
return err
}
if !notMnt {
return nil
}
if err := os.MkdirAll(dir, 0750); err != nil {
klog.Errorf("mkdir failed on disk %s (%v)", dir, err)
return err
}
// Perform a bind mount to the full path to allow duplicate mounts of the same PD.
options := []string{"bind"}
if b.readOnly {
options = append(options, "ro")
}
globalFlockerPath := makeGlobalFlockerPath(datasetUUID)
klog.V(4).Infof("attempting to mount %s", dir)
err = b.mounter.Mount(globalFlockerPath, dir, "", options)
if err != nil {
notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
if mntErr != nil {
klog.Errorf("isLikelyNotMountPoint check failed: %v", mntErr)
return err
}
if !notMnt {
if mntErr = b.mounter.Unmount(dir); mntErr != nil {
klog.Errorf("failed to unmount: %v", mntErr)
return err
}
notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
if mntErr != nil {
klog.Errorf("isLikelyNotMountPoint check failed: %v", mntErr)
return err
}
if !notMnt {
// This is very odd, we don't expect it. We'll try again next sync loop.
klog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", dir)
return err
}
}
os.Remove(dir)
klog.Errorf("mount of disk %s failed: %v", dir, err)
return err
}
if !b.readOnly {
volume.SetVolumeOwnership(b, fsGroup)
}
klog.V(4).Infof("successfully mounted %s", dir)
return nil
}
// updateDatasetPrimary will update the primary in Flocker and wait for it to
// be ready. If it never reaches the ready state, it times out and returns an error.
func (b *flockerVolumeMounter) updateDatasetPrimary(datasetUUID string, primaryUUID string) error {
// We need to update the primary and wait for it to be ready
_, err := b.flockerClient.UpdatePrimaryForDataset(primaryUUID, datasetUUID)
if err != nil {
return err
}
timeoutChan := time.NewTimer(timeoutWaitingForVolume)
defer timeoutChan.Stop()
tickChan := time.NewTicker(tickerWaitingForVolume)
defer tickChan.Stop()
for {
if s, err := b.flockerClient.GetDatasetState(datasetUUID); err == nil && s.Primary == primaryUUID {
return nil
}
select {
case <-timeoutChan.C:
return fmt.Errorf(
"Timed out waiting for the datasetUUID: '%s' to be moved to the primary: '%s'\n%v",
datasetUUID, primaryUUID, err,
)
case <-tickChan.C:
break
}
}
}
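// Timing note (added for illustration): with the package defaults above, this is a
// poll-until-primary-matches loop that re-checks every 5 seconds (tickerWaitingForVolume)
// and gives up after 2 minutes (timeoutWaitingForVolume).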
func getVolumeSource(spec *volume.Spec) (*v1.FlockerVolumeSource, bool, error) {
if spec.Volume != nil && spec.Volume.Flocker != nil {
return spec.Volume.Flocker, spec.ReadOnly, nil
} else if spec.PersistentVolume != nil &&
spec.PersistentVolume.Spec.Flocker != nil {
return spec.PersistentVolume.Spec.Flocker, spec.ReadOnly, nil
}
return nil, false, fmt.Errorf("Spec does not reference a Flocker volume type")
}
type flockerVolumeUnmounter struct {
*flockerVolume
}
var _ volume.Unmounter = &flockerVolumeUnmounter{}
func (c *flockerVolumeUnmounter) GetPath() string {
return getPath(c.podUID, c.volName, c.plugin.host)
}
// Unmounts the bind mount, and detaches the disk only if the PD
// resource was the last reference to that disk on the kubelet.
func (c *flockerVolumeUnmounter) TearDown() error {
return c.TearDownAt(c.GetPath())
}
// TearDownAt unmounts the bind mount
func (c *flockerVolumeUnmounter) TearDownAt(dir string) error {
return mount.CleanupMountPoint(dir, c.mounter, false)
}
func (p *flockerPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
return p.newDeleterInternal(spec, &flockerUtil{})
}
func (p *flockerPlugin) newDeleterInternal(spec *volume.Spec, manager volumeManager) (volume.Deleter, error) {
if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.Flocker == nil {
return nil, fmt.Errorf("spec.PersistentVolumeSource.Flocker is nil")
}
return &flockerVolumeDeleter{
flockerVolume: &flockerVolume{
volName: spec.Name(),
datasetName: spec.PersistentVolume.Spec.Flocker.DatasetName,
datasetUUID: spec.PersistentVolume.Spec.Flocker.DatasetUUID,
manager: manager,
}}, nil
}
func (p *flockerPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
return p.newProvisionerInternal(options, &flockerUtil{})
}
func (p *flockerPlugin) newProvisionerInternal(options volume.VolumeOptions, manager volumeManager) (volume.Provisioner, error) {
return &flockerVolumeProvisioner{
flockerVolume: &flockerVolume{
manager: manager,
plugin: p,
},
options: options,
}, nil
}


@ -1,363 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flocker
import (
"fmt"
"os"
"testing"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
flockerapi "github.com/clusterhq/flocker-go"
"github.com/stretchr/testify/assert"
)
const pluginName = "kubernetes.io/flocker"
const datasetOneID = "11111111-1111-1111-1111-111111111100"
const nodeOneID = "11111111-1111-1111-1111-111111111111"
const nodeTwoID = "22222222-2222-2222-2222-222222222222"
var _ flockerapi.Clientable = &fakeFlockerClient{}
type fakeFlockerClient struct {
DatasetID string
Primary string
Deleted bool
Metadata map[string]string
Nodes []flockerapi.NodeState
Error error
}
func newFakeFlockerClient() *fakeFlockerClient {
return &fakeFlockerClient{
DatasetID: datasetOneID,
Primary: nodeOneID,
Deleted: false,
Metadata: map[string]string{"Name": "dataset-one"},
Nodes: []flockerapi.NodeState{
{
Host: "1.2.3.4",
UUID: nodeOneID,
},
{
Host: "4.5.6.7",
UUID: nodeTwoID,
},
},
}
}
func (c *fakeFlockerClient) CreateDataset(options *flockerapi.CreateDatasetOptions) (*flockerapi.DatasetState, error) {
if c.Error != nil {
return nil, c.Error
}
return &flockerapi.DatasetState{
DatasetID: c.DatasetID,
}, nil
}
func (c *fakeFlockerClient) DeleteDataset(datasetID string) error {
c.DatasetID = datasetID
c.Deleted = true
return nil
}
func (c *fakeFlockerClient) GetDatasetState(datasetID string) (*flockerapi.DatasetState, error) {
return &flockerapi.DatasetState{}, nil
}
func (c *fakeFlockerClient) GetDatasetID(metaName string) (datasetID string, err error) {
if val, ok := c.Metadata["Name"]; !ok {
return val, nil
}
return "", fmt.Errorf("No dataset with metadata X found")
}
func (c *fakeFlockerClient) GetPrimaryUUID() (primaryUUID string, err error) {
return
}
func (c *fakeFlockerClient) ListNodes() (nodes []flockerapi.NodeState, err error) {
return c.Nodes, nil
}
func (c *fakeFlockerClient) UpdatePrimaryForDataset(primaryUUID, datasetID string) (*flockerapi.DatasetState, error) {
return &flockerapi.DatasetState{}, nil
}
type fakeFlockerUtil struct {
}
func (fake *fakeFlockerUtil) CreateVolume(c *flockerVolumeProvisioner) (datasetUUID string, volumeSizeGB int, labels map[string]string, err error) {
labels = make(map[string]string)
labels["fakeflockerutil"] = "yes"
return "test-flocker-volume-uuid", 3, labels, nil
}
func (fake *fakeFlockerUtil) DeleteVolume(cd *flockerVolumeDeleter) error {
if cd.datasetUUID != "test-flocker-volume-uuid" {
return fmt.Errorf("Deleter got unexpected datasetUUID: %s", cd.datasetUUID)
}
return nil
}
func newInitializedVolumePlugMgr(t *testing.T) (*volume.VolumePluginMgr, string) {
plugMgr := &volume.VolumePluginMgr{}
dir, err := utiltesting.MkTmpdir("flocker")
assert.NoError(t, err)
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(dir, nil, nil))
return plugMgr, dir
}
func TestPlugin(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("flockerTest")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/flocker")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
spec := &v1.Volume{
Name: "vol1",
VolumeSource: v1.VolumeSource{
Flocker: &v1.FlockerVolumeSource{
DatasetUUID: "uuid1",
},
},
}
fakeManager := &fakeFlockerUtil{}
fakeMounter := &mount.FakeMounter{}
mounter, err := plug.(*flockerPlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), fakeManager, fakeMounter)
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
}
if mounter == nil {
t.Errorf("Got a nil Mounter")
}
}
func TestGetByName(t *testing.T) {
assert := assert.New(t)
plugMgr, dir := newInitializedVolumePlugMgr(t)
defer os.RemoveAll(dir)
plug, err := plugMgr.FindPluginByName(pluginName)
assert.NotNil(plug, "Can't find the plugin by name")
assert.NoError(err)
}
func TestCanSupport(t *testing.T) {
assert := assert.New(t)
plugMgr, dir := newInitializedVolumePlugMgr(t)
defer os.RemoveAll(dir)
plug, err := plugMgr.FindPluginByName(pluginName)
assert.NoError(err)
specs := map[*volume.Spec]bool{
{
Volume: &v1.Volume{
VolumeSource: v1.VolumeSource{
Flocker: &v1.FlockerVolumeSource{},
},
},
}: true,
{
PersistentVolume: &v1.PersistentVolume{
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
Flocker: &v1.FlockerVolumeSource{},
},
},
},
}: true,
{
Volume: &v1.Volume{
VolumeSource: v1.VolumeSource{},
},
}: false,
}
for spec, expected := range specs {
actual := plug.CanSupport(spec)
assert.Equal(expected, actual)
}
}
func TestGetFlockerVolumeSource(t *testing.T) {
assert := assert.New(t)
p := flockerPlugin{}
spec := &volume.Spec{
Volume: &v1.Volume{
VolumeSource: v1.VolumeSource{
Flocker: &v1.FlockerVolumeSource{},
},
},
}
vs, ro := p.getFlockerVolumeSource(spec)
assert.False(ro)
assert.Equal(spec.Volume.Flocker, vs)
spec = &volume.Spec{
PersistentVolume: &v1.PersistentVolume{
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
Flocker: &v1.FlockerVolumeSource{},
},
},
},
}
vs, ro = p.getFlockerVolumeSource(spec)
assert.False(ro)
assert.Equal(spec.PersistentVolume.Spec.Flocker, vs)
}
func TestNewMounterDatasetName(t *testing.T) {
assert := assert.New(t)
plugMgr, dir := newInitializedVolumePlugMgr(t)
defer os.RemoveAll(dir)
plug, err := plugMgr.FindPluginByName(pluginName)
assert.NoError(err)
spec := &volume.Spec{
Volume: &v1.Volume{
VolumeSource: v1.VolumeSource{
Flocker: &v1.FlockerVolumeSource{
DatasetName: "something",
},
},
},
}
_, err = plug.NewMounter(spec, &v1.Pod{}, volume.VolumeOptions{})
assert.NoError(err)
}
func TestNewMounterDatasetUUID(t *testing.T) {
assert := assert.New(t)
plugMgr, dir := newInitializedVolumePlugMgr(t)
defer os.RemoveAll(dir)
plug, err := plugMgr.FindPluginByName(pluginName)
assert.NoError(err)
spec := &volume.Spec{
Volume: &v1.Volume{
VolumeSource: v1.VolumeSource{
Flocker: &v1.FlockerVolumeSource{
DatasetUUID: "uuid1",
},
},
},
}
mounter, err := plug.NewMounter(spec, &v1.Pod{}, volume.VolumeOptions{})
assert.NoError(err)
assert.NotNil(mounter, "got a nil mounter")
}
func TestNewUnmounter(t *testing.T) {
t.Skip("broken")
assert := assert.New(t)
p := flockerPlugin{}
unmounter, err := p.NewUnmounter("", types.UID(""))
assert.Nil(unmounter)
assert.NoError(err)
}
func TestIsReadOnly(t *testing.T) {
b := &flockerVolumeMounter{readOnly: true}
assert.True(t, b.GetAttributes().ReadOnly)
}
type mockFlockerClient struct {
datasetID, primaryUUID, path string
datasetState *flockerapi.DatasetState
}
func newMockFlockerClient(mockDatasetID, mockPrimaryUUID, mockPath string) *mockFlockerClient {
return &mockFlockerClient{
datasetID: mockDatasetID,
primaryUUID: mockPrimaryUUID,
path: mockPath,
datasetState: &flockerapi.DatasetState{
Path: mockPath,
DatasetID: mockDatasetID,
Primary: mockPrimaryUUID,
},
}
}
func (m mockFlockerClient) CreateDataset(metaName string) (*flockerapi.DatasetState, error) {
return m.datasetState, nil
}
func (m mockFlockerClient) GetDatasetState(datasetID string) (*flockerapi.DatasetState, error) {
return m.datasetState, nil
}
func (m mockFlockerClient) GetDatasetID(metaName string) (string, error) {
return m.datasetID, nil
}
func (m mockFlockerClient) GetPrimaryUUID() (string, error) {
return m.primaryUUID, nil
}
func (m mockFlockerClient) UpdatePrimaryForDataset(primaryUUID, datasetID string) (*flockerapi.DatasetState, error) {
return m.datasetState, nil
}
/*
TODO: re-enable after refactor
func TestSetUpAtInternal(t *testing.T) {
const dir = "dir"
mockPath := "expected-to-be-set-properly" // package var
expectedPath := mockPath
assert := assert.New(t)
plugMgr, rootDir := newInitializedVolumePlugMgr(t)
if rootDir != "" {
defer os.RemoveAll(rootDir)
}
plug, err := plugMgr.FindPluginByName(flockerPluginName)
assert.NoError(err)
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
b := flockerVolumeMounter{flockerVolume: &flockerVolume{pod: pod, plugin: plug.(*flockerPlugin)}}
b.client = newMockFlockerClient("dataset-id", "primary-uid", mockPath)
assert.NoError(b.SetUpAt(dir, nil))
assert.Equal(expectedPath, b.flocker.path)
}
*/

View File

@ -1,98 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flocker
import (
"fmt"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/rand"
volumehelpers "k8s.io/cloud-provider/volume/helpers"
flockerapi "github.com/clusterhq/flocker-go"
"k8s.io/klog"
)
type flockerUtil struct{}
func (util *flockerUtil) DeleteVolume(d *flockerVolumeDeleter) error {
var err error
if d.flockerClient == nil {
d.flockerClient, err = d.plugin.newFlockerClient("")
if err != nil {
return err
}
}
datasetUUID, err := d.GetDatasetUUID()
if err != nil {
return err
}
return d.flockerClient.DeleteDataset(datasetUUID)
}
func (util *flockerUtil) CreateVolume(c *flockerVolumeProvisioner) (datasetUUID string, volumeSizeGiB int, labels map[string]string, err error) {
if c.flockerClient == nil {
c.flockerClient, err = c.plugin.newFlockerClient("")
if err != nil {
return
}
}
nodes, err := c.flockerClient.ListNodes()
if err != nil {
return
}
if len(nodes) < 1 {
err = fmt.Errorf("no nodes found inside the flocker cluster to provision a dataset")
return
}
// select random node
node := nodes[rand.Intn(len(nodes))]
klog.V(2).Infof("selected flocker node with UUID '%s' to provision dataset", node.UUID)
capacity := c.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
requestBytes := capacity.Value()
volumeSizeGiB, err = volumehelpers.RoundUpToGiBInt(capacity)
if err != nil {
return
}
createOptions := &flockerapi.CreateDatasetOptions{
MaximumSize: requestBytes,
Metadata: map[string]string{
"type": "k8s-dynamic-prov",
"pvc": c.options.PVC.Name,
},
Primary: node.UUID,
}
datasetState, err := c.flockerClient.CreateDataset(createOptions)
if err != nil {
return
}
datasetUUID = datasetState.DatasetID
klog.V(2).Infof("successfully created Flocker dataset with UUID '%s'", datasetUUID)
return
}
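Note the split above: the PVC request goes to Flocker in raw bytes (MaximumSize), while the size reported back is rounded up to whole GiB via RoundUpToGiBInt. A standalone, illustrative sketch of that rounding, reusing the volumehelpers import from this file:
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
	volumehelpers "k8s.io/cloud-provider/volume/helpers"
)

func main() {
	capacity := resource.MustParse("2500Mi") // a sample PVC request
	fmt.Println(capacity.Value())            // 2621440000 bytes, sent as MaximumSize
	gib, err := volumehelpers.RoundUpToGiBInt(capacity)
	if err != nil {
		panic(err)
	}
	fmt.Println(gib) // 3, so the volume is advertised as 3Gi
}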

View File

@@ -1,58 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flocker
import (
"fmt"
"os"
"testing"
"k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
"github.com/stretchr/testify/assert"
)
func TestFlockerUtil_CreateVolume(t *testing.T) {
assert := assert.New(t)
// test CreateVolume happy path
pvc := volumetest.CreateTestPVC("3Gi", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce})
options := volume.VolumeOptions{
PVC: pvc,
PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete,
}
fakeFlockerClient := newFakeFlockerClient()
dir, p := newTestableProvisioner(assert, options)
provisioner := p.(*flockerVolumeProvisioner)
defer os.RemoveAll(dir)
provisioner.flockerClient = fakeFlockerClient
flockerUtil := &flockerUtil{}
datasetID, size, _, err := flockerUtil.CreateVolume(provisioner)
assert.NoError(err)
assert.Equal(datasetOneID, datasetID)
assert.Equal(3, size)
// test error during CreateVolume
fakeFlockerClient.Error = fmt.Errorf("Do not feel like provisioning")
_, _, _, err = flockerUtil.CreateVolume(provisioner)
assert.Equal(fakeFlockerClient.Error.Error(), err.Error())
}

View File

@@ -1,114 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flocker
import (
"fmt"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
)
type volumeManager interface {
// Creates a volume
CreateVolume(provisioner *flockerVolumeProvisioner) (datasetUUID string, volumeSizeGB int, labels map[string]string, err error)
// Deletes a volume
DeleteVolume(deleter *flockerVolumeDeleter) error
}
type flockerVolumeDeleter struct {
*flockerVolume
}
var _ volume.Deleter = &flockerVolumeDeleter{}
func (b *flockerVolumeDeleter) GetPath() string {
return getPath(b.podUID, b.volName, b.plugin.host)
}
func (b *flockerVolumeDeleter) Delete() error {
return b.manager.DeleteVolume(b)
}
type flockerVolumeProvisioner struct {
*flockerVolume
options volume.VolumeOptions
}
var _ volume.Provisioner = &flockerVolumeProvisioner{}
func (c *flockerVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (*v1.PersistentVolume, error) {
if !util.AccessModesContainedInAll(c.plugin.GetAccessModes(), c.options.PVC.Spec.AccessModes) {
return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", c.options.PVC.Spec.AccessModes, c.plugin.GetAccessModes())
}
if len(c.options.Parameters) > 0 {
return nil, fmt.Errorf("Provisioning failed: Specified at least one unsupported parameter")
}
if c.options.PVC.Spec.Selector != nil {
return nil, fmt.Errorf("Provisioning failed: Specified unsupported selector")
}
if util.CheckPersistentVolumeClaimModeBlock(c.options.PVC) {
return nil, fmt.Errorf("%s does not support block volume provisioning", c.plugin.GetPluginName())
}
datasetUUID, sizeGB, labels, err := c.manager.CreateVolume(c)
if err != nil {
return nil, err
}
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: c.options.PVName,
Labels: map[string]string{},
Annotations: map[string]string{
util.VolumeDynamicallyCreatedByKey: "flocker-dynamic-provisioner",
},
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeReclaimPolicy: c.options.PersistentVolumeReclaimPolicy,
AccessModes: c.options.PVC.Spec.AccessModes,
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
},
PersistentVolumeSource: v1.PersistentVolumeSource{
Flocker: &v1.FlockerVolumeSource{
DatasetUUID: datasetUUID,
},
},
},
}
if len(c.options.PVC.Spec.AccessModes) == 0 {
pv.Spec.AccessModes = c.plugin.GetAccessModes()
}
if len(labels) != 0 {
if pv.Labels == nil {
pv.Labels = make(map[string]string)
}
for k, v := range labels {
pv.Labels[k] = v
}
}
return pv, nil
}
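The capacity literal built above ("%dGi") round-trips to the exact byte count that the provisioning tests later assert. A small illustrative sketch:
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	sizeGB := 3 // as returned by manager.CreateVolume
	q := resource.MustParse(fmt.Sprintf("%dGi", sizeGB))
	fmt.Println(q.Value()) // 3221225472 == 3 * 1024^3
}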

View File

@@ -1,102 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flocker
import (
"fmt"
"os"
"testing"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
"github.com/stretchr/testify/assert"
)
func newTestableProvisioner(assert *assert.Assertions, options volume.VolumeOptions) (string, volume.Provisioner) {
tmpDir, err := utiltesting.MkTmpdir("flockervolumeTest")
assert.NoError(err, fmt.Sprintf("can't make a temp dir: %v", err))
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName(pluginName)
assert.NoError(err, "Can't find the plugin by name")
provisioner, err := plug.(*flockerPlugin).newProvisionerInternal(options, &fakeFlockerUtil{})
assert.NoError(err, fmt.Sprintf("Can't create new provisioner:%v", err))
return tmpDir, provisioner
}
func TestProvision(t *testing.T) {
assert := assert.New(t)
pvc := volumetest.CreateTestPVC("3Gi", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce})
options := volume.VolumeOptions{
PVC: pvc,
PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete,
}
dir, provisioner := newTestableProvisioner(assert, options)
defer os.RemoveAll(dir)
persistentSpec, err := provisioner.Provision(nil, nil)
assert.NoError(err, "Provision() failed: ", err)
cap := persistentSpec.Spec.Capacity[v1.ResourceStorage]
assert.Equal(int64(3*1024*1024*1024), cap.Value())
assert.Equal(
"test-flocker-volume-uuid",
persistentSpec.Spec.PersistentVolumeSource.Flocker.DatasetUUID,
)
assert.Equal(
map[string]string{"fakeflockerutil": "yes"},
persistentSpec.Labels,
)
// parameters are not supported
options = volume.VolumeOptions{
PVC: pvc,
PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete,
Parameters: map[string]string{
"not-supported-params": "test123",
},
}
dir, provisioner = newTestableProvisioner(assert, options)
defer os.RemoveAll(dir)
persistentSpec, err = provisioner.Provision(nil, nil)
assert.Error(err, "Provision() did not fail with Parameters specified")
// selectors are not supported
pvc.Spec.Selector = &metav1.LabelSelector{MatchLabels: map[string]string{"key": "value"}}
options = volume.VolumeOptions{
PVC: pvc,
PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete,
}
dir, provisioner = newTestableProvisioner(assert, options)
defer os.RemoveAll(dir)
persistentSpec, err = provisioner.Provision(nil, nil)
assert.Error(err, "Provision() did not fail with Selector specified")
}

View File

@@ -1,53 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"git_repo.go",
],
importpath = "k8s.io/kubernetes/pkg/volume/git_repo",
deps = [
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
"//vendor/k8s.io/utils/strings:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["git_repo_test.go"],
embed = [":go_default_library"],
deps = [
"//pkg/volume:go_default_library",
"//pkg/volume/emptydir:go_default_library",
"//pkg/volume/testing:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
"//vendor/k8s.io/utils/exec/testing:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@@ -1,11 +0,0 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- thockin
- saad-ali
reviewers:
- saad-ali
- jsafrane
- rootfs
- jingxu97
- msau42

View File

@@ -1,18 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package git_repo contains the internal representation of git repo volumes.
package git_repo // import "k8s.io/kubernetes/pkg/volume/git_repo"

View File

@@ -1,307 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package git_repo
import (
"fmt"
"io/ioutil"
"path"
"path/filepath"
"strings"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/volume"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
"k8s.io/utils/exec"
utilstrings "k8s.io/utils/strings"
)
// This is the primary entrypoint for volume plugins.
func ProbeVolumePlugins() []volume.VolumePlugin {
return []volume.VolumePlugin{&gitRepoPlugin{nil}}
}
type gitRepoPlugin struct {
host volume.VolumeHost
}
var _ volume.VolumePlugin = &gitRepoPlugin{}
func wrappedVolumeSpec() volume.Spec {
return volume.Spec{
Volume: &v1.Volume{VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}},
}
}
const (
gitRepoPluginName = "kubernetes.io/git-repo"
)
func (plugin *gitRepoPlugin) Init(host volume.VolumeHost) error {
plugin.host = host
return nil
}
func (plugin *gitRepoPlugin) GetPluginName() string {
return gitRepoPluginName
}
func (plugin *gitRepoPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
volumeSource, _ := getVolumeSource(spec)
if volumeSource == nil {
return "", fmt.Errorf("Spec does not reference a Git repo volume type")
}
return fmt.Sprintf(
"%v:%v:%v",
volumeSource.Repository,
volumeSource.Revision,
volumeSource.Directory), nil
}
func (plugin *gitRepoPlugin) CanSupport(spec *volume.Spec) bool {
return spec.Volume != nil && spec.Volume.GitRepo != nil
}
func (plugin *gitRepoPlugin) IsMigratedToCSI() bool {
return false
}
func (plugin *gitRepoPlugin) RequiresRemount() bool {
return false
}
func (plugin *gitRepoPlugin) SupportsMountOption() bool {
return false
}
func (plugin *gitRepoPlugin) SupportsBulkVolumeVerification() bool {
return false
}
func (plugin *gitRepoPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
if err := validateVolume(spec.Volume.GitRepo); err != nil {
return nil, err
}
return &gitRepoVolumeMounter{
gitRepoVolume: &gitRepoVolume{
volName: spec.Name(),
podUID: pod.UID,
plugin: plugin,
},
pod: *pod,
source: spec.Volume.GitRepo.Repository,
revision: spec.Volume.GitRepo.Revision,
target: spec.Volume.GitRepo.Directory,
exec: exec.New(),
opts: opts,
}, nil
}
func (plugin *gitRepoPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
return &gitRepoVolumeUnmounter{
&gitRepoVolume{
volName: volName,
podUID: podUID,
plugin: plugin,
},
}, nil
}
func (plugin *gitRepoPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
gitVolume := &v1.Volume{
Name: volumeName,
VolumeSource: v1.VolumeSource{
GitRepo: &v1.GitRepoVolumeSource{},
},
}
return volume.NewSpecFromVolume(gitVolume), nil
}
// gitRepo volumes are directories which are pre-filled from a git repository.
// These do not persist beyond the lifetime of a pod.
type gitRepoVolume struct {
volName string
podUID types.UID
plugin *gitRepoPlugin
volume.MetricsNil
}
var _ volume.Volume = &gitRepoVolume{}
func (gr *gitRepoVolume) GetPath() string {
name := gitRepoPluginName
return gr.plugin.host.GetPodVolumeDir(gr.podUID, utilstrings.EscapeQualifiedName(name), gr.volName)
}
// gitRepoVolumeMounter builds git repo volumes.
type gitRepoVolumeMounter struct {
*gitRepoVolume
pod v1.Pod
source string
revision string
target string
exec exec.Interface
opts volume.VolumeOptions
}
var _ volume.Mounter = &gitRepoVolumeMounter{}
func (b *gitRepoVolumeMounter) GetAttributes() volume.Attributes {
return volume.Attributes{
ReadOnly: false,
Managed: true,
SupportsSELinux: true, // xattr change should be okay, TODO: double check
}
}
// CanMount checks, prior to mount operations, that the required components
// (binaries, etc.) to mount the volume are available on the underlying node.
// If not, it returns an error.
func (b *gitRepoVolumeMounter) CanMount() error {
return nil
}
// SetUp creates new directory and clones a git repo.
func (b *gitRepoVolumeMounter) SetUp(fsGroup *int64) error {
return b.SetUpAt(b.GetPath(), fsGroup)
}
// SetUpAt creates new directory and clones a git repo.
func (b *gitRepoVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
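// A "ready" sentinel left by a previous successful SetUpAt makes retried pod syncs a no-op.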
if volumeutil.IsReady(b.getMetaDir()) {
return nil
}
// Wrap EmptyDir, let it do the setup.
wrapped, err := b.plugin.host.NewWrapperMounter(b.volName, wrappedVolumeSpec(), &b.pod, b.opts)
if err != nil {
return err
}
if err := wrapped.SetUpAt(dir, fsGroup); err != nil {
return err
}
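// The "--" terminator ends git's option parsing, so the repository value cannot itself be read as a flag (defense in depth alongside validateVolume).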
args := []string{"clone", "--", b.source}
if len(b.target) != 0 {
args = append(args, b.target)
}
if output, err := b.execCommand("git", args, dir); err != nil {
return fmt.Errorf("failed to exec 'git %s': %s: %v",
strings.Join(args, " "), output, err)
}
files, err := ioutil.ReadDir(dir)
if err != nil {
return err
}
if len(b.revision) == 0 {
// Done!
volumeutil.SetReady(b.getMetaDir())
return nil
}
var subdir string
switch {
case len(b.target) != 0 && filepath.Clean(b.target) == ".":
// if target dir is '.', use the current dir
subdir = path.Join(dir)
case len(files) == 1:
// if target is not '.', use the generated folder
subdir = path.Join(dir, files[0].Name())
default:
// if target is not '.' but the clone generated more than one entry, something went wrong
return fmt.Errorf("unexpected directory contents: %v", files)
}
if output, err := b.execCommand("git", []string{"checkout", b.revision}, subdir); err != nil {
return fmt.Errorf("failed to exec 'git checkout %s': %s: %v", b.revision, output, err)
}
if output, err := b.execCommand("git", []string{"reset", "--hard"}, subdir); err != nil {
return fmt.Errorf("failed to exec 'git reset --hard': %s: %v", output, err)
}
volume.SetVolumeOwnership(b, fsGroup)
volumeutil.SetReady(b.getMetaDir())
return nil
}
func (b *gitRepoVolumeMounter) getMetaDir() string {
return path.Join(b.plugin.host.GetPodPluginDir(b.podUID, utilstrings.EscapeQualifiedName(gitRepoPluginName)), b.volName)
}
func (b *gitRepoVolumeMounter) execCommand(command string, args []string, dir string) ([]byte, error) {
cmd := b.exec.Command(command, args...)
cmd.SetDir(dir)
return cmd.CombinedOutput()
}
func validateVolume(src *v1.GitRepoVolumeSource) error {
if err := validateNonFlagArgument(src.Repository, "repository"); err != nil {
return err
}
if err := validateNonFlagArgument(src.Revision, "revision"); err != nil {
return err
}
if err := validateNonFlagArgument(src.Directory, "directory"); err != nil {
return err
}
return nil
}
// gitRepoVolumeUnmounter cleans git repo volumes.
type gitRepoVolumeUnmounter struct {
*gitRepoVolume
}
var _ volume.Unmounter = &gitRepoVolumeUnmounter{}
// TearDown simply deletes everything in the directory.
func (c *gitRepoVolumeUnmounter) TearDown() error {
return c.TearDownAt(c.GetPath())
}
// TearDownAt simply deletes everything in the directory.
func (c *gitRepoVolumeUnmounter) TearDownAt(dir string) error {
return volumeutil.UnmountViaEmptyDir(dir, c.plugin.host, c.volName, wrappedVolumeSpec(), c.podUID)
}
func getVolumeSource(spec *volume.Spec) (*v1.GitRepoVolumeSource, bool) {
var readOnly bool
var volumeSource *v1.GitRepoVolumeSource
if spec.Volume != nil && spec.Volume.GitRepo != nil {
volumeSource = spec.Volume.GitRepo
readOnly = spec.ReadOnly
}
return volumeSource, readOnly
}
func validateNonFlagArgument(arg, argName string) error {
if len(arg) > 0 && arg[0] == '-' {
return fmt.Errorf("%q is an invalid value for %s", arg, argName)
}
return nil
}
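validateNonFlagArgument is the guard that keeps user-supplied repository, revision, and directory values from being parsed as git options. A hypothetical test (not in the original file; it would live next to the plugin's tests below) pinning that down:
func TestValidateNonFlagArgumentRejectsFlags(t *testing.T) {
	// a leading '-' must be rejected for every field
	if err := validateNonFlagArgument("--upload-pack=/bin/sh", "repository"); err == nil {
		t.Error("expected leading '-' to be rejected")
	}
	// ordinary values pass through untouched
	if err := validateNonFlagArgument("v1.14.6", "revision"); err != nil {
		t.Errorf("unexpected error for plain value: %v", err)
	}
}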

View File

@@ -1,450 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package git_repo
import (
"fmt"
"io/ioutil"
"os"
"path"
"reflect"
"strings"
"testing"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/emptydir"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
"k8s.io/utils/exec"
fakeexec "k8s.io/utils/exec/testing"
)
func newTestHost(t *testing.T) (string, volume.VolumeHost) {
tempDir, err := ioutil.TempDir("/tmp", "git_repo_test.")
if err != nil {
t.Fatalf("can't make a temp rootdir: %v", err)
}
return tempDir, volumetest.NewFakeVolumeHost(tempDir, nil, emptydir.ProbeVolumePlugins())
}
func TestCanSupport(t *testing.T) {
plugMgr := volume.VolumePluginMgr{}
tempDir, host := newTestHost(t)
defer os.RemoveAll(tempDir)
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, host)
plug, err := plugMgr.FindPluginByName("kubernetes.io/git-repo")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
if plug.GetPluginName() != "kubernetes.io/git-repo" {
t.Errorf("Wrong name: %s", plug.GetPluginName())
}
if !plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{GitRepo: &v1.GitRepoVolumeSource{}}}}) {
t.Errorf("Expected true")
}
}
// Expected command
type expectedCommand struct {
// The git command
cmd []string
// The dir of git command is executed
dir string
}
func TestPlugin(t *testing.T) {
gitUrl := "https://github.com/kubernetes/kubernetes.git"
revision := "2a30ce65c5ab586b98916d83385c5983edd353a1"
scenarios := []struct {
name string
vol *v1.Volume
expecteds []expectedCommand
isExpectedFailure bool
}{
{
name: "target-dir",
vol: &v1.Volume{
Name: "vol1",
VolumeSource: v1.VolumeSource{
GitRepo: &v1.GitRepoVolumeSource{
Repository: gitUrl,
Revision: revision,
Directory: "target_dir",
},
},
},
expecteds: []expectedCommand{
{
cmd: []string{"git", "clone", "--", gitUrl, "target_dir"},
dir: "",
},
{
cmd: []string{"git", "checkout", revision},
dir: "/target_dir",
},
{
cmd: []string{"git", "reset", "--hard"},
dir: "/target_dir",
},
},
isExpectedFailure: false,
},
{
name: "target-dir-no-revision",
vol: &v1.Volume{
Name: "vol1",
VolumeSource: v1.VolumeSource{
GitRepo: &v1.GitRepoVolumeSource{
Repository: gitUrl,
Directory: "target_dir",
},
},
},
expecteds: []expectedCommand{
{
cmd: []string{"git", "clone", "--", gitUrl, "target_dir"},
dir: "",
},
},
isExpectedFailure: false,
},
{
name: "only-git-clone",
vol: &v1.Volume{
Name: "vol1",
VolumeSource: v1.VolumeSource{
GitRepo: &v1.GitRepoVolumeSource{
Repository: gitUrl,
},
},
},
expecteds: []expectedCommand{
{
cmd: []string{"git", "clone", "--", gitUrl},
dir: "",
},
},
isExpectedFailure: false,
},
{
name: "no-target-dir",
vol: &v1.Volume{
Name: "vol1",
VolumeSource: v1.VolumeSource{
GitRepo: &v1.GitRepoVolumeSource{
Repository: gitUrl,
Revision: revision,
Directory: "",
},
},
},
expecteds: []expectedCommand{
{
cmd: []string{"git", "clone", "--", gitUrl},
dir: "",
},
{
cmd: []string{"git", "checkout", revision},
dir: "/kubernetes",
},
{
cmd: []string{"git", "reset", "--hard"},
dir: "/kubernetes",
},
},
isExpectedFailure: false,
},
{
name: "current-dir",
vol: &v1.Volume{
Name: "vol1",
VolumeSource: v1.VolumeSource{
GitRepo: &v1.GitRepoVolumeSource{
Repository: gitUrl,
Revision: revision,
Directory: ".",
},
},
},
expecteds: []expectedCommand{
{
cmd: []string{"git", "clone", "--", gitUrl, "."},
dir: "",
},
{
cmd: []string{"git", "checkout", revision},
dir: "",
},
{
cmd: []string{"git", "reset", "--hard"},
dir: "",
},
},
isExpectedFailure: false,
},
{
name: "current-dir-mess",
vol: &v1.Volume{
Name: "vol1",
VolumeSource: v1.VolumeSource{
GitRepo: &v1.GitRepoVolumeSource{
Repository: gitUrl,
Revision: revision,
Directory: "./.",
},
},
},
expecteds: []expectedCommand{
{
cmd: []string{"git", "clone", "--", gitUrl, "./."},
dir: "",
},
{
cmd: []string{"git", "checkout", revision},
dir: "",
},
{
cmd: []string{"git", "reset", "--hard"},
dir: "",
},
},
isExpectedFailure: false,
},
{
name: "invalid-repository",
vol: &v1.Volume{
Name: "vol1",
VolumeSource: v1.VolumeSource{
GitRepo: &v1.GitRepoVolumeSource{
Repository: "--foo",
},
},
},
isExpectedFailure: true,
},
{
name: "invalid-revision",
vol: &v1.Volume{
Name: "vol1",
VolumeSource: v1.VolumeSource{
GitRepo: &v1.GitRepoVolumeSource{
Repository: gitUrl,
Revision: "--bar",
},
},
},
isExpectedFailure: true,
},
{
name: "invalid-directory",
vol: &v1.Volume{
Name: "vol1",
VolumeSource: v1.VolumeSource{
GitRepo: &v1.GitRepoVolumeSource{
Repository: gitUrl,
Directory: "-b",
},
},
},
isExpectedFailure: true,
},
}
for _, scenario := range scenarios {
allErrs := doTestPlugin(scenario, t)
if len(allErrs) == 0 && scenario.isExpectedFailure {
t.Errorf("Unexpected success for scenario: %s", scenario.name)
}
if len(allErrs) > 0 && !scenario.isExpectedFailure {
t.Errorf("Unexpected failure for scenario: %s - %+v", scenario.name, allErrs)
}
}
}
func doTestPlugin(scenario struct {
name string
vol *v1.Volume
expecteds []expectedCommand
isExpectedFailure bool
}, t *testing.T) []error {
allErrs := []error{}
plugMgr := volume.VolumePluginMgr{}
rootDir, host := newTestHost(t)
defer os.RemoveAll(rootDir)
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, host)
plug, err := plugMgr.FindPluginByName("kubernetes.io/git-repo")
if err != nil {
allErrs = append(allErrs,
fmt.Errorf("Can't find the plugin by name"))
return allErrs
}
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
mounter, err := plug.NewMounter(volume.NewSpecFromVolume(scenario.vol), pod, volume.VolumeOptions{})
if err != nil {
allErrs = append(allErrs,
fmt.Errorf("Failed to make a new Mounter: %v", err))
return allErrs
}
if mounter == nil {
allErrs = append(allErrs,
fmt.Errorf("Got a nil Mounter"))
return allErrs
}
path := mounter.GetPath()
suffix := fmt.Sprintf("pods/poduid/volumes/kubernetes.io~git-repo/%v", scenario.vol.Name)
if !strings.HasSuffix(path, suffix) {
allErrs = append(allErrs,
fmt.Errorf("Got unexpected path: %s", path))
return allErrs
}
// Test setUp()
setUpErrs := doTestSetUp(scenario, mounter)
allErrs = append(allErrs, setUpErrs...)
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) {
allErrs = append(allErrs,
fmt.Errorf("SetUp() failed, volume path not created: %s", path))
return allErrs
} else {
allErrs = append(allErrs,
fmt.Errorf("SetUp() failed: %v", err))
return allErrs
}
}
// gitRepo volume should create its own empty wrapper path
podWrapperMetadataDir := fmt.Sprintf("%v/pods/poduid/plugins/kubernetes.io~empty-dir/wrapped_%v", rootDir, scenario.vol.Name)
if _, err := os.Stat(podWrapperMetadataDir); err != nil {
if os.IsNotExist(err) {
allErrs = append(allErrs,
fmt.Errorf("SetUp() failed, empty-dir wrapper path is not created: %s", podWrapperMetadataDir))
} else {
allErrs = append(allErrs,
fmt.Errorf("SetUp() failed: %v", err))
}
}
unmounter, err := plug.NewUnmounter("vol1", types.UID("poduid"))
if err != nil {
allErrs = append(allErrs,
fmt.Errorf("Failed to make a new Unmounter: %v", err))
return allErrs
}
if unmounter == nil {
allErrs = append(allErrs,
fmt.Errorf("Got a nil Unmounter"))
return allErrs
}
if err := unmounter.TearDown(); err != nil {
allErrs = append(allErrs,
fmt.Errorf("Expected success, got: %v", err))
return allErrs
}
if _, err := os.Stat(path); err == nil {
allErrs = append(allErrs,
fmt.Errorf("TearDown() failed, volume path still exists: %s", path))
} else if !os.IsNotExist(err) {
allErrs = append(allErrs,
fmt.Errorf("TearDown() failed: %v", err))
}
return allErrs
}
func doTestSetUp(scenario struct {
name string
vol *v1.Volume
expecteds []expectedCommand
isExpectedFailure bool
}, mounter volume.Mounter) []error {
expecteds := scenario.expecteds
allErrs := []error{}
// Construct combined outputs from expected commands
var fakeOutputs []fakeexec.FakeCombinedOutputAction
var fcmd fakeexec.FakeCmd
for _, expected := range expecteds {
if expected.cmd[1] == "clone" {
fakeOutputs = append(fakeOutputs, func() ([]byte, error) {
// git clone, it creates new dir/files
os.MkdirAll(path.Join(fcmd.Dirs[0], expected.dir), 0750)
return []byte{}, nil
})
} else {
// git checkout || git reset, they create nothing
fakeOutputs = append(fakeOutputs, func() ([]byte, error) {
return []byte{}, nil
})
}
}
fcmd = fakeexec.FakeCmd{
CombinedOutputScript: fakeOutputs,
}
// Construct fake exec outputs from fcmd
var fakeAction []fakeexec.FakeCommandAction
for i := 0; i < len(expecteds); i++ {
fakeAction = append(fakeAction, func(cmd string, args ...string) exec.Cmd {
return fakeexec.InitFakeCmd(&fcmd, cmd, args...)
})
}
fake := fakeexec.FakeExec{
CommandScript: fakeAction,
}
g := mounter.(*gitRepoVolumeMounter)
g.exec = &fake
g.SetUp(nil)
if fake.CommandCalls != len(expecteds) {
allErrs = append(allErrs,
fmt.Errorf("unexpected command calls in scenario: expected %d, saw: %d", len(expecteds), fake.CommandCalls))
}
var expectedCmds [][]string
for _, expected := range expecteds {
expectedCmds = append(expectedCmds, expected.cmd)
}
if !reflect.DeepEqual(expectedCmds, fcmd.CombinedOutputLog) {
allErrs = append(allErrs,
fmt.Errorf("unexpected commands: %v, expected: %v", fcmd.CombinedOutputLog, expectedCmds))
}
var expectedPaths []string
for _, expected := range expecteds {
expectedPaths = append(expectedPaths, g.GetPath()+expected.dir)
}
if len(fcmd.Dirs) != len(expectedPaths) || !reflect.DeepEqual(expectedPaths, fcmd.Dirs) {
allErrs = append(allErrs,
fmt.Errorf("unexpected directories: %v, expected: %v", fcmd.Dirs, expectedPaths))
}
return allErrs
}

View File

@@ -1,73 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"glusterfs.go",
"glusterfs_minmax.go",
"glusterfs_util.go",
],
importpath = "k8s.io/kubernetes/pkg/volume/glusterfs",
deps = [
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/cloud-provider/volume/helpers:go_default_library",
"//vendor/github.com/heketi/heketi/client/api/go-client:go_default_library",
"//vendor/github.com/heketi/heketi/pkg/glusterfs/api:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
"//vendor/k8s.io/utils/strings:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"glusterfs_minmax_test.go",
"glusterfs_test.go",
],
embed = [":go_default_library"],
deps = [
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/client-go/testing:go_default_library",
"//staging/src/k8s.io/client-go/util/testing:go_default_library",
"//vendor/github.com/heketi/heketi/pkg/glusterfs/api:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@@ -1,14 +0,0 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- rootfs
- saad-ali
- jingxu97
- humblec
reviewers:
- saad-ali
- jsafrane
- rootfs
- humblec
- jingxu97
- msau42

View File

@@ -1,19 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package glusterfs contains the internal representation of glusterfs
// volumes.
package glusterfs // import "k8s.io/kubernetes/pkg/volume/glusterfs"

File diff suppressed because it is too large

View File

@@ -1,193 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
//
// This implementation is space-efficient for sparse allocation over a big
// range. It could be optimized with a bitmap for a high absolute number of
// allocations.
//
package glusterfs
import (
"errors"
"sync"
)
var (
// ErrConflict is returned when a value is already in use.
ErrConflict = errors.New("number already allocated")
// ErrInvalidRange is returned for an invalid range, e.g. min > max.
ErrInvalidRange = errors.New("invalid range")
// ErrOutOfRange is returned when a value is not in the pool range.
ErrOutOfRange = errors.New("out of range")
// ErrRangeFull is returned when no more free values are left in the pool.
ErrRangeFull = errors.New("range full")
// ErrInternal is returned when no free item is found even though a.free != 0.
ErrInternal = errors.New("internal error")
)
// MinMaxAllocator allocates integers from a min/max range.
type MinMaxAllocator struct {
lock sync.Mutex
min int
max int
free int
used map[int]bool
}
var _ Rangeable = &MinMaxAllocator{}
// Rangeable is an interface for allocators whose min/max range can be adjusted.
// Implementations of Rangeable must be thread-safe.
type Rangeable interface {
Allocate(int) (bool, error)
AllocateNext() (int, bool, error)
Release(int) error
Has(int) bool
Free() int
SetRange(min, max int) error
}
// NewMinMaxAllocator returns a new allocator, or an error if the provided min/max range is invalid.
func NewMinMaxAllocator(min, max int) (*MinMaxAllocator, error) {
if min > max {
return nil, ErrInvalidRange
}
return &MinMaxAllocator{
min: min,
max: max,
free: 1 + max - min,
used: map[int]bool{},
}, nil
}
// SetRange redefines the pool using the provided min and max values.
func (a *MinMaxAllocator) SetRange(min, max int) error {
if min > max {
return ErrInvalidRange
}
a.lock.Lock()
defer a.lock.Unlock()
// Check if we need to change
if a.min == min && a.max == max {
return nil
}
a.min = min
a.max = max
// Recompute how many free we have in the range
numUsed := 0
for i := range a.used {
if a.inRange(i) {
numUsed++
}
}
a.free = 1 + max - min - numUsed
return nil
}
// Allocate marks the provided value as used in the allocator.
func (a *MinMaxAllocator) Allocate(i int) (bool, error) {
a.lock.Lock()
defer a.lock.Unlock()
if !a.inRange(i) {
return false, ErrOutOfRange
}
if a.has(i) {
return false, ErrConflict
}
a.used[i] = true
a.free--
return true, nil
}
// AllocateNext allocates the next free value from the allocator.
func (a *MinMaxAllocator) AllocateNext() (int, bool, error) {
a.lock.Lock()
defer a.lock.Unlock()
// Fast check if we're out of items
if a.free <= 0 {
return 0, false, ErrRangeFull
}
// Scan from the minimum until we find a free item
for i := a.min; i <= a.max; i++ {
if !a.has(i) {
a.used[i] = true
a.free--
return i, true, nil
}
}
// no free item found, but a.free != 0
return 0, false, ErrInternal
}
// Release frees/deletes the provided value from the allocator.
func (a *MinMaxAllocator) Release(i int) error {
a.lock.Lock()
defer a.lock.Unlock()
if !a.has(i) {
return nil
}
delete(a.used, i)
if a.inRange(i) {
a.free++
}
return nil
}
func (a *MinMaxAllocator) has(i int) bool {
_, ok := a.used[i]
return ok
}
// Has reports whether the provided value is in use in the allocator.
func (a *MinMaxAllocator) Has(i int) bool {
a.lock.Lock()
defer a.lock.Unlock()
return a.has(i)
}
// Free returns the number of free values in the allocator.
func (a *MinMaxAllocator) Free() int {
a.lock.Lock()
defer a.lock.Unlock()
return a.free
}
func (a *MinMaxAllocator) inRange(i int) bool {
return a.min <= i && i <= a.max
}
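Standalone, the allocator above behaves as follows. A hedged Example-style sketch (assuming the package's test context and an fmt import), mirroring how the provisioner hands out one GID per volume from the class's [gidMin, gidMax] range:
func ExampleMinMaxAllocator() {
	a, err := NewMinMaxAllocator(2000, 2005) // 6 free GIDs
	if err != nil {
		panic(err)
	}
	gid, _, err := a.AllocateNext() // scans upward from min
	if err != nil {
		panic(err)
	}
	fmt.Println(gid, a.Free())
	_ = a.Release(gid) // releasing makes the GID reusable
	fmt.Println(a.Free())
	// Output:
	// 2000 5
	// 6
}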

View File

@@ -1,226 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package glusterfs
import (
"testing"
)
func TestNewFree(t *testing.T) {
min := 1
max := 10
m, err := NewMinMaxAllocator(min, max)
if err != nil {
t.Errorf("error creating new allocator: '%v'", err)
}
if f := m.Free(); f != (max - min + 1) {
t.Errorf("expect to get %d free, but got %d", (max - min + 1), f)
}
}
func TestNewInvalidRange(t *testing.T) {
if _, err := NewMinMaxAllocator(10, 1); err != ErrInvalidRange {
t.Errorf("expect to get Error '%v', got '%v'", ErrInvalidRange, err)
}
}
func TestSetRange(t *testing.T) {
min := 1
max := 10
m, err := NewMinMaxAllocator(min, max)
if err != nil {
t.Errorf("error creating new allocator: '%v'", err)
}
if err = m.SetRange(10, 1); err != ErrInvalidRange {
t.Errorf("expected to get error '%v', got '%v'", ErrInvalidRange, err)
}
if err = m.SetRange(1, 2); err != nil {
t.Errorf("error setting range: '%v'", err)
}
if f := m.Free(); f != 2 {
t.Errorf("expect to get %d free, but got %d", 2, f)
}
if ok, _ := m.Allocate(1); !ok {
t.Errorf("error allocate offset %v", 1)
}
if f := m.Free(); f != 1 {
t.Errorf("expect to get 1 free, but got %d", f)
}
if err = m.SetRange(1, 1); err != nil {
t.Errorf("error setting range: '%v'", err)
}
if f := m.Free(); f != 0 {
t.Errorf("expect to get 0 free, but got %d", f)
}
if err = m.SetRange(2, 2); err != nil {
t.Errorf("error setting range: '%v'", err)
}
if f := m.Free(); f != 1 {
t.Errorf("expect to get 1 free, but got %d", f)
}
}
func TestAllocateNext(t *testing.T) {
min := 1
max := 10
m, err := NewMinMaxAllocator(min, max)
if err != nil {
t.Errorf("error creating new allocator: '%v'", err)
}
el, ok, _ := m.AllocateNext()
if !ok {
t.Fatalf("unexpected error")
}
if !m.Has(el) {
t.Errorf("expect element %v allocated", el)
}
if f := m.Free(); f != (max-min+1)-1 {
t.Errorf("expect to get %d free, but got %d", (max-min+1)-1, f)
}
}
func TestAllocateMax(t *testing.T) {
min := 1
max := 10
m, err := NewMinMaxAllocator(min, max)
if err != nil {
t.Errorf("error creating new allocator: '%v'", err)
}
for i := 1; i <= max; i++ {
if _, ok, _ := m.AllocateNext(); !ok {
t.Fatalf("unexpected error")
}
}
if _, ok, _ := m.AllocateNext(); ok {
t.Errorf("unexpected success")
}
if f := m.Free(); f != 0 {
t.Errorf("expect to get %d free, but got %d", 0, f)
}
}
func TestAllocate(t *testing.T) {
min := 1
max := 10
offset := 3
m, err := NewMinMaxAllocator(min, max)
if err != nil {
t.Errorf("error creating new allocator: '%v'", err)
}
if ok, err := m.Allocate(offset); !ok {
t.Errorf("error allocate offset %v: %v", offset, err)
}
if !m.Has(offset) {
t.Errorf("expect element %v allocated", offset)
}
if f := m.Free(); f != (max-min+1)-1 {
t.Errorf("expect to get %d free, but got %d", (max-min+1)-1, f)
}
}
func TestAllocateConflict(t *testing.T) {
min := 1
max := 10
offset := 3
m, err := NewMinMaxAllocator(min, max)
if err != nil {
t.Errorf("error creating new allocator: '%v'", err)
}
if ok, err := m.Allocate(offset); !ok {
t.Errorf("error allocate offset %v: %v", offset, err)
}
ok, err := m.Allocate(offset)
if ok {
t.Errorf("unexpected success")
}
if err != ErrConflict {
t.Errorf("expected error '%v', got '%v'", ErrConflict, err)
}
}
func TestAllocateOutOfRange(t *testing.T) {
min := 1
max := 10
offset := 11
m, err := NewMinMaxAllocator(min, max)
if err != nil {
t.Errorf("error creating new allocator: '%v'", err)
}
ok, err := m.Allocate(offset)
if ok {
t.Errorf("unexpected success")
}
if err != ErrOutOfRange {
t.Errorf("expected error '%v', got '%v'", ErrOutOfRange, err)
}
}
func TestRelease(t *testing.T) {
min := 1
max := 10
offset := 3
m, err := NewMinMaxAllocator(min, max)
if err != nil {
t.Errorf("error creating new allocator: '%v'", err)
}
if ok, err := m.Allocate(offset); !ok {
t.Errorf("error allocate offset %v: %v", offset, err)
}
if !m.Has(offset) {
t.Errorf("expect offset %v allocated", offset)
}
if err = m.Release(offset); err != nil {
t.Errorf("unexpected error: %v", err)
}
if m.Has(offset) {
t.Errorf("expect offset %v not allocated", offset)
}
}

View File

@@ -1,762 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package glusterfs
import (
"fmt"
"os"
"reflect"
"testing"
gapi "github.com/heketi/heketi/pkg/glusterfs/api"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
)
func TestCanSupport(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("glusterfs_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/glusterfs")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
if plug.GetPluginName() != "kubernetes.io/glusterfs" {
t.Errorf("Wrong name: %s", plug.GetPluginName())
}
if plug.CanSupport(&volume.Spec{}) {
t.Errorf("Expected false")
}
if plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{}}}) {
t.Errorf("Expected false")
}
if !plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{Glusterfs: &v1.GlusterfsVolumeSource{}}}}) {
t.Errorf("Expected true")
}
if plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{}}}) {
t.Errorf("Expected false")
}
if plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{}}}}) {
t.Errorf("Expected false")
}
if !plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{Glusterfs: &v1.GlusterfsPersistentVolumeSource{}}}}}) {
t.Errorf("Expected true")
}
}
func TestGetAccessModes(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("glusterfs_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPersistentPluginByName("kubernetes.io/glusterfs")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
if !volumetest.ContainsAccessMode(plug.GetAccessModes(), v1.ReadWriteOnce) || !volumetest.ContainsAccessMode(plug.GetAccessModes(), v1.ReadOnlyMany) || !volumetest.ContainsAccessMode(plug.GetAccessModes(), v1.ReadWriteMany) {
t.Errorf("Expected three AccessModeTypes: %s, %s, and %s", v1.ReadWriteOnce, v1.ReadOnlyMany, v1.ReadWriteMany)
}
}
func doTestPlugin(t *testing.T, spec *volume.Spec) {
tmpDir, err := utiltesting.MkTmpdir("glusterfs_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/glusterfs")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
ep := &v1.Endpoints{ObjectMeta: metav1.ObjectMeta{Name: "foo"}, Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}}}}}
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
mounter, err := plug.(*glusterfsPlugin).newMounterInternal(spec, ep, pod, &mount.FakeMounter{})
volumePath := mounter.GetPath()
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
}
if mounter == nil {
t.Error("Got a nil Mounter")
}
expectedPath := fmt.Sprintf("%s/pods/poduid/volumes/kubernetes.io~glusterfs/vol1", tmpDir)
if volumePath != expectedPath {
t.Errorf("Unexpected path, expected %q, got: %q", expectedPath, volumePath)
}
if err := mounter.SetUp(nil); err != nil {
t.Errorf("Expected success, got: %v", err)
}
if _, err := os.Stat(volumePath); err != nil {
if os.IsNotExist(err) {
t.Errorf("SetUp() failed, volume path not created: %s", volumePath)
} else {
t.Errorf("SetUp() failed: %v", err)
}
}
unmounter, err := plug.(*glusterfsPlugin).newUnmounterInternal("vol1", types.UID("poduid"), &mount.FakeMounter{})
if err != nil {
t.Errorf("Failed to make a new Unmounter: %v", err)
}
if unmounter == nil {
t.Error("Got a nil Unmounter")
}
if err := unmounter.TearDown(); err != nil {
t.Errorf("Expected success, got: %v", err)
}
if _, err := os.Stat(volumePath); err == nil {
t.Errorf("TearDown() failed, volume path still exists: %s", volumePath)
} else if !os.IsNotExist(err) {
t.Errorf("TearDown() failed: %v", err)
}
}
func TestPluginVolume(t *testing.T) {
vol := &v1.Volume{
Name: "vol1",
VolumeSource: v1.VolumeSource{Glusterfs: &v1.GlusterfsVolumeSource{EndpointsName: "ep", Path: "vol", ReadOnly: false}},
}
doTestPlugin(t, volume.NewSpecFromVolume(vol))
}
func TestPluginPersistentVolume(t *testing.T) {
vol := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "vol1",
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
Glusterfs: &v1.GlusterfsPersistentVolumeSource{EndpointsName: "ep", Path: "vol", ReadOnly: false},
},
},
}
doTestPlugin(t, volume.NewSpecFromPersistentVolume(vol, false))
}
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("glusterfs_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "pvA",
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
Glusterfs: &v1.GlusterfsPersistentVolumeSource{EndpointsName: "ep", Path: "vol", ReadOnly: false},
},
ClaimRef: &v1.ObjectReference{
Name: "claimA",
},
},
}
claim := &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: "claimA",
Namespace: "nsA",
},
Spec: v1.PersistentVolumeClaimSpec{
VolumeName: "pvA",
},
Status: v1.PersistentVolumeClaimStatus{
Phase: v1.ClaimBound,
},
}
ep := &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{
Namespace: "nsA",
Name: "ep",
},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []v1.EndpointPort{{Name: "foo", Port: 80, Protocol: v1.ProtocolTCP}},
}},
}
client := fake.NewSimpleClientset(pv, claim, ep)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, client, nil))
plug, _ := plugMgr.FindPluginByName(glusterfsPluginName)
// the readOnly flag is supplied by the persistent-claim volume source when its mounter creates volumes
spec := volume.NewSpecFromPersistentVolume(pv, true)
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "nsA", UID: types.UID("poduid")}}
mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})
if !mounter.GetAttributes().ReadOnly {
t.Errorf("Expected true for mounter.IsReadOnly")
}
}
func TestParseClassParameters(t *testing.T) {
secret := v1.Secret{
Type: "kubernetes.io/glusterfs",
Data: map[string][]byte{
"data": []byte("mypassword"),
},
}
tests := []struct {
name string
parameters map[string]string
secret *v1.Secret
expectError bool
expectConfig *provisionerConfig
}{
{
"password",
map[string]string{
"resturl": "https://localhost:8080",
"restuser": "admin",
"restuserkey": "password",
},
nil, // secret
false, // expect error
&provisionerConfig{
url: "https://localhost:8080",
user: "admin",
userKey: "password",
secretValue: "password",
gidMin: 2000,
gidMax: 2147483647,
volumeType: gapi.VolumeDurabilityInfo{Type: "replicate", Replicate: gapi.ReplicaDurability{Replica: 3}, Disperse: gapi.DisperseDurability{Data: 0, Redundancy: 0}},
thinPoolSnapFactor: float32(1.0),
customEpNamePrefix: "glusterfs-dynamic",
},
},
{
"secret",
map[string]string{
"resturl": "https://localhost:8080",
"restuser": "admin",
"secretname": "mysecret",
"secretnamespace": "default",
},
&secret,
false, // expect error
&provisionerConfig{
url: "https://localhost:8080",
user: "admin",
secretName: "mysecret",
secretNamespace: "default",
secretValue: "mypassword",
gidMin: 2000,
gidMax: 2147483647,
volumeType: gapi.VolumeDurabilityInfo{Type: "replicate", Replicate: gapi.ReplicaDurability{Replica: 3}, Disperse: gapi.DisperseDurability{Data: 0, Redundancy: 0}},
thinPoolSnapFactor: float32(1.0),
customEpNamePrefix: "glusterfs-dynamic",
},
},
{
"no authentication",
map[string]string{
"resturl": "https://localhost:8080",
"restauthenabled": "false",
},
&secret,
false, // expect error
&provisionerConfig{
url: "https://localhost:8080",
gidMin: 2000,
gidMax: 2147483647,
volumeType: gapi.VolumeDurabilityInfo{Type: "replicate", Replicate: gapi.ReplicaDurability{Replica: 3}, Disperse: gapi.DisperseDurability{Data: 0, Redundancy: 0}},
thinPoolSnapFactor: float32(1.0),
customEpNamePrefix: "glusterfs-dynamic",
},
},
{
"missing secret",
map[string]string{
"resturl": "https://localhost:8080",
"secretname": "mysecret",
"secretnamespace": "default",
},
nil, // secret
true, // expect error
nil,
},
{
"secret with no namespace",
map[string]string{
"resturl": "https://localhost:8080",
"secretname": "mysecret",
},
&secret,
true, // expect error
nil,
},
{
"missing url",
map[string]string{
"restuser": "admin",
"restuserkey": "password",
},
nil, // secret
true, // expect error
nil,
},
{
"unknown parameter",
map[string]string{
"unknown": "yes",
"resturl": "https://localhost:8080",
"restuser": "admin",
"restuserkey": "password",
},
nil, // secret
true, // expect error
nil,
},
{
"invalid gidMin #1",
map[string]string{
"resturl": "https://localhost:8080",
"restauthenabled": "false",
"gidMin": "0",
},
&secret,
true, // expect error
nil,
},
{
"invalid gidMin #2",
map[string]string{
"resturl": "https://localhost:8080",
"restauthenabled": "false",
"gidMin": "1999",
},
&secret,
true, // expect error
nil,
},
{
"invalid gidMin #3",
map[string]string{
"resturl": "https://localhost:8080",
"restauthenabled": "false",
"gidMin": "1999",
},
&secret,
true, // expect error
nil,
},
{
"invalid gidMax #1",
map[string]string{
"resturl": "https://localhost:8080",
"restauthenabled": "false",
"gidMax": "0",
},
&secret,
true, // expect error
nil,
},
{
"invalid gidMax #2",
map[string]string{
"resturl": "https://localhost:8080",
"restauthenabled": "false",
"gidMax": "1999",
},
&secret,
true, // expect error
nil,
},
{
"invalid gidMax #3",
map[string]string{
"resturl": "https://localhost:8080",
"restauthenabled": "false",
"gidMax": "1999",
},
&secret,
true, // expect error
nil,
},
{
"invalid gidMin:gidMax",
map[string]string{
"resturl": "https://localhost:8080",
"restauthenabled": "false",
"gidMin": "5001",
"gidMax": "5000",
},
&secret,
true, // expect error
nil,
},
{
"valid gidMin",
map[string]string{
"resturl": "https://localhost:8080",
"restauthenabled": "false",
"gidMin": "4000",
},
&secret,
false, // expect error
&provisionerConfig{
url: "https://localhost:8080",
gidMin: 4000,
gidMax: 2147483647,
volumeType: gapi.VolumeDurabilityInfo{Type: "replicate", Replicate: gapi.ReplicaDurability{Replica: 3}, Disperse: gapi.DisperseDurability{Data: 0, Redundancy: 0}},
thinPoolSnapFactor: float32(1.0),
customEpNamePrefix: "glusterfs-dynamic",
},
},
{
"valid gidMax",
map[string]string{
"resturl": "https://localhost:8080",
"restauthenabled": "false",
"gidMax": "5000",
},
&secret,
false, // expect error
&provisionerConfig{
url: "https://localhost:8080",
gidMin: 2000,
gidMax: 5000,
volumeType: gapi.VolumeDurabilityInfo{Type: "replicate", Replicate: gapi.ReplicaDurability{Replica: 3}, Disperse: gapi.DisperseDurability{Data: 0, Redundancy: 0}},
thinPoolSnapFactor: float32(1.0),
customEpNamePrefix: "glusterfs-dynamic",
},
},
{
"valid gidMin:gidMax",
map[string]string{
"resturl": "https://localhost:8080",
"restauthenabled": "false",
"gidMin": "4000",
"gidMax": "5000",
},
&secret,
false, // expect error
&provisionerConfig{
url: "https://localhost:8080",
gidMin: 4000,
gidMax: 5000,
volumeType: gapi.VolumeDurabilityInfo{Type: "replicate", Replicate: gapi.ReplicaDurability{Replica: 3}, Disperse: gapi.DisperseDurability{Data: 0, Redundancy: 0}},
thinPoolSnapFactor: float32(1.0),
customEpNamePrefix: "glusterfs-dynamic",
},
},
{
"valid volumetype: replicate",
map[string]string{
"resturl": "https://localhost:8080",
"restauthenabled": "false",
"gidMin": "4000",
"gidMax": "5000",
"volumetype": "replicate:4",
},
&secret,
false, // expect error
&provisionerConfig{
url: "https://localhost:8080",
gidMin: 4000,
gidMax: 5000,
volumeType: gapi.VolumeDurabilityInfo{Type: "replicate", Replicate: gapi.ReplicaDurability{Replica: 4}, Disperse: gapi.DisperseDurability{Data: 0, Redundancy: 0}},
thinPoolSnapFactor: float32(1.0),
customEpNamePrefix: "glusterfs-dynamic",
},
},
{
"valid volumetype: disperse",
map[string]string{
"resturl": "https://localhost:8080",
"restauthenabled": "false",
"gidMin": "4000",
"gidMax": "5000",
"volumetype": "disperse:4:2",
},
&secret,
false, // expect error
&provisionerConfig{
url: "https://localhost:8080",
gidMin: 4000,
gidMax: 5000,
volumeType: gapi.VolumeDurabilityInfo{Type: "disperse", Replicate: gapi.ReplicaDurability{Replica: 0}, Disperse: gapi.DisperseDurability{Data: 4, Redundancy: 2}},
thinPoolSnapFactor: float32(1.0),
customEpNamePrefix: "glusterfs-dynamic",
},
},
{
"valid snapfactor: 50",
map[string]string{
"resturl": "https://localhost:8080",
"restauthenabled": "false",
"gidMin": "4000",
"gidMax": "5000",
"volumetype": "disperse:4:2",
"snapfactor": "50",
},
&secret,
false, // expect error
&provisionerConfig{
url: "https://localhost:8080",
gidMin: 4000,
gidMax: 5000,
volumeType: gapi.VolumeDurabilityInfo{Type: "disperse", Replicate: gapi.ReplicaDurability{Replica: 0}, Disperse: gapi.DisperseDurability{Data: 4, Redundancy: 2}},
thinPoolSnapFactor: float32(50),
customEpNamePrefix: "glusterfs-dynamic",
},
},
{
"valid volumenameprefix: dept-dev",
map[string]string{
"resturl": "https://localhost:8080",
"restauthenabled": "false",
"gidMin": "4000",
"gidMax": "5000",
"volumetype": "disperse:4:2",
"snapfactor": "50",
"volumenameprefix": "dept-dev",
},
&secret,
false, // expect error
&provisionerConfig{
url: "https://localhost:8080",
gidMin: 4000,
gidMax: 5000,
volumeType: gapi.VolumeDurabilityInfo{Type: "disperse", Replicate: gapi.ReplicaDurability{Replica: 0}, Disperse: gapi.DisperseDurability{Data: 4, Redundancy: 2}},
thinPoolSnapFactor: float32(50),
volumeNamePrefix: "dept-dev",
customEpNamePrefix: "glusterfs-dynamic",
},
},
{
"invalid volumetype (disperse) parameter",
map[string]string{
"resturl": "https://localhost:8080",
"restauthenabled": "false",
"volumetype": "disperse:4:asd",
},
&secret,
true, // expect error
nil,
},
{
"invalid volumetype (replicate) parameter",
map[string]string{
"resturl": "https://localhost:8080",
"restauthenabled": "false",
"volumetype": "replicate:asd",
},
&secret,
true, // expect error
nil,
},
{
"invalid volumetype: unknown volumetype",
map[string]string{
"resturl": "https://localhost:8080",
"restauthenabled": "false",
"volumetype": "dispersereplicate:4:2",
},
&secret,
true, // expect error
nil,
},
{
"invalid volumetype : negative value",
map[string]string{
"resturl": "https://localhost:8080",
"restauthenabled": "false",
"volumetype": "replicate:-1000",
},
&secret,
true, // expect error
nil,
},
{
"invalid thinPoolSnapFactor: value out of range",
map[string]string{
"resturl": "https://localhost:8080",
"restauthenabled": "false",
"snapfactor": "0.5",
},
&secret,
true, // expect error
nil,
},
{
"invalid volumenameprefix: string starting with '_'",
map[string]string{
"resturl": "https://localhost:8080",
"restauthenabled": "false",
"volumenameprefix": "_",
},
&secret,
true, // expect error
nil,
},
{
"invalid volumenameprefix: string with '_'",
map[string]string{
"resturl": "https://localhost:8080",
"restauthenabled": "false",
"volumenameprefix": "qe_dept",
},
&secret,
true, // expect error
nil,
},
{
"invalid thinPoolSnapFactor: value out of range",
map[string]string{
"resturl": "https://localhost:8080",
"restauthenabled": "false",
"snapfactor": "120",
},
&secret,
true, // expect error
nil,
},
{
"enable custom ep/svc name: customEpNamePrefix: myprefix",
map[string]string{
"resturl": "https://localhost:8080",
"restauthenabled": "false",
"gidMin": "4000",
"gidMax": "5000",
"volumetype": "replicate:4",
"customEpNamePrefix": "myprefix",
},
&secret,
false, // expect no error
&provisionerConfig{
url: "https://localhost:8080",
gidMin: 4000,
gidMax: 5000,
volumeType: gapi.VolumeDurabilityInfo{Type: "replicate", Replicate: gapi.ReplicaDurability{Replica: 4}, Disperse: gapi.DisperseDurability{Data: 0, Redundancy: 0}},
thinPoolSnapFactor: float32(1.0),
customEpNamePrefix: "myprefix",
},
},
{
"empty custom ep/svc name: customEpNamePrefix:''",
map[string]string{
"resturl": "https://localhost:8080",
"restauthenabled": "false",
"gidMin": "4000",
"gidMax": "5000",
"volumetype": "replicate:4",
"customEpNamePrefix": "",
},
&secret,
false, // expect no error
&provisionerConfig{
url: "https://localhost:8080",
gidMin: 4000,
gidMax: 5000,
volumeType: gapi.VolumeDurabilityInfo{Type: "replicate", Replicate: gapi.ReplicaDurability{Replica: 4}, Disperse: gapi.DisperseDurability{Data: 0, Redundancy: 0}},
thinPoolSnapFactor: float32(1.0),
customEpNamePrefix: "",
},
},
{
"custom ep/svc name with 26 chars: customEpNamePrefix:'charstringhastwentysixchar'",
map[string]string{
"resturl": "https://localhost:8080",
"restauthenabled": "false",
"gidMin": "4000",
"gidMax": "5000",
"volumetype": "replicate:4",
"customEpNamePrefix": "charstringhastwentysixchar",
},
&secret,
false, // expect no error
&provisionerConfig{
url: "https://localhost:8080",
gidMin: 4000,
gidMax: 5000,
volumeType: gapi.VolumeDurabilityInfo{Type: "replicate", Replicate: gapi.ReplicaDurability{Replica: 4}, Disperse: gapi.DisperseDurability{Data: 0, Redundancy: 0}},
thinPoolSnapFactor: float32(1.0),
customEpNamePrefix: "charstringhastwentysixchar",
},
},
{
"invalid customepnameprefix ( ie >26 chars) parameter",
map[string]string{
"resturl": "https://localhost:8080",
"restauthenabled": "false",
"gidMin": "4000",
"gidMax": "5000",
"volumetype": "replicate:4",
"customEpNamePrefix": "myprefixhasmorethan26characters",
},
&secret,
true, // expect error
nil,
},
}
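// Run every table entry through parseClassParameters, backing the secret lookup with a fake clientset.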
for _, test := range tests {
client := &fake.Clientset{}
client.AddReactor("get", "secrets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
if test.secret != nil {
return true, test.secret, nil
}
return true, nil, fmt.Errorf("Test %s did not set a secret", test.name)
})
cfg, err := parseClassParameters(test.parameters, client)
if err != nil && !test.expectError {
t.Errorf("Test %s got unexpected error %v", test.name, err)
}
if err == nil && test.expectError {
t.Errorf("test %s expected error and got none", test.name)
}
if test.expectConfig != nil {
if !reflect.DeepEqual(cfg, test.expectConfig) {
t.Errorf("Test %s returned unexpected data, expected: %+v, got: %+v", test.name, test.expectConfig, cfg)
}
}
}
}
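// A minimal sketch of a valid parameter map, assuming the defaults asserted
// in the expectations above (thin-pool snap factor 1.0, endpoint prefix
// "glusterfs-dynamic"): a REST endpoint with auth disabled is enough, and
// every other field falls back to the plugin defaults.
//
//   params := map[string]string{
//       "resturl":         "https://localhost:8080",
//       "restauthenabled": "false",
//   }
//   cfg, err := parseClassParameters(params, client)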


@ -1,71 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package glusterfs
import (
"bufio"
"fmt"
"os"
"k8s.io/klog"
)
// readGlusterLog reads the last two lines of the gluster log file
// after a failed gluster SetUp and returns them wrapped in an error
// so kubelet can surface them properly.
// It returns an error on any failure.
func readGlusterLog(path string, podName string) error {
var line1 string
var line2 string
linecount := 0
klog.Infof("failure, now attempting to read the gluster log for pod %s", podName)
// Make sure a log file path was provided
if len(path) == 0 {
return fmt.Errorf("log file does not exist for pod %s", podName)
}
// open the log file
file, err := os.Open(path)
if err != nil {
return fmt.Errorf("could not open log file for pod %s", podName)
}
defer file.Close()
// read in and scan the file using scanner
// from stdlib
fscan := bufio.NewScanner(file)
// Rather than guessing at byte offsets or using Seek, scan the
// entire file and keep the last two lines; the file should be
// small since it is pod-specific.
for fscan.Scan() {
if linecount > 0 {
line1 = line2
}
line2 = "\n" + fscan.Text()
linecount++
}
if linecount > 0 {
return fmt.Errorf("%v", line1+line2+"\n")
}
return nil
}


@ -1,63 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_test(
name = "go_default_test",
srcs = ["portworx_test.go"],
embed = [":go_default_library"],
deps = [
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/client-go/util/testing:go_default_library",
],
)
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"portworx.go",
"portworx_util.go",
],
importpath = "k8s.io/kubernetes/pkg/volume/portworx",
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/cloud-provider/volume/helpers:go_default_library",
"//vendor/github.com/libopenstorage/openstorage/api:go_default_library",
"//vendor/github.com/libopenstorage/openstorage/api/client:go_default_library",
"//vendor/github.com/libopenstorage/openstorage/api/client/volume:go_default_library",
"//vendor/github.com/libopenstorage/openstorage/api/spec:go_default_library",
"//vendor/github.com/libopenstorage/openstorage/volume:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
"//vendor/k8s.io/utils/strings:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)


@ -1,4 +0,0 @@
# See the OWNERS docs at https://go.k8s.io/owners
maintainers:
- adityadani


@ -1,19 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package portworx contains the internal representation of Portworx
// Block Device volumes.
package portworx


@ -1,445 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package portworx
import (
"fmt"
"os"
volumeclient "github.com/libopenstorage/openstorage/api/client/volume"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
utilstrings "k8s.io/utils/strings"
)
const (
attachContextKey = "context"
attachHostKey = "host"
)
// ProbeVolumePlugins is the primary entrypoint for volume plugins.
func ProbeVolumePlugins() []volume.VolumePlugin {
return []volume.VolumePlugin{&portworxVolumePlugin{nil, nil}}
}
type portworxVolumePlugin struct {
host volume.VolumeHost
util *portworxVolumeUtil
}
var _ volume.VolumePlugin = &portworxVolumePlugin{}
var _ volume.PersistentVolumePlugin = &portworxVolumePlugin{}
var _ volume.DeletableVolumePlugin = &portworxVolumePlugin{}
var _ volume.ProvisionableVolumePlugin = &portworxVolumePlugin{}
var _ volume.ExpandableVolumePlugin = &portworxVolumePlugin{}
const (
portworxVolumePluginName = "kubernetes.io/portworx-volume"
)
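// getPath returns the kubelet's per-pod mount path for the named Portworx volume.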
func getPath(uid types.UID, volName string, host volume.VolumeHost) string {
return host.GetPodVolumeDir(uid, utilstrings.EscapeQualifiedName(portworxVolumePluginName), volName)
}
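// Init creates an OSD driver client against the local Portworx management port and caches it on the plugin.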
func (plugin *portworxVolumePlugin) Init(host volume.VolumeHost) error {
client, err := volumeclient.NewDriverClient(
fmt.Sprintf("http://%s:%d", host.GetHostName(), osdMgmtDefaultPort),
pxdDriverName, osdDriverVersion, pxDriverName)
if err != nil {
return err
}
plugin.host = host
plugin.util = &portworxVolumeUtil{
portworxClient: client,
}
return nil
}
func (plugin *portworxVolumePlugin) GetPluginName() string {
return portworxVolumePluginName
}
func (plugin *portworxVolumePlugin) GetVolumeName(spec *volume.Spec) (string, error) {
volumeSource, _, err := getVolumeSource(spec)
if err != nil {
return "", err
}
return volumeSource.VolumeID, nil
}
func (plugin *portworxVolumePlugin) CanSupport(spec *volume.Spec) bool {
return (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.PortworxVolume != nil) ||
(spec.Volume != nil && spec.Volume.PortworxVolume != nil)
}
func (plugin *portworxVolumePlugin) IsMigratedToCSI() bool {
return false
}
func (plugin *portworxVolumePlugin) RequiresRemount() bool {
return false
}
func (plugin *portworxVolumePlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
return []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
v1.ReadWriteMany,
}
}
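// NewMounter builds a mounter backed by the plugin's Portworx manager and the host's mount implementation.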
func (plugin *portworxVolumePlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
return plugin.newMounterInternal(spec, pod.UID, plugin.util, plugin.host.GetMounter(plugin.GetPluginName()))
}
func (plugin *portworxVolumePlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager portworxManager, mounter mount.Interface) (volume.Mounter, error) {
pwx, readOnly, err := getVolumeSource(spec)
if err != nil {
return nil, err
}
volumeID := pwx.VolumeID
fsType := pwx.FSType
return &portworxVolumeMounter{
portworxVolume: &portworxVolume{
podUID: podUID,
volName: spec.Name(),
volumeID: volumeID,
manager: manager,
mounter: mounter,
plugin: plugin,
MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, spec.Name(), plugin.host)),
},
fsType: fsType,
readOnly: readOnly,
diskMounter: util.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host)}, nil
}
func (plugin *portworxVolumePlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
return plugin.newUnmounterInternal(volName, podUID, plugin.util, plugin.host.GetMounter(plugin.GetPluginName()))
}
func (plugin *portworxVolumePlugin) newUnmounterInternal(volName string, podUID types.UID, manager portworxManager,
mounter mount.Interface) (volume.Unmounter, error) {
return &portworxVolumeUnmounter{
&portworxVolume{
podUID: podUID,
volName: volName,
manager: manager,
mounter: mounter,
plugin: plugin,
MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, volName, plugin.host)),
}}, nil
}
func (plugin *portworxVolumePlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
return plugin.newDeleterInternal(spec, plugin.util)
}
func (plugin *portworxVolumePlugin) newDeleterInternal(spec *volume.Spec, manager portworxManager) (volume.Deleter, error) {
if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.PortworxVolume == nil {
return nil, fmt.Errorf("spec.PersistentVolumeSource.PortworxVolume is nil")
}
return &portworxVolumeDeleter{
portworxVolume: &portworxVolume{
volName: spec.Name(),
volumeID: spec.PersistentVolume.Spec.PortworxVolume.VolumeID,
manager: manager,
plugin: plugin,
}}, nil
}
func (plugin *portworxVolumePlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
return plugin.newProvisionerInternal(options, plugin.util)
}
func (plugin *portworxVolumePlugin) newProvisionerInternal(options volume.VolumeOptions, manager portworxManager) (volume.Provisioner, error) {
return &portworxVolumeProvisioner{
portworxVolume: &portworxVolume{
manager: manager,
plugin: plugin,
},
options: options,
}, nil
}
func (plugin *portworxVolumePlugin) RequiresFSResize() bool {
return false
}
func (plugin *portworxVolumePlugin) ExpandVolumeDevice(
spec *volume.Spec,
newSize resource.Quantity,
oldSize resource.Quantity) (resource.Quantity, error) {
klog.V(4).Infof("Expanding: %s from %v to %v", spec.Name(), oldSize, newSize)
err := plugin.util.ResizeVolume(spec, newSize, plugin.host)
if err != nil {
return oldSize, err
}
klog.V(4).Infof("Successfully resized %s to %v", spec.Name(), newSize)
return newSize, nil
}
func (plugin *portworxVolumePlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
portworxVolume := &v1.Volume{
Name: volumeName,
VolumeSource: v1.VolumeSource{
PortworxVolume: &v1.PortworxVolumeSource{
VolumeID: volumeName,
},
},
}
return volume.NewSpecFromVolume(portworxVolume), nil
}
func (plugin *portworxVolumePlugin) SupportsMountOption() bool {
return false
}
func (plugin *portworxVolumePlugin) SupportsBulkVolumeVerification() bool {
return false
}
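// getVolumeSource extracts the PortworxVolumeSource and its read-only flag from either an inline volume or a PersistentVolume spec.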
func getVolumeSource(
spec *volume.Spec) (*v1.PortworxVolumeSource, bool, error) {
if spec.Volume != nil && spec.Volume.PortworxVolume != nil {
return spec.Volume.PortworxVolume, spec.Volume.PortworxVolume.ReadOnly, nil
} else if spec.PersistentVolume != nil &&
spec.PersistentVolume.Spec.PortworxVolume != nil {
return spec.PersistentVolume.Spec.PortworxVolume, spec.ReadOnly, nil
}
return nil, false, fmt.Errorf("Spec does not reference a Portworx Volume type")
}
// Abstract interface to PD operations.
type portworxManager interface {
// Creates a volume
CreateVolume(provisioner *portworxVolumeProvisioner) (volumeID string, volumeSizeGB int64, labels map[string]string, err error)
// Deletes a volume
DeleteVolume(deleter *portworxVolumeDeleter) error
// Attach a volume
AttachVolume(mounter *portworxVolumeMounter, attachOptions map[string]string) (string, error)
// Detach a volume
DetachVolume(unmounter *portworxVolumeUnmounter) error
// Mount a volume
MountVolume(mounter *portworxVolumeMounter, mountDir string) error
// Unmount a volume
UnmountVolume(unmounter *portworxVolumeUnmounter, mountDir string) error
// Resize a volume
ResizeVolume(spec *volume.Spec, newSize resource.Quantity, host volume.VolumeHost) error
}
// portworxVolume volumes are portworx block devices
// that are attached to the kubelet's host machine and exposed to the pod.
type portworxVolume struct {
volName string
podUID types.UID
// Unique id of the PD, used to find the disk resource in the provider.
volumeID string
// Utility interface that provides API calls to the provider to attach/detach disks.
manager portworxManager
// Mounter interface that provides system calls to mount the global path to the pod local path.
mounter mount.Interface
plugin *portworxVolumePlugin
volume.MetricsProvider
}
type portworxVolumeMounter struct {
*portworxVolume
// Filesystem type, optional.
fsType string
// Specifies whether the disk will be attached as read-only.
readOnly bool
// diskMounter provides the interface that is used to mount the actual block device.
diskMounter *mount.SafeFormatAndMount
}
var _ volume.Mounter = &portworxVolumeMounter{}
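// GetAttributes reports the volume as managed and writable unless it was mounted read-only; SELinux relabeling is not supported.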
func (b *portworxVolumeMounter) GetAttributes() volume.Attributes {
return volume.Attributes{
ReadOnly: b.readOnly,
Managed: !b.readOnly,
SupportsSELinux: false,
}
}
// Checks prior to mount operations to verify that the required components (binaries, etc.)
// to mount the volume are available on the underlying node.
// If not, it returns an error
func (b *portworxVolumeMounter) CanMount() error {
return nil
}
// SetUp attaches the disk and bind mounts to the volume path.
func (b *portworxVolumeMounter) SetUp(fsGroup *int64) error {
return b.SetUpAt(b.GetPath(), fsGroup)
}
// SetUpAt attaches the disk and bind mounts to the volume path.
func (b *portworxVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
notMnt, err := b.mounter.IsLikelyNotMountPoint(dir)
klog.Infof("Portworx Volume set up. Dir: %s %v %v", dir, !notMnt, err)
if err != nil && !os.IsNotExist(err) {
klog.Errorf("Cannot validate mountpoint: %s", dir)
return err
}
if !notMnt {
return nil
}
attachOptions := make(map[string]string)
attachOptions[attachContextKey] = dir
attachOptions[attachHostKey] = b.plugin.host.GetHostName()
if _, err := b.manager.AttachVolume(b, attachOptions); err != nil {
return err
}
klog.V(4).Infof("Portworx Volume %s attached", b.volumeID)
if err := os.MkdirAll(dir, 0750); err != nil {
return err
}
if err := b.manager.MountVolume(b, dir); err != nil {
return err
}
if !b.readOnly {
volume.SetVolumeOwnership(b, fsGroup)
}
klog.Infof("Portworx Volume %s setup at %s", b.volumeID, dir)
return nil
}
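// GetPath returns the pod-local mount path for this volume.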
func (pwx *portworxVolume) GetPath() string {
return getPath(pwx.podUID, pwx.volName, pwx.plugin.host)
}
type portworxVolumeUnmounter struct {
*portworxVolume
}
var _ volume.Unmounter = &portworxVolumeUnmounter{}
// Unmounts the bind mount, and detaches the disk only if the PD
// resource was the last reference to that disk on the kubelet.
func (c *portworxVolumeUnmounter) TearDown() error {
return c.TearDownAt(c.GetPath())
}
// Unmounts the bind mount, and detaches the disk only if the PD
// resource was the last reference to that disk on the kubelet.
func (c *portworxVolumeUnmounter) TearDownAt(dir string) error {
klog.Infof("Portworx Volume TearDown of %s", dir)
if err := c.manager.UnmountVolume(c, dir); err != nil {
return err
}
// Call Portworx Detach Volume.
if err := c.manager.DetachVolume(c); err != nil {
return err
}
return nil
}
type portworxVolumeDeleter struct {
*portworxVolume
}
var _ volume.Deleter = &portworxVolumeDeleter{}
func (d *portworxVolumeDeleter) GetPath() string {
return getPath(d.podUID, d.volName, d.plugin.host)
}
func (d *portworxVolumeDeleter) Delete() error {
return d.manager.DeleteVolume(d)
}
type portworxVolumeProvisioner struct {
*portworxVolume
options volume.VolumeOptions
namespace string
}
var _ volume.Provisioner = &portworxVolumeProvisioner{}
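// Provision validates the claim's access modes, creates the backing Portworx volume, and returns a matching PersistentVolume.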
func (c *portworxVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (*v1.PersistentVolume, error) {
if !util.AccessModesContainedInAll(c.plugin.GetAccessModes(), c.options.PVC.Spec.AccessModes) {
return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", c.options.PVC.Spec.AccessModes, c.plugin.GetAccessModes())
}
if util.CheckPersistentVolumeClaimModeBlock(c.options.PVC) {
return nil, fmt.Errorf("%s does not support block volume provisioning", c.plugin.GetPluginName())
}
volumeID, sizeGiB, labels, err := c.manager.CreateVolume(c)
if err != nil {
return nil, err
}
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: c.options.PVName,
Labels: map[string]string{},
Annotations: map[string]string{
util.VolumeDynamicallyCreatedByKey: "portworx-volume-dynamic-provisioner",
},
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeReclaimPolicy: c.options.PersistentVolumeReclaimPolicy,
AccessModes: c.options.PVC.Spec.AccessModes,
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGiB)),
},
PersistentVolumeSource: v1.PersistentVolumeSource{
PortworxVolume: &v1.PortworxVolumeSource{
VolumeID: volumeID,
},
},
},
}
if len(labels) != 0 {
if pv.Labels == nil {
pv.Labels = make(map[string]string)
}
for k, v := range labels {
pv.Labels[k] = v
}
}
if len(c.options.PVC.Spec.AccessModes) == 0 {
pv.Spec.AccessModes = c.plugin.GetAccessModes()
}
return pv, nil
}


@ -1,237 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package portworx
import (
"fmt"
"os"
"path"
"testing"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/types"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
)
const (
PortworxTestVolume = "portworx-test-vol"
)
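// TestCanSupport verifies the plugin advertises support for both inline and PV-backed Portworx volume specs.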
func TestCanSupport(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("portworxVolumeTest")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/portworx-volume")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
if plug.GetPluginName() != "kubernetes.io/portworx-volume" {
t.Errorf("Wrong name: %s", plug.GetPluginName())
}
if !plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{PortworxVolume: &v1.PortworxVolumeSource{}}}}) {
t.Errorf("Expected true")
}
if !plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{PortworxVolume: &v1.PortworxVolumeSource{}}}}}) {
t.Errorf("Expected true")
}
}
func TestGetAccessModes(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("portworxVolumeTest")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPersistentPluginByName("kubernetes.io/portworx-volume")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
if !volumetest.ContainsAccessMode(plug.GetAccessModes(), v1.ReadWriteOnce) {
t.Errorf("Expected to support AccessModeTypes: %s", v1.ReadWriteOnce)
}
if !volumetest.ContainsAccessMode(plug.GetAccessModes(), v1.ReadWriteMany) {
t.Errorf("Expected to support AccessModeTypes: %s", v1.ReadWriteMany)
}
if volumetest.ContainsAccessMode(plug.GetAccessModes(), v1.ReadOnlyMany) {
t.Errorf("Expected not to support AccessModeTypes: %s", v1.ReadOnlyMany)
}
}
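// fakePortworxManager stubs the portworxManager interface and records whether AttachVolume and MountVolume were invoked.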
type fakePortworxManager struct {
attachCalled bool
mountCalled bool
}
func (fake *fakePortworxManager) AttachVolume(b *portworxVolumeMounter, attachOptions map[string]string) (string, error) {
fake.attachCalled = true
return "", nil
}
func (fake *fakePortworxManager) DetachVolume(c *portworxVolumeUnmounter) error {
return nil
}
func (fake *fakePortworxManager) MountVolume(b *portworxVolumeMounter, mountPath string) error {
fake.mountCalled = true
return nil
}
func (fake *fakePortworxManager) UnmountVolume(c *portworxVolumeUnmounter, mountPath string) error {
return nil
}
func (fake *fakePortworxManager) CreateVolume(c *portworxVolumeProvisioner) (volumeID string, volumeSizeGB int64, labels map[string]string, err error) {
labels = make(map[string]string)
labels["fakeportworxmanager"] = "yes"
return PortworxTestVolume, 100, labels, nil
}
func (fake *fakePortworxManager) DeleteVolume(cd *portworxVolumeDeleter) error {
if cd.volumeID != PortworxTestVolume {
return fmt.Errorf("Deleter got unexpected volume name: %s", cd.volumeID)
}
return nil
}
func (fake *fakePortworxManager) ResizeVolume(spec *volume.Spec, newSize resource.Quantity, volumeHost volume.VolumeHost) error {
return nil
}
func TestPlugin(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("portworxVolumeTest")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/portworx-volume")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
spec := &v1.Volume{
Name: "vol1",
VolumeSource: v1.VolumeSource{
PortworxVolume: &v1.PortworxVolumeSource{
VolumeID: PortworxTestVolume,
FSType: "ext4",
},
},
}
fakeManager := &fakePortworxManager{}
// Test Mounter
fakeMounter := &mount.FakeMounter{}
mounter, err := plug.(*portworxVolumePlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), fakeManager, fakeMounter)
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
}
if mounter == nil {
t.Errorf("Got a nil Mounter")
}
volPath := path.Join(tmpDir, "pods/poduid/volumes/kubernetes.io~portworx-volume/vol1")
mountPath := mounter.GetPath()
if mountPath != volPath {
t.Errorf("Got unexpected path: %s", mountPath)
}
if err := mounter.SetUp(nil); err != nil {
t.Errorf("Expected success, got: %v", err)
}
if _, err := os.Stat(mountPath); err != nil {
if os.IsNotExist(err) {
t.Errorf("SetUp() failed, volume path not created: %s", mountPath)
} else {
t.Errorf("SetUp() failed: %v", err)
}
}
if !fakeManager.attachCalled {
t.Errorf("Attach watch not called")
}
if !fakeManager.mountCalled {
t.Errorf("Mount watch not called")
}
// Test Unmounter
fakeManager = &fakePortworxManager{}
unmounter, err := plug.(*portworxVolumePlugin).newUnmounterInternal("vol1", types.UID("poduid"), fakeManager, fakeMounter)
if err != nil {
t.Errorf("Failed to make a new Unmounter: %v", err)
}
if unmounter == nil {
t.Errorf("Got a nil Unmounter")
}
if err := unmounter.TearDown(); err != nil {
t.Errorf("Expected success, got: %v", err)
}
// Test Provisioner
options := volume.VolumeOptions{
PVC: volumetest.CreateTestPVC("100Gi", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}),
PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete,
}
provisioner, err := plug.(*portworxVolumePlugin).newProvisionerInternal(options, &fakePortworxManager{})
if err != nil {
t.Errorf("Error creating a new provisioner:%v", err)
}
persistentSpec, err := provisioner.Provision(nil, nil)
if err != nil {
t.Errorf("Provision() failed: %v", err)
}
if persistentSpec.Spec.PersistentVolumeSource.PortworxVolume.VolumeID != PortworxTestVolume {
t.Errorf("Provision() returned unexpected volume ID: %s", persistentSpec.Spec.PersistentVolumeSource.PortworxVolume.VolumeID)
}
cap := persistentSpec.Spec.Capacity[v1.ResourceStorage]
size := cap.Value()
if size != 100*1024*1024*1024 {
t.Errorf("Provision() returned unexpected volume size: %v", size)
}
if persistentSpec.Labels["fakeportworxmanager"] != "yes" {
t.Errorf("Provision() returned unexpected labels: %v", persistentSpec.Labels)
}
// Test Deleter
volSpec := &volume.Spec{
PersistentVolume: persistentSpec,
}
deleter, err := plug.(*portworxVolumePlugin).newDeleterInternal(volSpec, &fakePortworxManager{})
if err != nil {
t.Errorf("Error creating a new Deleter:%v", err)
}
err = deleter.Delete()
if err != nil {
t.Errorf("Deleter() failed: %v", err)
}
}


@ -1,374 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package portworx
import (
"fmt"
osdapi "github.com/libopenstorage/openstorage/api"
osdclient "github.com/libopenstorage/openstorage/api/client"
volumeclient "github.com/libopenstorage/openstorage/api/client/volume"
osdspec "github.com/libopenstorage/openstorage/api/spec"
volumeapi "github.com/libopenstorage/openstorage/volume"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
volumehelpers "k8s.io/cloud-provider/volume/helpers"
"k8s.io/klog"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/volume"
)
const (
osdMgmtDefaultPort = 9001
osdDriverVersion = "v1"
pxdDriverName = "pxd"
pvcClaimLabel = "pvc"
pvcNamespaceLabel = "namespace"
pxServiceName = "portworx-service"
pxDriverName = "pxd-sched"
)
type portworxVolumeUtil struct {
portworxClient *osdclient.Client
}
// CreateVolume creates a Portworx volume.
func (util *portworxVolumeUtil) CreateVolume(p *portworxVolumeProvisioner) (string, int64, map[string]string, error) {
driver, err := util.getPortworxDriver(p.plugin.host)
if err != nil || driver == nil {
klog.Errorf("Failed to get portworx driver. Err: %v", err)
return "", 0, nil, err
}
klog.Infof("Creating Portworx volume for PVC: %v", p.options.PVC.Name)
capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
// Portworx Volumes are specified in GiB
requestGiB := volumehelpers.RoundUpToGiB(capacity)
// Perform a best-effort parsing of parameters. Portworx 1.2.9 and later parses volume parameters from
// spec.VolumeLabels. So even if below SpecFromOpts() fails to parse certain parameters or
// doesn't support new parameters, the server-side processing will parse it correctly.
// We still need to call SpecFromOpts() here to handle cases where someone is running Portworx 1.2.8 and lower.
specHandler := osdspec.NewSpecHandler()
spec, locator, source, _ := specHandler.SpecFromOpts(p.options.Parameters)
if spec == nil {
spec = specHandler.DefaultSpec()
}
// Pass all parameters as volume labels for Portworx server-side processing
if spec.VolumeLabels == nil {
spec.VolumeLabels = make(map[string]string, 0)
}
for k, v := range p.options.Parameters {
spec.VolumeLabels[k] = v
}
// Update the requested size in the spec
spec.Size = uint64(requestGiB * volumehelpers.GiB)
// Change the Portworx Volume name to PV name
if locator == nil {
locator = &osdapi.VolumeLocator{
VolumeLabels: make(map[string]string),
}
}
locator.Name = p.options.PVName
// Add claim Name as a part of Portworx Volume Labels
locator.VolumeLabels[pvcClaimLabel] = p.options.PVC.Name
locator.VolumeLabels[pvcNamespaceLabel] = p.options.PVC.Namespace
for k, v := range p.options.PVC.Annotations {
if _, present := spec.VolumeLabels[k]; present {
klog.Warningf("not saving annotation: %s=%s in spec labels due to an existing key", k, v)
continue
}
spec.VolumeLabels[k] = v
}
volumeID, err := driver.Create(locator, source, spec)
if err != nil {
klog.Errorf("Error creating Portworx Volume : %v", err)
return "", 0, nil, err
}
klog.Infof("Successfully created Portworx volume for PVC: %v", p.options.PVC.Name)
return volumeID, requestGiB, nil, err
}
// DeleteVolume deletes a Portworx volume
func (util *portworxVolumeUtil) DeleteVolume(d *portworxVolumeDeleter) error {
driver, err := util.getPortworxDriver(d.plugin.host)
if err != nil || driver == nil {
klog.Errorf("Failed to get portworx driver. Err: %v", err)
return err
}
err = driver.Delete(d.volumeID)
if err != nil {
klog.Errorf("Error deleting Portworx Volume (%v): %v", d.volName, err)
return err
}
return nil
}
// AttachVolume attaches a Portworx Volume
func (util *portworxVolumeUtil) AttachVolume(m *portworxVolumeMounter, attachOptions map[string]string) (string, error) {
driver, err := util.getLocalPortworxDriver(m.plugin.host)
if err != nil || driver == nil {
klog.Errorf("Failed to get portworx driver. Err: %v", err)
return "", err
}
devicePath, err := driver.Attach(m.volName, attachOptions)
if err != nil {
klog.Errorf("Error attaching Portworx Volume (%v): %v", m.volName, err)
return "", err
}
return devicePath, nil
}
// DetachVolume detaches a Portworx Volume
func (util *portworxVolumeUtil) DetachVolume(u *portworxVolumeUnmounter) error {
driver, err := util.getLocalPortworxDriver(u.plugin.host)
if err != nil || driver == nil {
klog.Errorf("Failed to get portworx driver. Err: %v", err)
return err
}
err = driver.Detach(u.volName, false /*doNotForceDetach*/)
if err != nil {
klog.Errorf("Error detaching Portworx Volume (%v): %v", u.volName, err)
return err
}
return nil
}
// MountVolume mounts a Portworx Volume on the specified mountPath
func (util *portworxVolumeUtil) MountVolume(m *portworxVolumeMounter, mountPath string) error {
driver, err := util.getLocalPortworxDriver(m.plugin.host)
if err != nil || driver == nil {
klog.Errorf("Failed to get portworx driver. Err: %v", err)
return err
}
err = driver.Mount(m.volName, mountPath)
if err != nil {
klog.Errorf("Error mounting Portworx Volume (%v) on Path (%v): %v", m.volName, mountPath, err)
return err
}
return nil
}
// UnmountVolume unmounts a Portworx Volume
func (util *portworxVolumeUtil) UnmountVolume(u *portworxVolumeUnmounter, mountPath string) error {
driver, err := util.getLocalPortworxDriver(u.plugin.host)
if err != nil || driver == nil {
klog.Errorf("Failed to get portworx driver. Err: %v", err)
return err
}
err = driver.Unmount(u.volName, mountPath)
if err != nil {
klog.Errorf("Error unmounting Portworx Volume (%v) on Path (%v): %v", u.volName, mountPath, err)
return err
}
return nil
}
func (util *portworxVolumeUtil) ResizeVolume(spec *volume.Spec, newSize resource.Quantity, volumeHost volume.VolumeHost) error {
driver, err := util.getPortworxDriver(volumeHost)
if err != nil || driver == nil {
klog.Errorf("Failed to get portworx driver. Err: %v", err)
return err
}
vols, err := driver.Inspect([]string{spec.Name()})
if err != nil {
return err
}
if len(vols) != 1 {
return fmt.Errorf("failed to inspect Portworx volume: %s. Found: %d volumes", spec.Name(), len(vols))
}
vol := vols[0]
newSizeInBytes := uint64(volumehelpers.RoundUpToGiB(newSize) * volumehelpers.GiB)
if vol.Spec.Size >= newSizeInBytes {
klog.Infof("Portworx volume: %s already at size: %d greater than or equal to new "+
"requested size: %d. Skipping resize.", spec.Name(), vol.Spec.Size, newSizeInBytes)
return nil
}
vol.Spec.Size = newSizeInBytes
err = driver.Set(spec.Name(), vol.Locator, vol.Spec)
if err != nil {
return err
}
// check if the volume's size actually got updated
vols, err = driver.Inspect([]string{spec.Name()})
if err != nil {
return err
}
if len(vols) != 1 {
return fmt.Errorf("failed to inspect resized Portworx volume: %s. Found: %d volumes", spec.Name(), len(vols))
}
updatedVol := vols[0]
if updatedVol.Spec.Size < vol.Spec.Size {
return fmt.Errorf("Portworx volume: %s doesn't match expected size after resize. expected:%v actual:%v",
spec.Name(), vol.Spec.Size, updatedVol.Spec.Size)
}
return nil
}
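// isClientValid issues a cheap Versions call to verify the cached OSD client still responds.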
func isClientValid(client *osdclient.Client) (bool, error) {
if client == nil {
return false, nil
}
_, err := client.Versions(osdapi.OsdVolumePath)
if err != nil {
klog.Errorf("portworx client failed driver versions check. Err: %v", err)
return false, err
}
return true, nil
}
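// createDriverClient dials the OSD management endpoint and returns the client only if it passes the validity check.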
func createDriverClient(hostname string, port int32) (*osdclient.Client, error) {
client, err := volumeclient.NewDriverClient(fmt.Sprintf("http://%s:%d", hostname, port),
pxdDriverName, osdDriverVersion, pxDriverName)
if err != nil {
return nil, err
}
if isValid, err := isClientValid(client); isValid {
return client, nil
} else {
return nil, err
}
}
// getPortworxDriver returns a Portworx volume driver which can be used for cluster wide operations.
// Operations like create and delete volume don't need to be restricted to local volume host since
// any node in the Portworx cluster can co-ordinate the create/delete request and forward the operations to
// the Portworx node that will own/owns the data.
func (util *portworxVolumeUtil) getPortworxDriver(volumeHost volume.VolumeHost) (volumeapi.VolumeDriver, error) {
// check if existing saved client is valid
if isValid, _ := isClientValid(util.portworxClient); isValid {
return volumeclient.VolumeDriver(util.portworxClient), nil
}
// create new client
var err error
util.portworxClient, err = createDriverClient(volumeHost.GetHostName(), osdMgmtDefaultPort) // for backward compatibility
if err != nil || util.portworxClient == nil {
// Create client from portworx k8s service.
svc, err := getPortworxService(volumeHost)
if err != nil {
return nil, err
}
// The port here is always the default one since it's the service port
util.portworxClient, err = createDriverClient(svc.Spec.ClusterIP, osdMgmtDefaultPort)
if err != nil || util.portworxClient == nil {
klog.Errorf("Failed to connect to portworx service. Err: %v", err)
return nil, err
}
klog.Infof("Using portworx cluster service at: %v:%d as api endpoint",
svc.Spec.ClusterIP, osdMgmtDefaultPort)
} else {
klog.Infof("Using portworx service at: %v:%d as api endpoint",
volumeHost.GetHostName(), osdMgmtDefaultPort)
}
return volumeclient.VolumeDriver(util.portworxClient), nil
}
// getLocalPortworxDriver returns driver connected to Portworx API server on volume host.
// This is required to force certain operations (mount, unmount, detach, attach) to
// go to the volume host instead of the k8s service which might route it to any host. This pertains to how
// Portworx mounts and attaches a volume to the running container. The node getting these requests needs to
// see the pod container mounts (specifically /var/lib/kubelet/pods/<pod_id>)
func (util *portworxVolumeUtil) getLocalPortworxDriver(volumeHost volume.VolumeHost) (volumeapi.VolumeDriver, error) {
if util.portworxClient != nil {
// check if existing saved client is valid
if isValid, _ := isClientValid(util.portworxClient); isValid {
return volumeclient.VolumeDriver(util.portworxClient), nil
}
}
// Lookup port
svc, err := getPortworxService(volumeHost)
if err != nil {
return nil, err
}
osdMgmtPort := lookupPXAPIPortFromService(svc)
util.portworxClient, err = createDriverClient(volumeHost.GetHostName(), osdMgmtPort)
if err != nil {
return nil, err
}
klog.Infof("Using portworx local service at: %v:%d as api endpoint",
volumeHost.GetHostName(), osdMgmtPort)
return volumeclient.VolumeDriver(util.portworxClient), nil
}
// lookupPXAPIPortFromService goes over all the ports in the given service and returns the target
// port for osdMgmtDefaultPort
func lookupPXAPIPortFromService(svc *v1.Service) int32 {
for _, p := range svc.Spec.Ports {
if p.Port == osdMgmtDefaultPort {
return p.TargetPort.IntVal
}
}
return osdMgmtDefaultPort // default
}
// getPortworxService returns the portworx cluster service from the API server
func getPortworxService(host volume.VolumeHost) (*v1.Service, error) {
kubeClient := host.GetKubeClient()
if kubeClient == nil {
err := fmt.Errorf("Failed to get kubeclient when creating portworx client")
klog.Errorf(err.Error())
return nil, err
}
opts := metav1.GetOptions{}
svc, err := kubeClient.CoreV1().Services(api.NamespaceSystem).Get(pxServiceName, opts)
if err != nil {
klog.Errorf("Failed to get service. Err: %v", err)
return nil, err
}
if svc == nil {
err = fmt.Errorf("Service: %v not found. Consult Portworx docs to deploy it.", pxServiceName)
klog.Errorf(err.Error())
return nil, err
}
return svc, nil
}


@ -1,60 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"quobyte.go",
"quobyte_util.go",
],
importpath = "k8s.io/kubernetes/pkg/volume/quobyte",
deps = [
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/cloud-provider/volume/helpers:go_default_library",
"//vendor/github.com/pborman/uuid:go_default_library",
"//vendor/github.com/quobyte/api:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
"//vendor/k8s.io/utils/strings:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["quobyte_test.go"],
embed = [":go_default_library"],
deps = [
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/client-go/util/testing:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)


@ -1,12 +0,0 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- rootfs
- saad-ali
reviewers:
- johscheuer
- saad-ali
- jsafrane
- rootfs
- jingxu97
- msau42


@ -1,19 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package quobyte contains the internal representation of Quobyte
// volumes.
package quobyte


@ -1,501 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package quobyte
import (
"fmt"
"os"
"path"
gostrings "strings"
"github.com/pborman/uuid"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
utilstrings "k8s.io/utils/strings"
)
// ProbeVolumePlugins is the primary entrypoint for volume plugins.
func ProbeVolumePlugins() []volume.VolumePlugin {
return []volume.VolumePlugin{&quobytePlugin{nil}}
}
type quobytePlugin struct {
host volume.VolumeHost
}
// quobyteAPIConfig holds the credentials and endpoint used to
// authenticate against the Quobyte API server.
type quobyteAPIConfig struct {
quobyteUser string
quobytePassword string
quobyteAPIServer string
}
var _ volume.VolumePlugin = &quobytePlugin{}
var _ volume.PersistentVolumePlugin = &quobytePlugin{}
var _ volume.DeletableVolumePlugin = &quobytePlugin{}
var _ volume.ProvisionableVolumePlugin = &quobytePlugin{}
var _ volume.Provisioner = &quobyteVolumeProvisioner{}
var _ volume.Deleter = &quobyteVolumeDeleter{}
const (
quobytePluginName = "kubernetes.io/quobyte"
)
func (plugin *quobytePlugin) Init(host volume.VolumeHost) error {
plugin.host = host
return nil
}
func (plugin *quobytePlugin) GetPluginName() string {
return quobytePluginName
}
func (plugin *quobytePlugin) GetVolumeName(spec *volume.Spec) (string, error) {
volumeSource, _, err := getVolumeSource(spec)
if err != nil {
return "", err
}
return fmt.Sprintf(
"%v:%v",
volumeSource.Registry,
volumeSource.Volume), nil
}
func (plugin *quobytePlugin) CanSupport(spec *volume.Spec) bool {
if (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.Quobyte == nil) ||
(spec.Volume != nil && spec.Volume.Quobyte == nil) {
return false
}
// If Quobyte is already mounted we don't need to check if the binary is installed
if mounter, err := plugin.newMounterInternal(spec, nil, plugin.host.GetMounter(plugin.GetPluginName())); err == nil {
qm, _ := mounter.(*quobyteMounter)
pluginDir := plugin.host.GetPluginDir(utilstrings.EscapeQualifiedName(quobytePluginName))
if mounted, err := qm.pluginDirIsMounted(pluginDir); mounted && err == nil {
klog.V(4).Infof("quobyte: can support")
return true
}
} else {
klog.V(4).Infof("quobyte: Error: %v", err)
}
exec := plugin.host.GetExec(plugin.GetPluginName())
if out, err := exec.Run("ls", "/sbin/mount.quobyte"); err == nil {
klog.V(4).Infof("quobyte: can support: %s", string(out))
return true
}
return false
}
func (plugin *quobytePlugin) IsMigratedToCSI() bool {
return false
}
func (plugin *quobytePlugin) RequiresRemount() bool {
return false
}
func (plugin *quobytePlugin) SupportsMountOption() bool {
return true
}
func (plugin *quobytePlugin) SupportsBulkVolumeVerification() bool {
return false
}
func (plugin *quobytePlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
return []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
v1.ReadOnlyMany,
v1.ReadWriteMany,
}
}
func getVolumeSource(spec *volume.Spec) (*v1.QuobyteVolumeSource, bool, error) {
if spec.Volume != nil && spec.Volume.Quobyte != nil {
return spec.Volume.Quobyte, spec.Volume.Quobyte.ReadOnly, nil
} else if spec.PersistentVolume != nil &&
spec.PersistentVolume.Spec.Quobyte != nil {
return spec.PersistentVolume.Spec.Quobyte, spec.ReadOnly, nil
}
return nil, false, fmt.Errorf("Spec does not reference a Quobyte volume type")
}
func (plugin *quobytePlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
quobyteVolume := &v1.Volume{
Name: volumeName,
VolumeSource: v1.VolumeSource{
Quobyte: &v1.QuobyteVolumeSource{
Volume: volumeName,
},
},
}
return volume.NewSpecFromVolume(quobyteVolume), nil
}
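// NewMounter builds a mounter for the given spec using the host's mount implementation.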
func (plugin *quobytePlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
return plugin.newMounterInternal(spec, pod, plugin.host.GetMounter(plugin.GetPluginName()))
}
func (plugin *quobytePlugin) newMounterInternal(spec *volume.Spec, pod *v1.Pod, mounter mount.Interface) (volume.Mounter, error) {
source, readOnly, err := getVolumeSource(spec)
if err != nil {
return nil, err
}
return &quobyteMounter{
quobyte: &quobyte{
volName: spec.Name(),
user: source.User,
group: source.Group,
mounter: mounter,
pod: pod,
volume: source.Volume,
plugin: plugin,
},
registry: source.Registry,
readOnly: readOnly,
mountOptions: util.MountOptionFromSpec(spec),
}, nil
}
func (plugin *quobytePlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
return plugin.newUnmounterInternal(volName, podUID, plugin.host.GetMounter(plugin.GetPluginName()))
}
func (plugin *quobytePlugin) newUnmounterInternal(volName string, podUID types.UID, mounter mount.Interface) (volume.Unmounter, error) {
return &quobyteUnmounter{
&quobyte{
volName: volName,
mounter: mounter,
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: podUID}},
plugin: plugin,
},
}, nil
}
// Quobyte volumes represent a bare host directory mount of a Quobyte export.
type quobyte struct {
volName string
pod *v1.Pod
user string
group string
volume string
tenant string
config string
mounter mount.Interface
plugin *quobytePlugin
volume.MetricsNil
}
type quobyteMounter struct {
*quobyte
registry string
readOnly bool
mountOptions []string
}
var _ volume.Mounter = &quobyteMounter{}
func (mounter *quobyteMounter) GetAttributes() volume.Attributes {
return volume.Attributes{
ReadOnly: mounter.readOnly,
Managed: false,
SupportsSELinux: false,
}
}
// Checks prior to mount operations to verify that the required components (binaries, etc.)
// to mount the volume are available on the underlying node.
// If not, it returns an error
func (mounter *quobyteMounter) CanMount() error {
return nil
}
// SetUp attaches the disk and bind mounts to the volume path.
func (mounter *quobyteMounter) SetUp(fsGroup *int64) error {
pluginDir := mounter.plugin.host.GetPluginDir(utilstrings.EscapeQualifiedName(quobytePluginName))
return mounter.SetUpAt(pluginDir, fsGroup)
}
func (mounter *quobyteMounter) SetUpAt(dir string, fsGroup *int64) error {
// Check if Quobyte is already mounted on the host in the plugin dir;
// if so, reuse that mountpoint instead of creating a new one.
// IsLikelyNotMountPoint alone is not enough because it does not check the mount type.
if mounted, err := mounter.pluginDirIsMounted(dir); err != nil {
return err
} else if mounted {
return nil
}
if err := os.MkdirAll(dir, 0750); err != nil {
return err
}
var options []string
options = append(options, "allow-usermapping-in-volumename")
if mounter.readOnly {
options = append(options, "ro")
}
// correctTraillingSlash adds a trailing slash to the registry address if one is missing.
mountOptions := util.JoinMountOptions(mounter.mountOptions, options)
if err := mounter.mounter.Mount(mounter.correctTraillingSlash(mounter.registry), dir, "quobyte", mountOptions); err != nil {
return fmt.Errorf("quobyte: mount failed: %v", err)
}
klog.V(4).Infof("quobyte: mount set up: %s", dir)
return nil
}
// GetPath returns the path to the user specific mount of a Quobyte volume
// Returns a path in the format ../user#group@volume
func (quobyteVolume *quobyte) GetPath() string {
user := quobyteVolume.user
if len(user) == 0 {
user = "root"
}
group := quobyteVolume.group
if len(group) == 0 {
group = "nfsnobody"
}
// Quobyte has only one mount in the PluginDir where all Volumes are mounted
// The Quobyte client does a fixed-user mapping
pluginDir := quobyteVolume.plugin.host.GetPluginDir(utilstrings.EscapeQualifiedName(quobytePluginName))
return path.Join(pluginDir, fmt.Sprintf("%s#%s@%s", user, group, quobyteVolume.volume))
}
type quobyteUnmounter struct {
*quobyte
}
var _ volume.Unmounter = &quobyteUnmounter{}
func (unmounter *quobyteUnmounter) TearDown() error {
return unmounter.TearDownAt(unmounter.GetPath())
}
// We don't need to unmount on the host because only one mount exists
func (unmounter *quobyteUnmounter) TearDownAt(dir string) error {
return nil
}
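// quobyteVolumeDeleter removes dynamically provisioned volumes through the Quobyte API configured in the StorageClass.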
type quobyteVolumeDeleter struct {
*quobyteMounter
pv *v1.PersistentVolume
}
func (plugin *quobytePlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.Quobyte == nil {
return nil, fmt.Errorf("spec.PersistentVolume.Spec.Quobyte is nil")
}
return plugin.newDeleterInternal(spec)
}
func (plugin *quobytePlugin) newDeleterInternal(spec *volume.Spec) (volume.Deleter, error) {
source, readOnly, err := getVolumeSource(spec)
if err != nil {
return nil, err
}
return &quobyteVolumeDeleter{
quobyteMounter: &quobyteMounter{
quobyte: &quobyte{
volName: spec.Name(),
user: source.User,
group: source.Group,
volume: source.Volume,
plugin: plugin,
tenant: source.Tenant,
},
registry: source.Registry,
readOnly: readOnly,
},
pv: spec.PersistentVolume,
}, nil
}
func (plugin *quobytePlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
return plugin.newProvisionerInternal(options)
}
func (plugin *quobytePlugin) newProvisionerInternal(options volume.VolumeOptions) (volume.Provisioner, error) {
return &quobyteVolumeProvisioner{
quobyteMounter: &quobyteMounter{
quobyte: &quobyte{
plugin: plugin,
},
},
options: options,
}, nil
}
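// quobyteVolumeProvisioner dynamically provisions new Quobyte volumes from StorageClass parameters.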
type quobyteVolumeProvisioner struct {
*quobyteMounter
options volume.VolumeOptions
}
func (provisioner *quobyteVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (*v1.PersistentVolume, error) {
if !util.AccessModesContainedInAll(provisioner.plugin.GetAccessModes(), provisioner.options.PVC.Spec.AccessModes) {
return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", provisioner.options.PVC.Spec.AccessModes, provisioner.plugin.GetAccessModes())
}
if util.CheckPersistentVolumeClaimModeBlock(provisioner.options.PVC) {
return nil, fmt.Errorf("%s does not support block volume provisioning", provisioner.plugin.GetPluginName())
}
if provisioner.options.PVC.Spec.Selector != nil {
return nil, fmt.Errorf("claim Selector is not supported")
}
provisioner.config = "BASE"
provisioner.tenant = "DEFAULT"
createQuota := false
cfg, err := parseAPIConfig(provisioner.plugin, provisioner.options.Parameters)
if err != nil {
return nil, err
}
for k, v := range provisioner.options.Parameters {
switch gostrings.ToLower(k) {
case "registry":
provisioner.registry = v
case "user":
provisioner.user = v
case "group":
provisioner.group = v
case "quobytetenant":
provisioner.tenant = v
case "quobyteconfig":
provisioner.config = v
case "createquota":
createQuota = gostrings.ToLower(v) == "true"
case "adminsecretname",
"adminsecretnamespace",
"quobyteapiserver":
continue
default:
return nil, fmt.Errorf("invalid option %q for volume plugin %s", k, provisioner.plugin.GetPluginName())
}
}
if !validateRegistry(provisioner.registry) {
return nil, fmt.Errorf("Quobyte registry missing or malformed: must be a host:port pair or multiple pairs separated by commas")
}
// create random image name
provisioner.volume = fmt.Sprintf("kubernetes-dynamic-pvc-%s", uuid.NewUUID())
manager := &quobyteVolumeManager{
config: cfg,
}
vol, sizeGB, err := manager.createVolume(provisioner, createQuota)
if err != nil {
return nil, err
}
pv := new(v1.PersistentVolume)
metav1.SetMetaDataAnnotation(&pv.ObjectMeta, util.VolumeDynamicallyCreatedByKey, "quobyte-dynamic-provisioner")
pv.Spec.PersistentVolumeSource.Quobyte = vol
pv.Spec.PersistentVolumeReclaimPolicy = provisioner.options.PersistentVolumeReclaimPolicy
pv.Spec.AccessModes = provisioner.options.PVC.Spec.AccessModes
if len(pv.Spec.AccessModes) == 0 {
pv.Spec.AccessModes = provisioner.plugin.GetAccessModes()
}
pv.Spec.Capacity = v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
}
pv.Spec.MountOptions = provisioner.options.MountOptions
pv.Spec.PersistentVolumeSource.Quobyte.Tenant = provisioner.tenant
return pv, nil
}
func (deleter *quobyteVolumeDeleter) GetPath() string {
return deleter.quobyte.GetPath()
}
func (deleter *quobyteVolumeDeleter) Delete() error {
class, err := util.GetClassForVolume(deleter.plugin.host.GetKubeClient(), deleter.pv)
if err != nil {
return err
}
cfg, err := parseAPIConfig(deleter.plugin, class.Parameters)
if err != nil {
return err
}
manager := &quobyteVolumeManager{
config: cfg,
}
return manager.deleteVolume(deleter)
}
// Parse API configuration (url, username and password) out of class.Parameters.
func parseAPIConfig(plugin *quobytePlugin, params map[string]string) (*quobyteAPIConfig, error) {
var apiServer, secretName string
secretNamespace := "default"
deleteKeys := []string{}
for k, v := range params {
switch gostrings.ToLower(k) {
case "adminsecretname":
secretName = v
deleteKeys = append(deleteKeys, k)
case "adminsecretnamespace":
secretNamespace = v
deleteKeys = append(deleteKeys, k)
case "quobyteapiserver":
apiServer = v
deleteKeys = append(deleteKeys, k)
}
}
if len(apiServer) == 0 {
return nil, fmt.Errorf("Quobyte API server missing or malformed: must be a http(s)://host:port pair or multiple pairs separated by commas")
}
secretMap, err := util.GetSecretForPV(secretNamespace, secretName, quobytePluginName, plugin.host.GetKubeClient())
if err != nil {
return nil, err
}
cfg := &quobyteAPIConfig{
quobyteAPIServer: apiServer,
}
var ok bool
if cfg.quobyteUser, ok = secretMap["user"]; !ok {
return nil, fmt.Errorf("Missing \"user\" in secret %s/%s", secretNamespace, secretName)
}
if cfg.quobytePassword, ok = secretMap["password"]; !ok {
return nil, fmt.Errorf("Missing \"password\" in secret %s/%s", secretNamespace, secretName)
}
return cfg, nil
}


@ -1,195 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package quobyte
import (
"fmt"
"os"
"testing"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes/fake"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
)
func TestCanSupport(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("quobyte_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/quobyte")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
if plug.GetPluginName() != "kubernetes.io/quobyte" {
t.Errorf("Wrong name: %s", plug.GetPluginName())
}
if plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{}}}}) {
t.Errorf("Expected false")
}
if plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{}}}) {
t.Errorf("Expected false")
}
}
func TestGetAccessModes(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("quobyte_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPersistentPluginByName("kubernetes.io/quobyte")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
if !volumetest.ContainsAccessMode(plug.GetAccessModes(), v1.ReadWriteOnce) || !volumetest.ContainsAccessMode(plug.GetAccessModes(), v1.ReadOnlyMany) || !volumetest.ContainsAccessMode(plug.GetAccessModes(), v1.ReadWriteMany) {
t.Errorf("Expected three AccessModeTypes: %s, %s, and %s", v1.ReadWriteOnce, v1.ReadOnlyMany, v1.ReadWriteMany)
}
}
func doTestPlugin(t *testing.T, spec *volume.Spec) {
tmpDir, err := utiltesting.MkTmpdir("quobyte_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/quobyte")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
mounter, err := plug.(*quobytePlugin).newMounterInternal(spec, pod, &mount.FakeMounter{})
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
}
if mounter == nil {
t.Error("Got a nil Mounter")
}
volumePath := mounter.GetPath()
if volumePath != fmt.Sprintf("%s/plugins/kubernetes.io~quobyte/root#root@vol", tmpDir) {
t.Errorf("Got unexpected path: %s expected: %s", volumePath, fmt.Sprintf("%s/plugins/kubernetes.io~quobyte/root#root@vol", tmpDir))
}
if err := mounter.SetUp(nil); err != nil {
t.Errorf("Expected success, got: %v", err)
}
unmounter, err := plug.(*quobytePlugin).newUnmounterInternal("vol", types.UID("poduid"), &mount.FakeMounter{})
if err != nil {
t.Errorf("Failed to make a new unmounter: %v", err)
}
if unmounter == nil {
t.Error("Got a nil unmounter")
}
if err := unmounter.TearDown(); err != nil {
t.Errorf("Expected success, got: %v", err)
}
// We don't need to check tear down, since we don't unmount Quobyte volumes
}
func TestPluginVolume(t *testing.T) {
vol := &v1.Volume{
Name: "vol1",
VolumeSource: v1.VolumeSource{
Quobyte: &v1.QuobyteVolumeSource{Registry: "reg:7861", Volume: "vol", ReadOnly: false, User: "root", Group: "root"},
},
}
doTestPlugin(t, volume.NewSpecFromVolume(vol))
}
func TestPluginPersistentVolume(t *testing.T) {
vol := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "vol1",
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
Quobyte: &v1.QuobyteVolumeSource{Registry: "reg:7861", Volume: "vol", ReadOnly: false, User: "root", Group: "root"},
},
},
}
doTestPlugin(t, volume.NewSpecFromPersistentVolume(vol, false))
}
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "pvA",
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
Quobyte: &v1.QuobyteVolumeSource{Registry: "reg:7861", Volume: "vol", ReadOnly: false, User: "root", Group: "root"},
},
ClaimRef: &v1.ObjectReference{
Name: "claimA",
},
},
}
claim := &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: "claimA",
Namespace: "nsA",
},
Spec: v1.PersistentVolumeClaimSpec{
VolumeName: "pvA",
},
Status: v1.PersistentVolumeClaimStatus{
Phase: v1.ClaimBound,
},
}
tmpDir, err := utiltesting.MkTmpdir("quobyte_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
client := fake.NewSimpleClientset(pv, claim)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, client, nil))
plug, _ := plugMgr.FindPluginByName(quobytePluginName)
// The readOnly bool is supplied by the persistent-claim volume source when its mounter creates other volumes
spec := volume.NewSpecFromPersistentVolume(pv, true)
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})
if mounter == nil {
t.Fatalf("Got a nil Mounter")
}
if !mounter.GetAttributes().ReadOnly {
t.Errorf("Expected true for mounter.IsReadOnly")
}
}

View File

@@ -1,123 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package quobyte
import (
"net"
"os"
"path/filepath"
"strings"
"k8s.io/api/core/v1"
volumehelpers "k8s.io/cloud-provider/volume/helpers"
quobyteapi "github.com/quobyte/api"
"k8s.io/klog"
)
type quobyteVolumeManager struct {
config *quobyteAPIConfig
}
func (manager *quobyteVolumeManager) createVolume(provisioner *quobyteVolumeProvisioner, createQuota bool) (quobyte *v1.QuobyteVolumeSource, size int, err error) {
capacity := provisioner.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
volumeSize, err := volumehelpers.RoundUpToGiBInt(capacity)
if err != nil {
return nil, 0, err
}
// Quobyte volumes have no intrinsic size (they can grow without limit),
// so to simulate a size constraint we set a quota for logical space here.
volumeRequest := &quobyteapi.CreateVolumeRequest{
Name: provisioner.volume,
RootUserID: provisioner.user,
RootGroupID: provisioner.group,
TenantID: provisioner.tenant,
ConfigurationName: provisioner.config,
}
quobyteClient := manager.createQuobyteClient()
volumeUUID, err := quobyteClient.CreateVolume(volumeRequest)
if err != nil {
return &v1.QuobyteVolumeSource{}, volumeSize, err
}
// Set Quota for Volume with specified byte size
if createQuota {
err = quobyteClient.SetVolumeQuota(volumeUUID, uint64(capacity.Value()))
if err != nil {
return &v1.QuobyteVolumeSource{}, volumeSize, err
}
}
klog.V(4).Infof("Created Quobyte volume %s", provisioner.volume)
return &v1.QuobyteVolumeSource{
Registry: provisioner.registry,
Volume: provisioner.volume,
User: provisioner.user,
Group: provisioner.group,
}, volumeSize, nil
}
func (manager *quobyteVolumeManager) deleteVolume(deleter *quobyteVolumeDeleter) error {
return manager.createQuobyteClient().DeleteVolumeByName(deleter.volume, deleter.tenant)
}
func (manager *quobyteVolumeManager) createQuobyteClient() *quobyteapi.QuobyteClient {
return quobyteapi.NewQuobyteClient(
manager.config.quobyteAPIServer,
manager.config.quobyteUser,
manager.config.quobytePassword,
)
}
func (mounter *quobyteMounter) pluginDirIsMounted(pluginDir string) (bool, error) {
mounts, err := mounter.mounter.List()
if err != nil {
return false, err
}
for _, mountPoint := range mounts {
if strings.HasPrefix(mountPoint.Type, "quobyte") {
continue
}
if mountPoint.Path == pluginDir {
klog.V(4).Infof("quobyte: found mountpoint %s in /proc/mounts", mountPoint.Path)
return true, nil
}
}
return false, nil
}
func (mounter *quobyteMounter) correctTraillingSlash(regStr string) string {
return filepath.Clean(regStr) + string(os.PathSeparator)
}
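
A quick behavioral sketch of the helper above: filepath.Clean drops any trailing separator, so appending exactly one yields a normalized result regardless of input:

package main

import (
    "fmt"
    "os"
    "path/filepath"
)

func main() {
    // All three inputs print "root#root@vol/" on Unix-like systems.
    for _, reg := range []string{"root#root@vol", "root#root@vol/", "root#root@vol//"} {
        fmt.Println(filepath.Clean(reg) + string(os.PathSeparator))
    }
}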
func validateRegistry(registry string) bool {
if len(registry) == 0 {
return false
}
for _, hostPortPair := range strings.Split(registry, ",") {
if _, _, err := net.SplitHostPort(hostPortPair); err != nil {
return false
}
}
return true
}
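
Usage sketch for the validation above: every comma-separated entry must parse as a host:port pair via net.SplitHostPort:

package main

import (
    "fmt"
    "net"
    "strings"
)

func main() {
    for _, registry := range []string{"reg1:7861,reg2:7861", "reg1", ""} {
        valid := len(registry) > 0
        for _, pair := range strings.Split(registry, ",") {
            if _, _, err := net.SplitHostPort(pair); err != nil {
                valid = false
            }
        }
        // "reg1:7861,reg2:7861" is accepted; "reg1" (no port) and "" are rejected.
        fmt.Printf("%q valid=%t\n", registry, valid)
    }
}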

View File

@@ -1,72 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"attacher.go",
"disk_manager.go",
"doc.go",
"rbd.go",
"rbd_util.go",
],
importpath = "k8s.io/kubernetes/pkg/volume/rbd",
deps = [
"//pkg/features:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/util/node:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/volumepathhandler:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/cloud-provider/volume/helpers:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
"//vendor/k8s.io/utils/path:go_default_library",
"//vendor/k8s.io/utils/strings:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["rbd_test.go"],
embed = [":go_default_library"],
deps = [
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/client-go/util/testing:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@@ -1,13 +0,0 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- rootfs
- jsafrane
reviewers:
- sjenning
- saad-ali
- jsafrane
- rootfs
- jingxu97
- msau42
- cofyc

View File

@@ -1,238 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package rbd
import (
"fmt"
"os"
"time"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volutil "k8s.io/kubernetes/pkg/volume/util"
)
// NewAttacher implements AttachableVolumePlugin.NewAttacher.
func (plugin *rbdPlugin) NewAttacher() (volume.Attacher, error) {
return plugin.newAttacherInternal(&RBDUtil{})
}
// NewDeviceMounter implements DeviceMountableVolumePlugin.NewDeviceMounter
func (plugin *rbdPlugin) NewDeviceMounter() (volume.DeviceMounter, error) {
return plugin.NewAttacher()
}
func (plugin *rbdPlugin) newAttacherInternal(manager diskManager) (volume.Attacher, error) {
return &rbdAttacher{
plugin: plugin,
manager: manager,
mounter: volutil.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host),
}, nil
}
// NewDetacher implements AttachableVolumePlugin.NewDetacher.
func (plugin *rbdPlugin) NewDetacher() (volume.Detacher, error) {
return plugin.newDetacherInternal(&RBDUtil{})
}
// NewDeviceUnmounter implements DeviceMountableVolumePlugin.NewDeviceUnmounter
func (plugin *rbdPlugin) NewDeviceUnmounter() (volume.DeviceUnmounter, error) {
return plugin.NewDetacher()
}
func (plugin *rbdPlugin) newDetacherInternal(manager diskManager) (volume.Detacher, error) {
return &rbdDetacher{
plugin: plugin,
manager: manager,
mounter: volutil.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host),
}, nil
}
// GetDeviceMountRefs implements AttachableVolumePlugin.GetDeviceMountRefs.
func (plugin *rbdPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
mounter := plugin.host.GetMounter(plugin.GetPluginName())
return mounter.GetMountRefs(deviceMountPath)
}
func (plugin *rbdPlugin) CanAttach(spec *volume.Spec) bool {
return true
}
// rbdAttacher implements volume.Attacher interface.
type rbdAttacher struct {
plugin *rbdPlugin
mounter *mount.SafeFormatAndMount
manager diskManager
}
var _ volume.Attacher = &rbdAttacher{}
var _ volume.DeviceMounter = &rbdAttacher{}
// Attach implements Attacher.Attach.
// We do not lock image here, because it requires kube-controller-manager to
// access external `rbd` utility. And there is no need since AttachDetach
// controller will not try to attach RWO volumes which are already attached to
// other nodes.
func (attacher *rbdAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
return "", nil
}
// VolumesAreAttached implements Attacher.VolumesAreAttached.
// There is no way to confirm whether the volume is attached or not from
// outside of the kubelet node. This method needs to return true always, like
// iSCSI, FC plugin.
func (attacher *rbdAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) {
volumesAttachedCheck := make(map[*volume.Spec]bool)
for _, spec := range specs {
volumesAttachedCheck[spec] = true
}
return volumesAttachedCheck, nil
}
// WaitForAttach implements Attacher.WaitForAttach. It's called by kubelet to
// attach volume onto the node.
// This method is idempotent, callers are responsible for retrying on failure.
func (attacher *rbdAttacher) WaitForAttach(spec *volume.Spec, devicePath string, pod *v1.Pod, timeout time.Duration) (string, error) {
klog.V(4).Infof("rbd: waiting for attach volume (name: %s) for pod (name: %s, uid: %s)", spec.Name(), pod.Name, pod.UID)
mounter, err := attacher.plugin.createMounterFromVolumeSpecAndPod(spec, pod)
if err != nil {
klog.Warningf("failed to create mounter: %v", spec)
return "", err
}
realDevicePath, err := attacher.manager.AttachDisk(*mounter)
if err != nil {
return "", err
}
klog.V(3).Infof("rbd: successfully wait for attach volume (spec: %s, pool: %s, image: %s) at %s", spec.Name(), mounter.Pool, mounter.Image, realDevicePath)
return realDevicePath, nil
}
// GetDeviceMountPath implements Attacher.GetDeviceMountPath.
func (attacher *rbdAttacher) GetDeviceMountPath(spec *volume.Spec) (string, error) {
img, err := getVolumeSourceImage(spec)
if err != nil {
return "", err
}
pool, err := getVolumeSourcePool(spec)
if err != nil {
return "", err
}
return makePDNameInternal(attacher.plugin.host, pool, img), nil
}
// MountDevice implements Attacher.MountDevice. It is called by the kubelet to
// mount device at the given mount path.
// This method is idempotent, callers are responsible for retrying on failure.
func (attacher *rbdAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
klog.V(4).Infof("rbd: mouting device %s to %s", devicePath, deviceMountPath)
notMnt, err := attacher.mounter.IsLikelyNotMountPoint(deviceMountPath)
if err != nil {
if os.IsNotExist(err) {
if err := os.MkdirAll(deviceMountPath, 0750); err != nil {
return err
}
notMnt = true
} else {
return err
}
}
if !notMnt {
return nil
}
fstype, err := getVolumeSourceFSType(spec)
if err != nil {
return err
}
ro, err := getVolumeSourceReadOnly(spec)
if err != nil {
return err
}
options := []string{}
if ro {
options = append(options, "ro")
}
mountOptions := volutil.MountOptionFromSpec(spec, options...)
err = attacher.mounter.FormatAndMount(devicePath, deviceMountPath, fstype, mountOptions)
if err != nil {
os.Remove(deviceMountPath)
return fmt.Errorf("rbd: failed to mount device %s at %s (fstype: %s), error %v", devicePath, deviceMountPath, fstype, err)
}
klog.V(3).Infof("rbd: successfully mount device %s at %s (fstype: %s)", devicePath, deviceMountPath, fstype)
return nil
}
// rbdDetacher implements volume.Detacher interface.
type rbdDetacher struct {
plugin *rbdPlugin
manager diskManager
mounter *mount.SafeFormatAndMount
}
var _ volume.Detacher = &rbdDetacher{}
var _ volume.DeviceUnmounter = &rbdDetacher{}
// UnmountDevice implements Detacher.UnmountDevice. It unmounts the global
// mount of the RBD image. This is called once all bind mounts have been
// unmounted.
// Internally, it does four things:
// - Unmount device from deviceMountPath.
// - Detach device from the node.
// - Remove lock if found. (No need to check volume readonly or not, because
// device is not on the node anymore, it's safe to remove lock.)
// - Remove the deviceMountPath at last.
// This method is idempotent, callers are responsible for retrying on failure.
func (detacher *rbdDetacher) UnmountDevice(deviceMountPath string) error {
if pathExists, pathErr := mount.PathExists(deviceMountPath); pathErr != nil {
return fmt.Errorf("Error checking if path exists: %v", pathErr)
} else if !pathExists {
klog.Warningf("Warning: Unmount skipped because path does not exist: %v", deviceMountPath)
return nil
}
devicePath, _, err := mount.GetDeviceNameFromMount(detacher.mounter, deviceMountPath)
if err != nil {
return err
}
// Unmount the device from the device mount point.
klog.V(4).Infof("rbd: unmouting device mountpoint %s", deviceMountPath)
if err = detacher.mounter.Unmount(deviceMountPath); err != nil {
return err
}
klog.V(3).Infof("rbd: successfully umount device mountpath %s", deviceMountPath)
klog.V(4).Infof("rbd: detaching device %s", devicePath)
err = detacher.manager.DetachDisk(detacher.plugin, deviceMountPath, devicePath)
if err != nil {
return err
}
klog.V(3).Infof("rbd: successfully detach device %s", devicePath)
err = os.Remove(deviceMountPath)
if err != nil {
return err
}
klog.V(3).Infof("rbd: successfully remove device mount point %s", deviceMountPath)
return nil
}
// Detach implements Detacher.Detach.
func (detacher *rbdDetacher) Detach(volumeName string, nodeName types.NodeName) error {
return nil
}

View File

@@ -1,134 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
//
// diskManager interface and diskSetup/TearDown functions abstract commonly used procedures to setup a block volume
// rbd volume implements diskManager, calls diskSetup when creating a volume, and calls diskTearDown inside volume unmounter.
// TODO: consolidate, refactor, and share diskManager among iSCSI, GCE PD, and RBD
//
package rbd
import (
"fmt"
"os"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
)
// Abstract interface to disk operations.
type diskManager interface {
// MakeGlobalPDName creates global persistent disk path.
MakeGlobalPDName(disk rbd) string
// MakeGlobalVDPDName creates global block disk path.
MakeGlobalVDPDName(disk rbd) string
// Attaches the disk to the kubelet's host machine.
// If it successfully attaches, the path to the device
// is returned. Otherwise, an error will be returned.
AttachDisk(disk rbdMounter) (string, error)
// Detaches the disk from the kubelet's host machine.
DetachDisk(plugin *rbdPlugin, deviceMountPath string, device string) error
// Detaches the block disk from the kubelet's host machine.
DetachBlockDisk(disk rbdDiskUnmapper, mntPath string) error
// Creates a rbd image.
CreateImage(provisioner *rbdVolumeProvisioner) (r *v1.RBDPersistentVolumeSource, volumeSizeGB int, err error)
// Deletes a rbd image.
DeleteImage(deleter *rbdVolumeDeleter) error
// Expands a rbd image
ExpandImage(expander *rbdVolumeExpander, oldSize resource.Quantity, newSize resource.Quantity) (resource.Quantity, error)
}
// utility to mount a disk based filesystem
func diskSetUp(manager diskManager, b rbdMounter, volPath string, mounter mount.Interface, fsGroup *int64) error {
globalPDPath := manager.MakeGlobalPDName(*b.rbd)
notMnt, err := mounter.IsLikelyNotMountPoint(globalPDPath)
if err != nil && !os.IsNotExist(err) {
klog.Errorf("cannot validate mountpoint: %s", globalPDPath)
return err
}
if notMnt {
return fmt.Errorf("no device is mounted at %s", globalPDPath)
}
notMnt, err = mounter.IsLikelyNotMountPoint(volPath)
if err != nil && !os.IsNotExist(err) {
klog.Errorf("cannot validate mountpoint: %s", volPath)
return err
}
if !notMnt {
return nil
}
if err := os.MkdirAll(volPath, 0750); err != nil {
klog.Errorf("failed to mkdir:%s", volPath)
return err
}
// Perform a bind mount to the full path to allow duplicate mounts of the same disk.
options := []string{"bind"}
if (&b).GetAttributes().ReadOnly {
options = append(options, "ro")
}
mountOptions := util.JoinMountOptions(b.mountOptions, options)
err = mounter.Mount(globalPDPath, volPath, "", mountOptions)
if err != nil {
klog.Errorf("failed to bind mount:%s", globalPDPath)
return err
}
klog.V(3).Infof("rbd: successfully bind mount %s to %s with options %v", globalPDPath, volPath, mountOptions)
if !b.ReadOnly {
volume.SetVolumeOwnership(&b, fsGroup)
}
return nil
}
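
The resulting bind mount, shaped as the equivalent shell command for illustration (paths are hypothetical):

package main

import (
    "fmt"
    "strings"
)

func main() {
    // diskSetUp bind-mounts the shared device path into the per-pod path;
    // "ro" is appended only for read-only attachments.
    globalPDPath := "/var/lib/kubelet/plugins/kubernetes.io/rbd/mounts/pool1-image-image1" // hypothetical
    volPath := "/var/lib/kubelet/pods/poduid/volumes/kubernetes.io~rbd/vol1"               // hypothetical
    options := []string{"bind", "ro"}
    fmt.Printf("mount -o %s %s %s\n", strings.Join(options, ","), globalPDPath, volPath)
}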
// utility to tear down a disk based filesystem
func diskTearDown(manager diskManager, c rbdUnmounter, volPath string, mounter mount.Interface) error {
notMnt, err := mounter.IsLikelyNotMountPoint(volPath)
if err != nil && !os.IsNotExist(err) {
klog.Errorf("cannot validate mountpoint: %s", volPath)
return err
}
if notMnt {
klog.V(3).Infof("volume path %s is not a mountpoint, deleting", volPath)
return os.Remove(volPath)
}
// Unmount the bind-mount inside this pod.
if err := mounter.Unmount(volPath); err != nil {
klog.Errorf("failed to umount %s", volPath)
return err
}
notMnt, mntErr := mounter.IsLikelyNotMountPoint(volPath)
if mntErr != nil && !os.IsNotExist(mntErr) {
klog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
return mntErr
}
if notMnt {
if err := os.Remove(volPath); err != nil {
klog.V(2).Info("Error removing mountpoint ", volPath, ": ", err)
return err
}
}
return nil
}

View File

@@ -1,19 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package rbd contains the internal representation of Rados Block Store (Ceph)
// volumes.
package rbd // import "k8s.io/kubernetes/pkg/volume/rbd"

File diff suppressed because it is too large

View File

@@ -1,709 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package rbd
import (
"fmt"
"os"
"path/filepath"
"reflect"
"runtime"
"strings"
"sync"
"testing"
"time"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/client-go/kubernetes/fake"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
)
const (
testVolName = "vol-1234"
testRBDImage = "volume-a4b47414-a675-47dc-a9cc-c223f13439b0"
testRBDPool = "volumes"
testGlobalPath = "plugins/kubernetes.io/rbd/volumeDevices/volumes-image-volume-a4b47414-a675-47dc-a9cc-c223f13439b0"
)
func TestGetVolumeSpecFromGlobalMapPath(t *testing.T) {
// make our test path for fake GlobalMapPath
// /tmp symbolizes our pluginDir
// /tmp/testGlobalPathXXXXX/plugins/kubernetes.io/rbd/volumeDevices/pdVol1
tmpVDir, err := utiltesting.MkTmpdir("rbdBlockTest")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
// deferred clean up
defer os.RemoveAll(tmpVDir)
expectedGlobalPath := filepath.Join(tmpVDir, testGlobalPath)
// Bad path
badspec, err := getVolumeSpecFromGlobalMapPath("", testVolName)
if badspec != nil || err == nil {
t.Fatalf("Expected not to get spec from GlobalMapPath but did")
}
// Good Path
spec, err := getVolumeSpecFromGlobalMapPath(expectedGlobalPath, testVolName)
if spec == nil || err != nil {
t.Fatalf("Failed to get spec from GlobalMapPath: %v", err)
}
if spec.PersistentVolume.Name != testVolName {
t.Errorf("Invalid spec name for GlobalMapPath spec: %s", spec.PersistentVolume.Name)
}
if spec.PersistentVolume.Spec.RBD.RBDPool != testRBDPool {
t.Errorf("Invalid RBDPool from GlobalMapPath spec: %s", spec.PersistentVolume.Spec.RBD.RBDPool)
}
if spec.PersistentVolume.Spec.RBD.RBDImage != testRBDImage {
t.Errorf("Invalid RBDImage from GlobalMapPath spec: %s", spec.PersistentVolume.Spec.RBD.RBDImage)
}
block := v1.PersistentVolumeBlock
specMode := spec.PersistentVolume.Spec.VolumeMode
if specMode == nil {
t.Errorf("Invalid volumeMode from GlobalMapPath spec: %v - %v", specMode, block)
}
if *specMode != block {
t.Errorf("Invalid volumeMode from GlobalMapPath spec: %v - %v", *specMode, block)
}
}
func TestCanSupport(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("rbd_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/rbd")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
if plug.GetPluginName() != "kubernetes.io/rbd" {
t.Errorf("Wrong name: %s", plug.GetPluginName())
}
if plug.CanSupport(&volume.Spec{}) {
t.Errorf("Expected false")
}
if plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{}}}) {
t.Errorf("Expected false")
}
if !plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{RBD: &v1.RBDVolumeSource{}}}}) {
t.Errorf("Expected true")
}
if plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{}}}) {
t.Errorf("Expected false")
}
if plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{}}}}) {
t.Errorf("Expected false")
}
if !plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{RBD: &v1.RBDPersistentVolumeSource{}}}}}) {
t.Errorf("Expected true")
}
}
type fakeDiskManager struct {
// Make sure we can run tests in parallel.
mutex sync.RWMutex
// Key format: "<pool>/<image>"
rbdImageLocks map[string]bool
rbdMapIndex int
rbdDevices map[string]bool
}
func NewFakeDiskManager() *fakeDiskManager {
return &fakeDiskManager{
rbdImageLocks: make(map[string]bool),
rbdMapIndex: 0,
rbdDevices: make(map[string]bool),
}
}
func (fake *fakeDiskManager) MakeGlobalPDName(rbd rbd) string {
return makePDNameInternal(rbd.plugin.host, rbd.Pool, rbd.Image)
}
func (fake *fakeDiskManager) MakeGlobalVDPDName(rbd rbd) string {
return makePDNameInternal(rbd.plugin.host, rbd.Pool, rbd.Image)
}
func (fake *fakeDiskManager) AttachDisk(b rbdMounter) (string, error) {
fake.mutex.Lock()
defer fake.mutex.Unlock()
fake.rbdMapIndex++
devicePath := fmt.Sprintf("/dev/rbd%d", fake.rbdMapIndex)
fake.rbdDevices[devicePath] = true
return devicePath, nil
}
func (fake *fakeDiskManager) DetachDisk(r *rbdPlugin, deviceMountPath string, device string) error {
fake.mutex.Lock()
defer fake.mutex.Unlock()
ok := fake.rbdDevices[device]
if !ok {
return fmt.Errorf("rbd: failed to detach device %s, it does not exist", device)
}
delete(fake.rbdDevices, device)
return nil
}
func (fake *fakeDiskManager) DetachBlockDisk(r rbdDiskUnmapper, device string) error {
fake.mutex.Lock()
defer fake.mutex.Unlock()
ok := fake.rbdDevices[device]
if !ok {
return fmt.Errorf("rbd: failed to detach device %s, it does not exist", device)
}
delete(fake.rbdDevices, device)
return nil
}
func (fake *fakeDiskManager) CreateImage(provisioner *rbdVolumeProvisioner) (r *v1.RBDPersistentVolumeSource, volumeSizeGB int, err error) {
return nil, 0, fmt.Errorf("not implemented")
}
func (fake *fakeDiskManager) DeleteImage(deleter *rbdVolumeDeleter) error {
return fmt.Errorf("not implemented")
}
func (fake *fakeDiskManager) Fencing(r rbdMounter, nodeName string) error {
fake.mutex.Lock()
defer fake.mutex.Unlock()
key := fmt.Sprintf("%s/%s", r.Pool, r.Image)
isLocked, ok := fake.rbdImageLocks[key]
if ok && isLocked {
// not expected in testing
return fmt.Errorf("%s is already locked", key)
}
fake.rbdImageLocks[key] = true
return nil
}
func (fake *fakeDiskManager) Defencing(r rbdMounter, nodeName string) error {
fake.mutex.Lock()
defer fake.mutex.Unlock()
key := fmt.Sprintf("%s/%s", r.Pool, r.Image)
isLocked, ok := fake.rbdImageLocks[key]
if !ok || !isLocked {
// not expected in testing
return fmt.Errorf("%s is not locked", key)
}
delete(fake.rbdImageLocks, key)
return nil
}
func (fake *fakeDiskManager) IsLocked(r rbdMounter, nodeName string) (bool, error) {
fake.mutex.RLock()
defer fake.mutex.RUnlock()
key := fmt.Sprintf("%s/%s", r.Pool, r.Image)
isLocked, ok := fake.rbdImageLocks[key]
return ok && isLocked, nil
}
func (fake *fakeDiskManager) ExpandImage(rbdExpander *rbdVolumeExpander, oldSize resource.Quantity, newSize resource.Quantity) (resource.Quantity, error) {
return resource.Quantity{}, fmt.Errorf("not implemented")
}
// checkMounterLog checks that fakeMounter has the expected number of log entries and that the last action equals expectedAction.
func checkMounterLog(t *testing.T, fakeMounter *mount.FakeMounter, expected int, expectedAction mount.FakeAction) {
if len(fakeMounter.Log) != expected {
t.Fatalf("fakeMounter should have %d logs, actual: %d", expected, len(fakeMounter.Log))
}
lastIndex := len(fakeMounter.Log) - 1
lastAction := fakeMounter.Log[lastIndex]
if !reflect.DeepEqual(expectedAction, lastAction) {
t.Fatalf("fakeMounter.Log[%d] should be %#v, not: %#v", lastIndex, expectedAction, lastAction)
}
}
func doTestPlugin(t *testing.T, c *testcase) {
fakeVolumeHost := volumetest.NewFakeVolumeHost(c.root, nil, nil)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, fakeVolumeHost)
plug, err := plugMgr.FindPluginByName("kubernetes.io/rbd")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
fakeMounter := fakeVolumeHost.GetMounter(plug.GetPluginName()).(*mount.FakeMounter)
fakeNodeName := types.NodeName("localhost")
fdm := NewFakeDiskManager()
// attacher
attacher, err := plug.(*rbdPlugin).newAttacherInternal(fdm)
if err != nil {
t.Errorf("Failed to make a new Attacher: %v", err)
}
deviceAttachPath, err := attacher.Attach(c.spec, fakeNodeName)
if err != nil {
t.Fatal(err)
}
devicePath, err := attacher.WaitForAttach(c.spec, deviceAttachPath, c.pod, time.Second*10)
if err != nil {
t.Fatal(err)
}
if devicePath != c.expectedDevicePath {
t.Errorf("Unexpected path, expected %q, not: %q", c.expectedDevicePath, devicePath)
}
deviceMountPath, err := attacher.GetDeviceMountPath(c.spec)
if err != nil {
t.Fatal(err)
}
if deviceMountPath != c.expectedDeviceMountPath {
t.Errorf("Unexpected mount path, expected %q, not: %q", c.expectedDeviceMountPath, deviceMountPath)
}
err = attacher.MountDevice(c.spec, devicePath, deviceMountPath)
if err != nil {
t.Fatal(err)
}
if _, err := os.Stat(deviceMountPath); err != nil {
if os.IsNotExist(err) {
t.Errorf("Attacher.MountDevice() failed, device mount path not created: %s", deviceMountPath)
} else {
t.Errorf("Attacher.MountDevice() failed: %v", err)
}
}
checkMounterLog(t, fakeMounter, 1, mount.FakeAction{Action: "mount", Target: c.expectedDeviceMountPath, Source: devicePath, FSType: "ext4"})
// mounter
mounter, err := plug.(*rbdPlugin).newMounterInternal(c.spec, c.pod.UID, fdm, "secrets")
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
}
if mounter == nil {
t.Error("Got a nil Mounter")
}
path := mounter.GetPath()
if path != c.expectedPodMountPath {
t.Errorf("Unexpected path, expected %q, got: %q", c.expectedPodMountPath, path)
}
if err := mounter.SetUp(nil); err != nil {
t.Errorf("Expected success, got: %v", err)
}
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) {
t.Errorf("SetUp() failed, volume path not created: %s", path)
} else {
t.Errorf("SetUp() failed: %v", err)
}
}
checkMounterLog(t, fakeMounter, 2, mount.FakeAction{Action: "mount", Target: c.expectedPodMountPath, Source: devicePath, FSType: ""})
// unmounter
unmounter, err := plug.(*rbdPlugin).newUnmounterInternal(c.spec.Name(), c.pod.UID, fdm)
if err != nil {
t.Errorf("Failed to make a new Unmounter: %v", err)
}
if unmounter == nil {
t.Error("Got a nil Unmounter")
}
if err := unmounter.TearDown(); err != nil {
t.Errorf("Expected success, got: %v", err)
}
if _, err := os.Stat(path); err == nil {
t.Errorf("TearDown() failed, volume path still exists: %s", path)
} else if !os.IsNotExist(err) {
t.Errorf("TearDown() failed: %v", err)
}
checkMounterLog(t, fakeMounter, 3, mount.FakeAction{Action: "unmount", Target: c.expectedPodMountPath, Source: "", FSType: ""})
// detacher
detacher, err := plug.(*rbdPlugin).newDetacherInternal(fdm)
if err != nil {
t.Errorf("Failed to make a new Attacher: %v", err)
}
err = detacher.UnmountDevice(deviceMountPath)
if err != nil {
t.Fatalf("Detacher.UnmountDevice failed to unmount %s", deviceMountPath)
}
checkMounterLog(t, fakeMounter, 4, mount.FakeAction{Action: "unmount", Target: c.expectedDeviceMountPath, Source: "", FSType: ""})
err = detacher.Detach(deviceMountPath, fakeNodeName)
if err != nil {
t.Fatalf("Detacher.Detach failed to detach %s from %s", deviceMountPath, fakeNodeName)
}
}
type testcase struct {
spec *volume.Spec
root string
pod *v1.Pod
expectedDevicePath string
expectedDeviceMountPath string
expectedPodMountPath string
}
func TestPlugin(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("rbd_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
tmpDir, err = filepath.EvalSymlinks(tmpDir)
if err != nil {
t.Fatal(err)
}
podUID := uuid.NewUUID()
var cases []*testcase
cases = append(cases, &testcase{
spec: volume.NewSpecFromVolume(&v1.Volume{
Name: "vol1",
VolumeSource: v1.VolumeSource{
RBD: &v1.RBDVolumeSource{
CephMonitors: []string{"a", "b"},
RBDPool: "pool1",
RBDImage: "image1",
FSType: "ext4",
ReadOnly: true,
},
},
}),
root: tmpDir,
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "testpod",
Namespace: "testns",
UID: podUID,
},
},
expectedDevicePath: "/dev/rbd1",
expectedDeviceMountPath: fmt.Sprintf("%s/plugins/kubernetes.io/rbd/mounts/pool1-image-image1", tmpDir),
expectedPodMountPath: fmt.Sprintf("%s/pods/%s/volumes/kubernetes.io~rbd/vol1", tmpDir, podUID),
})
cases = append(cases, &testcase{
spec: volume.NewSpecFromPersistentVolume(&v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "vol2",
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
RBD: &v1.RBDPersistentVolumeSource{
CephMonitors: []string{"a", "b"},
RBDPool: "pool2",
RBDImage: "image2",
FSType: "ext4",
},
},
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadOnlyMany},
},
}, false),
root: tmpDir,
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "testpod",
Namespace: "testns",
UID: podUID,
},
},
expectedDevicePath: "/dev/rbd1",
expectedDeviceMountPath: fmt.Sprintf("%s/plugins/kubernetes.io/rbd/mounts/pool2-image-image2", tmpDir),
expectedPodMountPath: fmt.Sprintf("%s/pods/%s/volumes/kubernetes.io~rbd/vol2", tmpDir, podUID),
})
for i := 0; i < len(cases); i++ {
doTestPlugin(t, cases[i])
}
}
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("rbd_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "pvA",
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
RBD: &v1.RBDPersistentVolumeSource{
CephMonitors: []string{"a", "b"},
RBDImage: "bar",
FSType: "ext4",
},
},
ClaimRef: &v1.ObjectReference{
Name: "claimA",
},
},
}
claim := &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: "claimA",
Namespace: "nsA",
},
Spec: v1.PersistentVolumeClaimSpec{
VolumeName: "pvA",
},
Status: v1.PersistentVolumeClaimStatus{
Phase: v1.ClaimBound,
},
}
client := fake.NewSimpleClientset(pv, claim)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, client, nil))
plug, _ := plugMgr.FindPluginByName(rbdPluginName)
// The readOnly bool is supplied by the persistent-claim volume source when its mounter creates other volumes
spec := volume.NewSpecFromPersistentVolume(pv, true)
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})
if mounter == nil {
t.Fatalf("Got a nil Mounter")
}
if !mounter.GetAttributes().ReadOnly {
t.Errorf("Expected true for mounter.IsReadOnly")
}
}
func TestGetSecretNameAndNamespace(t *testing.T) {
secretName := "test-secret-name"
secretNamespace := "test-secret-namespace"
volSpec := &volume.Spec{
PersistentVolume: &v1.PersistentVolume{
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
RBD: &v1.RBDPersistentVolumeSource{
CephMonitors: []string{"a", "b"},
RBDImage: "bar",
FSType: "ext4",
},
},
},
},
}
secretRef := new(v1.SecretReference)
secretRef.Name = secretName
secretRef.Namespace = secretNamespace
volSpec.PersistentVolume.Spec.PersistentVolumeSource.RBD.SecretRef = secretRef
foundSecretName, foundSecretNamespace, err := getSecretNameAndNamespace(volSpec, "default")
if err != nil {
t.Errorf("getSecretNameAndNamespace failed to get Secret's name and namespace: %v", err)
}
if strings.Compare(secretName, foundSecretName) != 0 || strings.Compare(secretNamespace, foundSecretNamespace) != 0 {
t.Errorf("getSecretNameAndNamespace returned incorrect values, expected %s and %s but got %s and %s", secretName, secretNamespace, foundSecretName, foundSecretNamespace)
}
}
// https://github.com/kubernetes/kubernetes/issues/57744
func TestGetDeviceMountPath(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("rbd_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
fakeVolumeHost := volumetest.NewFakeVolumeHost(tmpDir, nil, nil)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, fakeVolumeHost)
plug, err := plugMgr.FindPluginByName("kubernetes.io/rbd")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
fdm := NewFakeDiskManager()
// attacher
attacher, err := plug.(*rbdPlugin).newAttacherInternal(fdm)
if err != nil {
t.Errorf("Failed to make a new Attacher: %v", err)
}
pool, image := "pool", "image"
spec := volume.NewSpecFromVolume(&v1.Volume{
Name: "vol",
VolumeSource: v1.VolumeSource{
RBD: &v1.RBDVolumeSource{
CephMonitors: []string{"a", "b"},
RBDPool: pool,
RBDImage: image,
FSType: "ext4",
},
},
})
deprecatedDir := fmt.Sprintf("%s/plugins/kubernetes.io/rbd/rbd/%s-image-%s", tmpDir, pool, image)
canonicalDir := fmt.Sprintf("%s/plugins/kubernetes.io/rbd/mounts/%s-image-%s", tmpDir, pool, image)
type testCase struct {
deprecated bool
targetPath string
}
for _, c := range []testCase{
{false, canonicalDir},
{true, deprecatedDir},
} {
if c.deprecated {
// This is a deprecated device mount path, we create it,
// and hope attacher.GetDeviceMountPath return c.targetPath.
if err := os.MkdirAll(c.targetPath, 0700); err != nil {
t.Fatalf("Create deprecated mount path failed: %v", err)
}
}
mountPath, err := attacher.GetDeviceMountPath(spec)
if err != nil {
t.Fatalf("GetDeviceMountPath failed: %v", err)
}
if mountPath != c.targetPath {
t.Errorf("Mismatch device mount path: wanted %s, got %s", c.targetPath, mountPath)
}
}
}
// https://github.com/kubernetes/kubernetes/issues/57744
func TestConstructVolumeSpec(t *testing.T) {
if runtime.GOOS == "darwin" {
t.Skipf("TestConstructVolumeSpec is not supported on GOOS=%s", runtime.GOOS)
}
tmpDir, err := utiltesting.MkTmpdir("rbd_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
fakeVolumeHost := volumetest.NewFakeVolumeHost(tmpDir, nil, nil)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, fakeVolumeHost)
plug, err := plugMgr.FindPluginByName("kubernetes.io/rbd")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
fakeMounter := fakeVolumeHost.GetMounter(plug.GetPluginName()).(*mount.FakeMounter)
pool, image, volumeName := "pool", "image", "vol"
podMountPath := fmt.Sprintf("%s/pods/pod123/volumes/kubernetes.io~rbd/%s", tmpDir, volumeName)
deprecatedDir := fmt.Sprintf("%s/plugins/kubernetes.io/rbd/rbd/%s-image-%s", tmpDir, pool, image)
canonicalDir := fmt.Sprintf("%s/plugins/kubernetes.io/rbd/mounts/%s-image-%s", tmpDir, pool, image)
type testCase struct {
volumeName string
targetPath string
}
for _, c := range []testCase{
{"vol", canonicalDir},
{"vol", deprecatedDir},
} {
if err := os.MkdirAll(c.targetPath, 0700); err != nil {
t.Fatalf("Create mount path %s failed: %v", c.targetPath, err)
}
if err = fakeMounter.Mount("/dev/rbd0", c.targetPath, "fake", nil); err != nil {
t.Fatalf("Mount %s to %s failed: %v", c.targetPath, podMountPath, err)
}
if err = fakeMounter.Mount(c.targetPath, podMountPath, "fake", []string{"bind"}); err != nil {
t.Fatalf("Mount %s to %s failed: %v", c.targetPath, podMountPath, err)
}
spec, err := plug.ConstructVolumeSpec(c.volumeName, podMountPath)
if err != nil {
t.Errorf("ConstructVolumeSpec failed: %v", err)
} else {
if spec.Volume.RBD.RBDPool != pool {
t.Errorf("Mismatch rbd pool: wanted %s, got %s", pool, spec.Volume.RBD.RBDPool)
}
if spec.Volume.RBD.RBDImage != image {
t.Fatalf("Mismatch rbd image: wanted %s, got %s", image, spec.Volume.RBD.RBDImage)
}
}
if err = fakeMounter.Unmount(podMountPath); err != nil {
t.Fatalf("Unmount pod path %s failed: %v", podMountPath, err)
}
if err = fakeMounter.Unmount(c.targetPath); err != nil {
t.Fatalf("Unmount device path %s failed: %v", c.targetPath, err)
}
}
}
func TestGetAccessModes(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("rbd_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPersistentPluginByName("kubernetes.io/rbd")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
modes := plug.GetAccessModes()
if !volumetest.ContainsAccessMode(modes, v1.ReadWriteOnce) || !volumetest.ContainsAccessMode(modes, v1.ReadOnlyMany) {
t.Errorf("Expected AccessModeTypes: %s and %s", v1.ReadWriteOnce, v1.ReadOnlyMany)
}
}
func TestRequiresRemount(t *testing.T) {
tmpDir, _ := utiltesting.MkTmpdir("rbd_test")
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, _ := plugMgr.FindPluginByName("kubernetes.io/rbd")
has := plug.RequiresRemount()
if has {
t.Errorf("Exepcted RequiresRemount to be false, got %t", has)
}
}
func TestGetRbdImageSize(t *testing.T) {
for i, c := range []struct {
Output string
TargetSize int
}{
{
Output: `{"name":"kubernetes-dynamic-pvc-18e7a4d9-050d-11e9-b905-548998f3478f","size":10737418240,"objects":2560,"order":22,"object_size":4194304,"block_name_prefix":"rbd_data.9f4ff7238e1f29","format":2}`,
TargetSize: 10240,
},
{
Output: `{"name":"kubernetes-dynamic-pvc-070635bf-e33f-11e8-aab7-548998f3478f","size":1073741824,"objects":256,"order":22,"object_size":4194304,"block_name_prefix":"rbd_data.670ac4238e1f29","format":2}`,
TargetSize: 1024,
},
} {
size, err := getRbdImageSize([]byte(c.Output))
if err != nil {
t.Errorf("Case %d: getRbdImageSize failed: %v", i, err)
continue
}
if size != c.TargetSize {
t.Errorf("Case %d: unexpected size, wanted %d, got %d", i, c.TargetSize, size)
}
}
}

View File

@@ -1,790 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
//
// utility functions to setup rbd volume
// mainly implement diskManager interface
//
package rbd
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path"
"strconv"
"strings"
"time"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/wait"
volumehelpers "k8s.io/cloud-provider/volume/helpers"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/util/node"
"k8s.io/kubernetes/pkg/volume"
utilpath "k8s.io/utils/path"
)
const (
imageWatcherStr = "watcher="
imageSizeStr = "size "
kubeLockMagic = "kubelet_lock_magic_"
// The following three values are used for a 30-second timeout
// while waiting for the RBD watcher to expire.
rbdImageWatcherInitDelay = 1 * time.Second
rbdImageWatcherFactor = 1.4
rbdImageWatcherSteps = 10
rbdImageSizeUnitMiB = 1024 * 1024
)
func getDevFromImageAndPool(pool, image string) (string, bool) {
device, found := getRbdDevFromImageAndPool(pool, image)
if found {
return device, true
}
device, found = getNbdDevFromImageAndPool(pool, image)
if found {
return device, true
}
return "", false
}
// Search /sys/bus for rbd device that matches given pool and image.
func getRbdDevFromImageAndPool(pool string, image string) (string, bool) {
// /sys/bus/rbd/devices/X/name and /sys/bus/rbd/devices/X/pool
sys_path := "/sys/bus/rbd/devices"
if dirs, err := ioutil.ReadDir(sys_path); err == nil {
for _, f := range dirs {
// Pool and name format:
// see rbd_pool_show() and rbd_name_show() at
// https://github.com/torvalds/linux/blob/master/drivers/block/rbd.c
name := f.Name()
// First match pool, then match name.
poolFile := path.Join(sys_path, name, "pool")
poolBytes, err := ioutil.ReadFile(poolFile)
if err != nil {
klog.V(4).Infof("error reading %s: %v", poolFile, err)
continue
}
if strings.TrimSpace(string(poolBytes)) != pool {
klog.V(4).Infof("device %s is not %q: %q", name, pool, string(poolBytes))
continue
}
imgFile := path.Join(sys_path, name, "name")
imgBytes, err := ioutil.ReadFile(imgFile)
if err != nil {
klog.V(4).Infof("error reading %s: %v", imgFile, err)
continue
}
if strings.TrimSpace(string(imgBytes)) != image {
klog.V(4).Infof("device %s is not %q: %q", name, image, string(imgBytes))
continue
}
// Found a match, check if device exists.
devicePath := "/dev/rbd" + name
if _, err := os.Lstat(devicePath); err == nil {
return devicePath, true
}
}
}
return "", false
}
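
A sketch of the sysfs entries the scan above walks, with a hypothetical device index of 0:

package main

import (
    "fmt"
    "path"
)

func main() {
    // For each /sys/bus/rbd/devices/<X>, the pool and name files are read and
    // compared; a match on both yields /dev/rbd<X>.
    name := "0" // hypothetical device index
    fmt.Println(path.Join("/sys/bus/rbd/devices", name, "pool")) // must contain the pool
    fmt.Println(path.Join("/sys/bus/rbd/devices", name, "name")) // must contain the image
    fmt.Println("/dev/rbd" + name)                               // returned when both match
}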
func getMaxNbds() (int, error) {
// the max number of nbd devices may be found in maxNbdsPath
// we will check sysfs for possible nbd devices even if this is not available
maxNbdsPath := "/sys/module/nbd/parameters/nbds_max"
_, err := os.Lstat(maxNbdsPath)
if err != nil {
return 0, fmt.Errorf("rbd-nbd: failed to retrieve max_nbds from %s err: %q", maxNbdsPath, err)
}
klog.V(4).Infof("found nbds max parameters file at %s", maxNbdsPath)
maxNbdBytes, err := ioutil.ReadFile(maxNbdsPath)
if err != nil {
return 0, fmt.Errorf("rbd-nbd: failed to read max_nbds from %s err: %q", maxNbdsPath, err)
}
maxNbds, err := strconv.Atoi(strings.TrimSpace(string(maxNbdBytes)))
if err != nil {
return 0, fmt.Errorf("rbd-nbd: failed to read max_nbds err: %q", err)
}
klog.V(4).Infof("rbd-nbd: max_nbds: %d", maxNbds)
return maxNbds, nil
}
// Locate any existing rbd-nbd process mapping given a <pool, image>.
// Recent versions of the rbd-nbd tool can provide this info via list-mapped,
// but older versions don't.
// The implementation below peeks at the command lines of nbd-bound processes
// to figure out any mapped images.
func getNbdDevFromImageAndPool(pool string, image string) (string, bool) {
// nbd module exports the pid of serving process in sysfs
basePath := "/sys/block/nbd"
// Do not change imgPath format - some tools like rbd-nbd are strict about it.
imgPath := fmt.Sprintf("%s/%s", pool, image)
maxNbds, maxNbdsErr := getMaxNbds()
if maxNbdsErr != nil {
klog.V(4).Infof("error reading nbds_max %v", maxNbdsErr)
return "", false
}
for i := 0; i < maxNbds; i++ {
nbdPath := basePath + strconv.Itoa(i)
_, err := os.Lstat(nbdPath)
if err != nil {
klog.V(4).Infof("error reading nbd info directory %s: %v", nbdPath, err)
continue
}
pidBytes, err := ioutil.ReadFile(path.Join(nbdPath, "pid"))
if err != nil {
klog.V(5).Infof("did not find valid pid file in dir %s: %v", nbdPath, err)
continue
}
cmdlineFileName := path.Join("/proc", strings.TrimSpace(string(pidBytes)), "cmdline")
rawCmdline, err := ioutil.ReadFile(cmdlineFileName)
if err != nil {
klog.V(4).Infof("failed to read cmdline file %s: %v", cmdlineFileName, err)
continue
}
cmdlineArgs := strings.FieldsFunc(string(rawCmdline), func(r rune) bool {
return r == '\u0000'
})
// Check if this process is mapping a rbd device.
// Only accepted pattern of cmdline is from execRbdMap:
// rbd-nbd map pool/image ...
if len(cmdlineArgs) < 3 || cmdlineArgs[0] != "rbd-nbd" || cmdlineArgs[1] != "map" {
klog.V(4).Infof("nbd device %s is not used by rbd", nbdPath)
continue
}
if cmdlineArgs[2] != imgPath {
klog.V(4).Infof("rbd-nbd device %s did not match expected image path: %s with path found: %s",
nbdPath, imgPath, cmdlineArgs[2])
continue
}
devicePath := path.Join("/dev", "nbd"+strconv.Itoa(i))
if _, err := os.Lstat(devicePath); err != nil {
klog.Warningf("Stat device %s for imgpath %s failed %v", devicePath, imgPath, err)
continue
}
return devicePath, true
}
return "", false
}
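
The /proc/<pid>/cmdline split is the subtle step: the kernel joins argv with NUL bytes, which FieldsFunc undoes. A minimal sketch with hypothetical cmdline contents:

package main

import (
    "fmt"
    "strings"
)

func main() {
    // Hypothetical /proc/<pid>/cmdline contents for an rbd-nbd process.
    raw := "rbd-nbd\x00map\x00pool/image\x00--id\x00admin"
    args := strings.FieldsFunc(raw, func(r rune) bool { return r == '\u0000' })
    fmt.Println(args)    // [rbd-nbd map pool/image --id admin]
    fmt.Println(args[2]) // the pool/image path compared against imgPath
}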
// Stat a path; if it doesn't exist, retry up to maxRetries times.
func waitForPath(pool, image string, maxRetries int, useNbdDriver bool) (string, bool) {
for i := 0; i < maxRetries; i++ {
if i != 0 {
time.Sleep(time.Second)
}
if useNbdDriver {
if devicePath, found := getNbdDevFromImageAndPool(pool, image); found {
return devicePath, true
}
} else {
if devicePath, found := getRbdDevFromImageAndPool(pool, image); found {
return devicePath, true
}
}
}
return "", false
}
// Execute a command to map an rbd device for the mounter.
// rbdCmd is driver dependent and either "rbd" or "rbd-nbd".
func execRbdMap(b rbdMounter, rbdCmd string, mon string) ([]byte, error) {
// Commandline: rbdCmd map imgPath ...
// do not change this format - some tools like rbd-nbd are strict about it.
imgPath := fmt.Sprintf("%s/%s", b.Pool, b.Image)
if b.Secret != "" {
return b.exec.Run(rbdCmd,
"map", imgPath, "--id", b.Id, "-m", mon, "--key="+b.Secret)
} else {
return b.exec.Run(rbdCmd,
"map", imgPath, "--id", b.Id, "-m", mon, "-k", b.Keyring)
}
}
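
The argv shape matters: getNbdDevFromImageAndPool above relies on exactly "rbd-nbd map pool/image ..." when it later inspects /proc. A sketch of the two command lines (credentials are hypothetical):

package main

import "fmt"

func main() {
    imgPath := "pool/image"
    // With an in-line secret:
    fmt.Println("rbd-nbd", "map", imgPath, "--id", "admin", "-m", "1.2.3.4:6789", "--key=<secret>")
    // With a keyring file instead:
    fmt.Println("rbd-nbd", "map", imgPath, "--id", "admin", "-m", "1.2.3.4:6789", "-k", "/etc/ceph/keyring")
}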
// Check if rbd-nbd tools are installed.
func checkRbdNbdTools(e mount.Exec) bool {
_, err := e.Run("modprobe", "nbd")
if err != nil {
klog.V(5).Infof("rbd-nbd: nbd modprobe failed with error %v", err)
return false
}
if _, err := e.Run("rbd-nbd", "--version"); err != nil {
klog.V(5).Infof("rbd-nbd: getting rbd-nbd version failed with error %v", err)
return false
}
klog.V(3).Infof("rbd-nbd tools were found.")
return true
}
// Make a directory like /var/lib/kubelet/plugins/kubernetes.io/rbd/mounts/pool-image-image.
func makePDNameInternal(host volume.VolumeHost, pool string, image string) string {
// Backward compatibility for the deprecated format: /var/lib/kubelet/plugins/kubernetes.io/rbd/rbd/pool-image-image.
deprecatedDir := path.Join(host.GetPluginDir(rbdPluginName), "rbd", pool+"-image-"+image)
info, err := os.Stat(deprecatedDir)
if err == nil && info.IsDir() {
// The device mount path has already been created with the deprecated format, return it.
klog.V(5).Infof("Deprecated format path %s found", deprecatedDir)
return deprecatedDir
}
// Return the canonical format path.
return path.Join(host.GetPluginDir(rbdPluginName), mount.MountsInGlobalPDPath, pool+"-image-"+image)
}
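
The two layouts the function chooses between, spelled out with a hypothetical plugin dir (mount.MountsInGlobalPDPath resolves to "mounts"):

package main

import (
    "fmt"
    "path"
)

func main() {
    pluginDir := "/var/lib/kubelet/plugins/kubernetes.io/rbd" // hypothetical host plugin dir
    pool, image := "pool1", "image1"
    fmt.Println(path.Join(pluginDir, "rbd", pool+"-image-"+image))    // deprecated; used only if it already exists
    fmt.Println(path.Join(pluginDir, "mounts", pool+"-image-"+image)) // canonical; the default for new mounts
}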
// Make a directory like /var/lib/kubelet/plugins/kubernetes.io/rbd/volumeDevices/pool-image-image.
func makeVDPDNameInternal(host volume.VolumeHost, pool string, image string) string {
return path.Join(host.GetVolumeDevicePluginDir(rbdPluginName), pool+"-image-"+image)
}
// RBDUtil implements diskManager interface.
type RBDUtil struct{}
var _ diskManager = &RBDUtil{}
func (util *RBDUtil) MakeGlobalPDName(rbd rbd) string {
return makePDNameInternal(rbd.plugin.host, rbd.Pool, rbd.Image)
}
func (util *RBDUtil) MakeGlobalVDPDName(rbd rbd) string {
return makeVDPDNameInternal(rbd.plugin.host, rbd.Pool, rbd.Image)
}
func rbdErrors(runErr, resultErr error) error {
if err, ok := runErr.(*exec.Error); ok {
if err.Err == exec.ErrNotFound {
return fmt.Errorf("rbd: rbd cmd not found")
}
}
return resultErr
}
// The 'rbd' utility builds a comma-separated list of monitor addresses from the '-m' /
// '--mon_host' parameter (comma, semi-colon, or white-space delimited monitor
// addresses) and sends it to the kernel rbd/libceph modules, which have accepted a
// comma-separated list of monitor addresses (e.g. ip1[:port1][,ip2[:port2]...])
// since their first version in Linux (see
// https://github.com/torvalds/linux/blob/602adf400201636e95c3fed9f31fba54a3d7e844/net/ceph/ceph_common.c#L239).
// Also, the libceph module chooses a monitor randomly, so we can simply pass all
// addresses without randomization (see
// https://github.com/torvalds/linux/blob/602adf400201636e95c3fed9f31fba54a3d7e844/net/ceph/mon_client.c#L132).
func (util *RBDUtil) kernelRBDMonitorsOpt(mons []string) string {
return strings.Join(mons, ",")
}
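
So for a two-monitor cluster the kernel receives a single comma-joined '-m' value (monitor addresses are hypothetical):

package main

import (
    "fmt"
    "strings"
)

func main() {
    mons := []string{"1.2.3.4:6789", "5.6.7.8:6789"}
    fmt.Println(strings.Join(mons, ",")) // 1.2.3.4:6789,5.6.7.8:6789
}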
// rbdUnlock releases a lock on image if found.
func (util *RBDUtil) rbdUnlock(b rbdMounter) error {
var err error
var output, locker string
var cmd []byte
var secret_opt []string
if b.Secret != "" {
secret_opt = []string{"--key=" + b.Secret}
} else {
secret_opt = []string{"-k", b.Keyring}
}
if len(b.adminId) == 0 {
b.adminId = b.Id
}
if len(b.adminSecret) == 0 {
b.adminSecret = b.Secret
}
// Construct lock id using host name and a magic prefix.
hostName, err := node.GetHostname("")
if err != nil {
return err
}
lock_id := kubeLockMagic + hostName
mon := util.kernelRBDMonitorsOpt(b.Mon)
// Get the locker name, something like "client.1234".
args := []string{"lock", "list", b.Image, "--pool", b.Pool, "--id", b.Id, "-m", mon}
args = append(args, secret_opt...)
cmd, err = b.exec.Run("rbd", args...)
output = string(cmd)
klog.V(4).Infof("lock list output %q", output)
if err != nil {
return err
}
ind := strings.LastIndex(output, lock_id) - 1
for i := ind; i >= 0; i-- {
if output[i] == '\n' {
locker = output[(i + 1):ind]
break
}
}
// Remove a lock if found: rbd lock remove.
if len(locker) > 0 {
args := []string{"lock", "remove", b.Image, lock_id, locker, "--pool", b.Pool, "--id", b.Id, "-m", mon}
args = append(args, secret_opt...)
cmd, err = b.exec.Run("rbd", args...)
if err == nil {
klog.V(4).Infof("rbd: successfully remove lock (locker_id: %s) on image: %s/%s with id %s mon %s", lock_id, b.Pool, b.Image, b.Id, mon)
} else {
klog.Warningf("rbd: failed to remove lock (lock_id: %s) on image: %s/%s with id %s mon %s: %v", lock_id, b.Pool, b.Image, b.Id, mon, err)
}
}
return err
}
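// Illustrative sketch, not part of the original file: how the backward scan
// in rbdUnlock recovers the locker name from `rbd lock list` output. The
// sample output and lock id below are made up.
func exampleLockerParse() {
output := "Locker ID Address\nclient.4123 kubelet_lock_magic_node-1 10.0.0.1:0/1234"
lockID := "kubelet_lock_magic_node-1"
ind := strings.LastIndex(output, lockID) - 1
locker := ""
for i := ind; i >= 0; i-- {
if output[i] == '\n' {
locker = output[(i + 1):ind]
break
}
}
fmt.Println(locker) // prints "client.4123"
}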
// AttachDisk attaches the disk on the node.
func (util *RBDUtil) AttachDisk(b rbdMounter) (string, error) {
var output []byte
globalPDPath := util.MakeGlobalPDName(*b.rbd)
if pathExists, pathErr := mount.PathExists(globalPDPath); pathErr != nil {
return "", fmt.Errorf("Error checking if path exists: %v", pathErr)
} else if !pathExists {
if err := os.MkdirAll(globalPDPath, 0750); err != nil {
return "", err
}
}
// Evaluate whether this device was mapped with rbd.
devicePath, mapped := waitForPath(b.Pool, b.Image, 1 /*maxRetries*/, false /*useNbdDriver*/)
// If rbd-nbd tools are found, we will fallback to it should the default krbd driver fail.
nbdToolsFound := false
if !mapped {
nbdToolsFound = checkRbdNbdTools(b.exec)
if nbdToolsFound {
devicePath, mapped = waitForPath(b.Pool, b.Image, 1 /*maxRetries*/, true /*useNbdDriver*/)
}
}
if !mapped {
// Currently, we don't acquire an advisory lock on the image, but for backward
// compatibility we need to check whether the image is in use by nodes running an old kubelet.
// osd_client_watch_timeout defaults to 30 seconds; if the watcher stays active longer
// than that, the rbd image does not get mapped and a failure message is generated.
backoff := wait.Backoff{
Duration: rbdImageWatcherInitDelay,
Factor: rbdImageWatcherFactor,
Steps: rbdImageWatcherSteps,
}
needValidUsed := true
if b.accessModes != nil {
// If accessModes only contains ReadOnlyMany, we don't need to check whether the image is in use.
if len(b.accessModes) == 1 && b.accessModes[0] == v1.ReadOnlyMany {
needValidUsed = false
}
}
// If accessModes is nil, the volume is referenced as an in-line volume.
// We can assume its access modes to be {"RWO", "ROX"}, which is what the volume plugin supports.
// We do not need to consider ReadOnly here, because it only applies to VolumeMounts.
if needValidUsed {
err := wait.ExponentialBackoff(backoff, func() (bool, error) {
used, rbdOutput, err := util.rbdStatus(&b)
if err != nil {
return false, fmt.Errorf("fail to check rbd image status with: (%v), rbd output: (%s)", err, rbdOutput)
}
return !used, nil
})
// Return error if rbd image has not become available for the specified timeout.
if err == wait.ErrWaitTimeout {
return "", fmt.Errorf("rbd image %s/%s is still being used", b.Pool, b.Image)
}
// Return error if any other errors were encountered during waiting for the image to become available.
if err != nil {
return "", err
}
}
mon := util.kernelRBDMonitorsOpt(b.Mon)
klog.V(1).Infof("rbd: map mon %s", mon)
_, err := b.exec.Run("modprobe", "rbd")
if err != nil {
klog.Warningf("rbd: failed to load rbd kernel module:%v", err)
}
output, err = execRbdMap(b, "rbd", mon)
if err != nil {
if !nbdToolsFound {
klog.V(1).Infof("rbd: map error %v, rbd output: %s", err, string(output))
return "", fmt.Errorf("rbd: map failed %v, rbd output: %s", err, string(output))
}
klog.V(3).Infof("rbd: map failed with %v, %s. Retrying with rbd-nbd", err, string(output))
errList := []error{err}
outputList := output
output, err = execRbdMap(b, "rbd-nbd", mon)
if err != nil {
errList = append(errList, err)
outputList = append(outputList, output...)
return "", fmt.Errorf("rbd: map failed %v, rbd output: %s", errors.NewAggregate(errList), string(outputList))
}
devicePath, mapped = waitForPath(b.Pool, b.Image, 10 /*maxRetries*/, true /*useNbdDriver*/)
} else {
devicePath, mapped = waitForPath(b.Pool, b.Image, 10 /*maxRetries*/, false /*useNbdDriver*/)
}
if !mapped {
return "", fmt.Errorf("Could not map image %s/%s, Timeout after 10s", b.Pool, b.Image)
}
}
return devicePath, nil
}
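// Minimal sketch, not part of the original file, of the wait.ExponentialBackoff
// pattern AttachDisk uses above: retry the watcher check with growing delays
// until the image is free or the backoff steps are exhausted.
func exampleWaitForImageUnused(isUsed func() (bool, error)) error {
backoff := wait.Backoff{
Duration: rbdImageWatcherInitDelay,
Factor: rbdImageWatcherFactor,
Steps: rbdImageWatcherSteps,
}
return wait.ExponentialBackoff(backoff, func() (bool, error) {
used, err := isUsed()
if err != nil {
return false, err // abort on hard errors
}
return !used, nil // done once no watcher holds the image
})
}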
// DetachDisk detaches the disk from the node.
// It detaches the device from the node if a device is provided, and removes
// the lock if there is persisted RBD info under deviceMountPath.
func (util *RBDUtil) DetachDisk(plugin *rbdPlugin, deviceMountPath string, device string) error {
if len(device) == 0 {
return fmt.Errorf("DetachDisk failed , device is empty")
}
exec := plugin.host.GetExec(plugin.GetPluginName())
var rbdCmd string
// Unlike map, we cannot fall through for unmap;
// the tool used to unmap depends on the device type.
if strings.HasPrefix(device, "/dev/nbd") {
rbdCmd = "rbd-nbd"
} else {
rbdCmd = "rbd"
}
// rbd unmap
output, err := exec.Run(rbdCmd, "unmap", device)
if err != nil {
return rbdErrors(err, fmt.Errorf("rbd: failed to unmap device %s, error %v, rbd output: %v", device, err, output))
}
klog.V(3).Infof("rbd: successfully unmap device %s", device)
// Currently, we don't persist rbd info on the disk, but for backward
// compatibility, we need to clean it up if found.
rbdFile := path.Join(deviceMountPath, "rbd.json")
exists, err := utilpath.Exists(utilpath.CheckFollowSymlink, rbdFile)
if err != nil {
return err
}
if exists {
klog.V(3).Infof("rbd: old rbd.json is found under %s, cleaning it", deviceMountPath)
err = util.cleanOldRBDFile(plugin, rbdFile)
if err != nil {
klog.Errorf("rbd: failed to clean %s", rbdFile)
return err
}
klog.V(3).Infof("rbd: successfully remove %s", rbdFile)
}
return nil
}
// DetachBlockDisk detaches the disk from the node.
func (util *RBDUtil) DetachBlockDisk(disk rbdDiskUnmapper, mapPath string) error {
if pathExists, pathErr := mount.PathExists(mapPath); pathErr != nil {
return fmt.Errorf("Error checking if path exists: %v", pathErr)
} else if !pathExists {
klog.Warningf("Warning: Unmap skipped because path does not exist: %v", mapPath)
return nil
}
// If we arrive here, the device is no longer used; unmap it.
device, err := getBlockVolumeDevice(mapPath)
if err != nil {
return err
}
if len(device) == 0 {
return fmt.Errorf("DetachDisk failed , device is empty")
}
exec := disk.plugin.host.GetExec(disk.plugin.GetPluginName())
var rbdCmd string
// Unlike map, we cannot fallthrough here.
// Any nbd device must be unmapped by rbd-nbd
if strings.HasPrefix(device, "/dev/nbd") {
rbdCmd = "rbd-nbd"
klog.V(4).Infof("rbd: using rbd-nbd for unmap function")
} else {
rbdCmd = "rbd"
klog.V(4).Infof("rbd: using rbd for unmap function")
}
// rbd unmap
output, err := exec.Run(rbdCmd, "unmap", device)
if err != nil {
return rbdErrors(err, fmt.Errorf("rbd: failed to unmap device %s, error %v, rbd output: %s", device, err, string(output)))
}
klog.V(3).Infof("rbd: successfully unmap device %s", device)
return nil
}
// cleanOldRBDFile reads rbd info from the rbd.json file and removes the lock if found.
// Finally, it removes the rbd.json file itself.
func (util *RBDUtil) cleanOldRBDFile(plugin *rbdPlugin, rbdFile string) error {
mounter := &rbdMounter{
// util.rbdUnlock needs it to run command.
rbd: newRBD("", "", "", "", false, plugin, util),
}
fp, err := os.Open(rbdFile)
if err != nil {
return fmt.Errorf("rbd: open err %s/%s", rbdFile, err)
}
defer fp.Close()
decoder := json.NewDecoder(fp)
if err = decoder.Decode(mounter); err != nil {
klog.Errorf("rbd: failed to decode rbd info from %s: %v", rbdFile, err)
return fmt.Errorf("rbd: decode err: %v", err)
}
// Remove rbd lock if found.
// The disk is not attached to this node anymore, so the lock on image
// for this node can be removed safely.
err = util.rbdUnlock(*mounter)
if err == nil {
os.Remove(rbdFile)
}
return err
}
func (util *RBDUtil) CreateImage(p *rbdVolumeProvisioner) (r *v1.RBDPersistentVolumeSource, size int, err error) {
var output []byte
capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
// Convert to MiB, the unit rbd defaults to.
sz, err := volumehelpers.RoundUpToMiBInt(capacity)
if err != nil {
return nil, 0, err
}
volSz := fmt.Sprintf("%d", sz)
mon := util.kernelRBDMonitorsOpt(p.Mon)
if p.rbdMounter.imageFormat == rbdImageFormat2 {
klog.V(4).Infof("rbd: create %s size %s format %s (features: %s) using mon %s, pool %s id %s key %s", p.rbdMounter.Image, volSz, p.rbdMounter.imageFormat, p.rbdMounter.imageFeatures, mon, p.rbdMounter.Pool, p.rbdMounter.adminId, p.rbdMounter.adminSecret)
} else {
klog.V(4).Infof("rbd: create %s size %s format %s using mon %s, pool %s id %s key %s", p.rbdMounter.Image, volSz, p.rbdMounter.imageFormat, mon, p.rbdMounter.Pool, p.rbdMounter.adminId, p.rbdMounter.adminSecret)
}
args := []string{"create", p.rbdMounter.Image, "--size", volSz, "--pool", p.rbdMounter.Pool, "--id", p.rbdMounter.adminId, "-m", mon, "--key=" + p.rbdMounter.adminSecret, "--image-format", p.rbdMounter.imageFormat}
if p.rbdMounter.imageFormat == rbdImageFormat2 {
// If no image features are provided, the result is an empty string,
// which disables all RBD image format 2 features, as expected.
features := strings.Join(p.rbdMounter.imageFeatures, ",")
args = append(args, "--image-feature", features)
}
output, err = p.exec.Run("rbd", args...)
if err != nil {
klog.Warningf("failed to create rbd image, output %v", string(output))
return nil, 0, fmt.Errorf("failed to create rbd image: %v, command output: %s", err, string(output))
}
return &v1.RBDPersistentVolumeSource{
CephMonitors: p.rbdMounter.Mon,
RBDImage: p.rbdMounter.Image,
RBDPool: p.rbdMounter.Pool,
}, sz, nil
}
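// Illustrative sketch, not part of the original file: rbd sizes images in MiB,
// so requested capacities are rounded up; a 1500Ki request becomes 2 MiB.
func exampleRoundUpToMiB() {
q := resource.MustParse("1500Ki")
sz, err := volumehelpers.RoundUpToMiBInt(q)
fmt.Println(sz, err) // prints "2 <nil>"
}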
func (util *RBDUtil) DeleteImage(p *rbdVolumeDeleter) error {
var output []byte
found, rbdOutput, err := util.rbdStatus(p.rbdMounter)
if err != nil {
return fmt.Errorf("error %v, rbd output: %v", err, rbdOutput)
}
if found {
klog.Info("rbd is still being used ", p.rbdMounter.Image)
return fmt.Errorf("rbd image %s/%s is still being used, rbd output: %v", p.rbdMounter.Pool, p.rbdMounter.Image, rbdOutput)
}
// rbd rm.
mon := util.kernelRBDMonitorsOpt(p.rbdMounter.Mon)
klog.V(4).Infof("rbd: rm %s using mon %s, pool %s id %s key %s", p.rbdMounter.Image, mon, p.rbdMounter.Pool, p.rbdMounter.adminId, p.rbdMounter.adminSecret)
output, err = p.exec.Run("rbd",
"rm", p.rbdMounter.Image, "--pool", p.rbdMounter.Pool, "--id", p.rbdMounter.adminId, "-m", mon, "--key="+p.rbdMounter.adminSecret)
if err == nil {
return nil
}
klog.Errorf("failed to delete rbd image: %v, command output: %s", err, string(output))
return fmt.Errorf("error %v, rbd output: %v", err, string(output))
}
// ExpandImage runs rbd resize command to resize the specified image.
func (util *RBDUtil) ExpandImage(rbdExpander *rbdVolumeExpander, oldSize resource.Quantity, newSize resource.Quantity) (resource.Quantity, error) {
var output []byte
var err error
// Convert to MiB, the unit rbd defaults to.
sz := int(volumehelpers.RoundUpToMiB(newSize))
newVolSz := fmt.Sprintf("%d", sz)
newSizeQuant := resource.MustParse(fmt.Sprintf("%dMi", sz))
// Check the current size of the rbd image; if it is equal to or greater than the requested new size, do nothing.
curSize, infoErr := util.rbdInfo(rbdExpander.rbdMounter)
if infoErr != nil {
return oldSize, fmt.Errorf("rbd info failed, error: %v", infoErr)
}
if curSize >= sz {
return newSizeQuant, nil
}
// rbd resize.
mon := util.kernelRBDMonitorsOpt(rbdExpander.rbdMounter.Mon)
klog.V(4).Infof("rbd: resize %s using mon %s, pool %s id %s key %s", rbdExpander.rbdMounter.Image, mon, rbdExpander.rbdMounter.Pool, rbdExpander.rbdMounter.adminId, rbdExpander.rbdMounter.adminSecret)
output, err = rbdExpander.exec.Run("rbd",
"resize", rbdExpander.rbdMounter.Image, "--size", newVolSz, "--pool", rbdExpander.rbdMounter.Pool, "--id", rbdExpander.rbdMounter.adminId, "-m", mon, "--key="+rbdExpander.rbdMounter.adminSecret)
if err == nil {
return newSizeQuant, nil
}
klog.Errorf("failed to resize rbd image: %v, command output: %s", err, string(output))
return oldSize, err
}
// rbdInfo runs the `rbd info` command to get the current image size in MiB.
func (util *RBDUtil) rbdInfo(b *rbdMounter) (int, error) {
var err error
var output []byte
// If we don't have admin id/secret (e.g. attaching), fall back to user id/secret.
id := b.adminId
secret := b.adminSecret
if id == "" {
id = b.Id
secret = b.Secret
}
mon := util.kernelRBDMonitorsOpt(b.Mon)
// cmd "rbd info" get the image info with the following output:
//
// # image exists (exit=0)
// rbd info volume-4a5bcc8b-2b55-46da-ba04-0d3dc5227f08
// size 1024 MB in 256 objects
// order 22 (4096 kB objects)
// block_name_prefix: rbd_data.1253ac238e1f29
// format: 2
// ...
//
// rbd info volume-4a5bcc8b-2b55-46da-ba04-0d3dc5227f08 --format json
// {"name":"volume-4a5bcc8b-2b55-46da-ba04-0d3dc5227f08","size":1073741824,"objects":256,"order":22,"object_size":4194304,"block_name_prefix":"rbd_data.1253ac238e1f29","format":2,"features":["layering","exclusive-lock","object-map","fast-diff","deep-flatten"],"flags":[]}
//
//
// # image does not exist (exit=2)
// rbd: error opening image 1234: (2) No such file or directory
//
klog.V(4).Infof("rbd: info %s using mon %s, pool %s id %s key %s", b.Image, mon, b.Pool, id, secret)
output, err = b.exec.Run("rbd",
"info", b.Image, "--pool", b.Pool, "-m", mon, "--id", id, "--key="+secret, "--format=json")
if err, ok := err.(*exec.Error); ok {
if err.Err == exec.ErrNotFound {
klog.Errorf("rbd cmd not found")
// fail fast if rbd command is not found.
return 0, err
}
}
// If the command failed, return its last error.
if err != nil {
return 0, err
}
if len(output) == 0 {
return 0, fmt.Errorf("can not get image size info %s: %s", b.Image, string(output))
}
return getRbdImageSize(output)
}
func getRbdImageSize(output []byte) (int, error) {
info := struct {
Size int64 `json:"size"`
}{}
if err := json.Unmarshal(output, &info); err != nil {
return 0, fmt.Errorf("parse rbd info output failed: %s, %v", string(output), err)
}
return int(info.Size / rbdImageSizeUnitMiB), nil
}
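// Illustrative sketch, not part of the original file: `rbd info --format=json`
// reports the size in bytes, and getRbdImageSize converts it to MiB (assuming
// rbdImageSizeUnitMiB is 1024*1024).
func exampleGetRbdImageSize() {
out := []byte(`{"name":"vol-1","size":1073741824,"format":2}`)
sizeMiB, err := getRbdImageSize(out)
fmt.Println(sizeMiB, err) // prints "1024 <nil>"
}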
// rbdStatus runs the `rbd status` command to check if there is a watcher on the image.
func (util *RBDUtil) rbdStatus(b *rbdMounter) (bool, string, error) {
var err error
var output string
var cmd []byte
// If we don't have admin id/secret (e.g. attaching), fall back to user id/secret.
id := b.adminId
secret := b.adminSecret
if id == "" {
id = b.Id
secret = b.Secret
}
mon := util.kernelRBDMonitorsOpt(b.Mon)
// cmd "rbd status" list the rbd client watch with the following output:
//
// # there is a watcher (exit=0)
// Watchers:
// watcher=10.16.153.105:0/710245699 client.14163 cookie=1
//
// # there is no watcher (exit=0)
// Watchers: none
//
// Otherwise, exit is non-zero, for example:
//
// # image does not exist (exit=2)
// rbd: error opening image kubernetes-dynamic-pvc-<UUID>: (2) No such file or directory
//
klog.V(4).Infof("rbd: status %s using mon %s, pool %s id %s key %s", b.Image, mon, b.Pool, id, secret)
cmd, err = b.exec.Run("rbd",
"status", b.Image, "--pool", b.Pool, "-m", mon, "--id", id, "--key="+secret)
output = string(cmd)
if err, ok := err.(*exec.Error); ok {
if err.Err == exec.ErrNotFound {
klog.Errorf("rbd cmd not found")
// fail fast if command not found
return false, output, err
}
}
// If the command failed, return its last error.
if err != nil {
return false, output, err
}
if strings.Contains(output, imageWatcherStr) {
klog.V(4).Infof("rbd: watchers on %s: %s", b.Image, output)
return true, output, nil
} else {
klog.Warningf("rbd: no watchers on %s", b.Image)
return false, output, nil
}
}
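// Illustrative sketch, not part of the original file: rbdStatus only
// substring-matches the output (assuming imageWatcherStr is "watcher="), so a
// "Watchers: none" line reports the image as unused.
func exampleWatcherDetection() {
withWatcher := "Watchers:\n\twatcher=10.16.153.105:0/710245699 client.14163 cookie=1"
noWatcher := "Watchers: none"
fmt.Println(strings.Contains(withWatcher, "watcher=")) // prints "true"
fmt.Println(strings.Contains(noWatcher, "watcher="))   // prints "false"
}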

View File

@ -1,71 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_test(
name = "go_default_test",
srcs = [
"sio_mgr_test.go",
"sio_util_test.go",
"sio_volume_test.go",
],
embed = [":go_default_library"],
deps = [
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/client-go/util/testing:go_default_library",
"//vendor/github.com/codedellemc/goscaleio/types/v1:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
go_library(
name = "go_default_library",
srcs = [
"sio_client.go",
"sio_mgr.go",
"sio_plugin.go",
"sio_util.go",
"sio_volume.go",
],
importpath = "k8s.io/kubernetes/pkg/volume/scaleio",
deps = [
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/cloud-provider/volume/helpers:go_default_library",
"//vendor/github.com/codedellemc/goscaleio:go_default_library",
"//vendor/github.com/codedellemc/goscaleio/types/v1:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
"//vendor/k8s.io/utils/keymutex:go_default_library",
"//vendor/k8s.io/utils/strings:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@ -1,539 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scaleio
import (
"errors"
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"regexp"
"strconv"
"strings"
"sync"
"time"
"k8s.io/kubernetes/pkg/util/mount"
sio "github.com/codedellemc/goscaleio"
siotypes "github.com/codedellemc/goscaleio/types/v1"
"k8s.io/klog"
)
var (
sioDiskIDPath = "/dev/disk/by-id"
)
type sioVolumeID string
type sioInterface interface {
FindVolume(name string) (*siotypes.Volume, error)
Volume(sioVolumeID) (*siotypes.Volume, error)
CreateVolume(name string, sizeGB int64) (*siotypes.Volume, error)
AttachVolume(sioVolumeID, bool) error
DetachVolume(sioVolumeID) error
DeleteVolume(sioVolumeID) error
IID() (string, error)
Devs() (map[string]string, error)
WaitForAttachedDevice(token string) (string, error)
WaitForDetachedDevice(token string) error
GetVolumeRefs(sioVolumeID) (int, error)
}
type sioClient struct {
client *sio.Client
gateway string
username string
password string
insecure bool
certsEnabled bool
system *siotypes.System
sysName string
sysClient *sio.System
protectionDomain *siotypes.ProtectionDomain
pdName string
pdClient *sio.ProtectionDomain
storagePool *siotypes.StoragePool
spName string
spClient *sio.StoragePool
provisionMode string
sdcPath string
sdcGUID string
instanceID string
inited bool
diskRegex *regexp.Regexp
mtx sync.Mutex
exec mount.Exec
}
func newSioClient(gateway, username, password string, sslEnabled bool, exec mount.Exec) (*sioClient, error) {
client := new(sioClient)
client.gateway = gateway
client.username = username
client.password = password
client.exec = exec
if sslEnabled {
client.insecure = false
client.certsEnabled = true
} else {
client.insecure = true
client.certsEnabled = false
}
r, err := regexp.Compile(`^emc-vol-\w*-\w*$`)
if err != nil {
klog.Error(log("failed to compile regex: %v", err))
return nil, err
}
client.diskRegex = r
// delay client setup/login until init()
return client, nil
}
// init sets up the client and authenticates
func (c *sioClient) init() error {
c.mtx.Lock()
defer c.mtx.Unlock()
if c.inited {
return nil
}
klog.V(4).Infoln(log("initializing scaleio client"))
client, err := sio.NewClientWithArgs(c.gateway, "", c.insecure, c.certsEnabled)
if err != nil {
klog.Error(log("failed to create client: %v", err))
return err
}
c.client = client
if _, err = c.client.Authenticate(
&sio.ConfigConnect{
Endpoint: c.gateway,
Version: "",
Username: c.username,
Password: c.password},
); err != nil {
klog.Error(log("client authentication failed: %v", err))
return err
}
// retrieve system
if c.system, err = c.findSystem(c.sysName); err != nil {
klog.Error(log("unable to find system %s: %v", c.sysName, err))
return err
}
// retrieve protection domain
if c.protectionDomain, err = c.findProtectionDomain(c.pdName); err != nil {
klog.Error(log("unable to find protection domain %s: %v", c.protectionDomain, err))
return err
}
// retrieve storage pool
if c.storagePool, err = c.findStoragePool(c.spName); err != nil {
klog.Error(log("unable to find storage pool %s: %v", c.storagePool, err))
return err
}
c.inited = true
return nil
}
func (c *sioClient) Volumes() ([]*siotypes.Volume, error) {
if err := c.init(); err != nil {
return nil, err
}
vols, err := c.getVolumes()
if err != nil {
klog.Error(log("failed to retrieve volumes: %v", err))
return nil, err
}
return vols, nil
}
func (c *sioClient) Volume(id sioVolumeID) (*siotypes.Volume, error) {
if err := c.init(); err != nil {
return nil, err
}
vols, err := c.getVolumesByID(id)
if err != nil {
klog.Error(log("failed to retrieve volume by id: %v", err))
return nil, err
}
if len(vols) == 0 || vols[0] == nil {
klog.V(4).Info(log("volume not found, id %s", id))
return nil, errors.New("volume not found")
}
return vols[0], nil
}
func (c *sioClient) FindVolume(name string) (*siotypes.Volume, error) {
if err := c.init(); err != nil {
return nil, err
}
klog.V(4).Info(log("searching for volume %s", name))
volumes, err := c.getVolumesByName(name)
if err != nil {
klog.Error(log("failed to find volume by name %v", err))
return nil, err
}
for _, volume := range volumes {
if volume.Name == name {
klog.V(4).Info(log("found volume %s", name))
return volume, nil
}
}
klog.V(4).Info(log("volume not found, name %s", name))
return nil, errors.New("volume not found")
}
func (c *sioClient) CreateVolume(name string, sizeGB int64) (*siotypes.Volume, error) {
if err := c.init(); err != nil {
return nil, err
}
params := &siotypes.VolumeParam{
Name: name,
VolumeSizeInKb: strconv.Itoa(int(sizeGB) * 1024 * 1024),
VolumeType: c.provisionMode,
}
createResponse, err := c.client.CreateVolume(params, c.storagePool.Name)
if err != nil {
klog.Error(log("failed to create volume %s: %v", name, err))
return nil, err
}
return c.Volume(sioVolumeID(createResponse.ID))
}
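// Illustrative sketch, not part of the original file: ScaleIO expresses volume
// sizes in KiB, so the conversion in CreateVolume above turns an 8 GiB request
// (the code calls the parameter sizeGB) into "8388608".
func exampleGiBToKiB() {
sizeGB := int64(8)
fmt.Println(strconv.Itoa(int(sizeGB) * 1024 * 1024)) // prints "8388608"
}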
// AttachVolume maps the scaleio volume to an sdc node. If the multipleMappings flag
// is true, ScaleIO will allow other SDC to map to that volume.
func (c *sioClient) AttachVolume(id sioVolumeID, multipleMappings bool) error {
if err := c.init(); err != nil {
klog.Error(log("failed to init'd client in attach volume: %v", err))
return err
}
iid, err := c.IID()
if err != nil {
klog.Error(log("failed to get instanceIID for attach volume: %v", err))
return err
}
params := &siotypes.MapVolumeSdcParam{
SdcID: iid,
AllowMultipleMappings: strconv.FormatBool(multipleMappings),
AllSdcs: "",
}
volClient := sio.NewVolume(c.client)
volClient.Volume = &siotypes.Volume{ID: string(id)}
if err := volClient.MapVolumeSdc(params); err != nil {
klog.Error(log("failed to attach volume id %s: %v", id, err))
return err
}
klog.V(4).Info(log("volume %s attached successfully", id))
return nil
}
// DetachVolume detaches the volume with specified id.
func (c *sioClient) DetachVolume(id sioVolumeID) error {
if err := c.init(); err != nil {
return err
}
iid, err := c.IID()
if err != nil {
return err
}
params := &siotypes.UnmapVolumeSdcParam{
SdcID: "",
IgnoreScsiInitiators: "true",
AllSdcs: iid,
}
volClient := sio.NewVolume(c.client)
volClient.Volume = &siotypes.Volume{ID: string(id)}
if err := volClient.UnmapVolumeSdc(params); err != nil {
return err
}
return nil
}
// DeleteVolume deletes the volume with the specified id
func (c *sioClient) DeleteVolume(id sioVolumeID) error {
if err := c.init(); err != nil {
return err
}
vol, err := c.Volume(id)
if err != nil {
return err
}
volClient := sio.NewVolume(c.client)
volClient.Volume = vol
if err := volClient.RemoveVolume("ONLY_ME"); err != nil {
return err
}
return nil
}
// IID returns the scaleio instance id for the node
func (c *sioClient) IID() (string, error) {
if err := c.init(); err != nil {
return "", err
}
// if instanceID not set, retrieve it
if c.instanceID == "" {
guid, err := c.getGUID()
if err != nil {
return "", err
}
sdc, err := c.sysClient.FindSdc("SdcGUID", guid)
if err != nil {
klog.Error(log("failed to retrieve sdc info %s", err))
return "", err
}
c.instanceID = sdc.Sdc.ID
klog.V(4).Info(log("retrieved instanceID %s", c.instanceID))
}
return c.instanceID, nil
}
// getGUID returns the instance GUID. If it was not set via resource labels,
// it falls back to querying the drv_cfg binary.
func (c *sioClient) getGUID() (string, error) {
if c.sdcGUID == "" {
klog.V(4).Info(log("sdc guid label not set, falling back to using drv_cfg"))
cmd := c.getSdcCmd()
output, err := c.exec.Run(cmd, "--query_guid")
if err != nil {
klog.Error(log("drv_cfg --query_guid failed: %v", err))
return "", err
}
c.sdcGUID = strings.TrimSpace(string(output))
}
return c.sdcGUID, nil
}
// getSioDiskPaths traverses local disk devices to retrieve their device paths.
// The paths are read from /dev/disk/by-id; each sio device link has the format
// emc-vol-<mdmID>-<volID>, e.g.:
// emc-vol-788d9efb0a8f20cb-a2b8419300000000
func (c *sioClient) getSioDiskPaths() ([]os.FileInfo, error) {
files, err := ioutil.ReadDir(sioDiskIDPath)
if err != nil {
if os.IsNotExist(err) {
// sioDiskIDPath may not exist yet, which is fine
return []os.FileInfo{}, nil
}
klog.Error(log("failed to ReadDir %s: %v", sioDiskIDPath, err))
return nil, err
}
result := []os.FileInfo{}
for _, file := range files {
if c.diskRegex.MatchString(file.Name()) {
result = append(result, file)
}
}
return result, nil
}
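// Illustrative sketch, not part of the original file: device links that
// getSioDiskPaths keeps must match the emc-vol-<mdmID>-<volID> pattern
// compiled in newSioClient; the second name below is a made-up non-match.
func exampleSioDiskNameMatch() {
r := regexp.MustCompile(`^emc-vol-\w*-\w*$`)
fmt.Println(r.MatchString("emc-vol-788d9efb0a8f20cb-a2b8419300000000")) // prints "true"
fmt.Println(r.MatchString("wwn-0x5000c500a1b2c3d4"))                    // prints "false"
}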
// GetVolumeRefs counts the number of local disk devices that reference the given SIO volume.
// This is useful in preventing premature detach.
func (c *sioClient) GetVolumeRefs(volID sioVolumeID) (refs int, err error) {
files, err := c.getSioDiskPaths()
if err != nil {
return 0, err
}
for _, file := range files {
if strings.Contains(file.Name(), string(volID)) {
refs++
}
}
return
}
// Devs returns a map of local devices as map[<volume.id>]<deviceName>
func (c *sioClient) Devs() (map[string]string, error) {
volumeMap := make(map[string]string)
files, err := c.getSioDiskPaths()
if err != nil {
return nil, err
}
for _, f := range files {
// split emc-vol-<mdmID>-<volumeID> to pull out volumeID
parts := strings.Split(f.Name(), "-")
if len(parts) != 4 {
return nil, errors.New("unexpected ScaleIO device name format")
}
volumeID := parts[3]
devPath, err := filepath.EvalSymlinks(fmt.Sprintf("%s/%s", sioDiskIDPath, f.Name()))
if err != nil {
klog.Error(log("devicepath-to-volID mapping error: %v", err))
return nil, err
}
// map volumeID to devicePath
volumeMap[volumeID] = devPath
}
return volumeMap, nil
}
// WaitForAttachedDevice sets up a timer to wait for an attached device to appear in the instance's list.
func (c *sioClient) WaitForAttachedDevice(token string) (string, error) {
if token == "" {
return "", fmt.Errorf("invalid attach token")
}
// wait for device to show up in local device list
ticker := time.NewTicker(time.Second)
defer ticker.Stop()
timer := time.NewTimer(30 * time.Second)
defer timer.Stop()
for {
select {
case <-ticker.C:
devMap, err := c.Devs()
if err != nil {
klog.Error(log("failed while waiting for volume to attach: %v", err))
return "", err
}
klog.V(4).Info(log("waiting for volume %s to be mapped/attached", token))
if path, ok := devMap[token]; ok {
klog.V(4).Info(log("device %s mapped to vol %s", path, token))
return path, nil
}
case <-timer.C:
klog.Error(log("timed out while waiting for volume to be mapped to a device"))
return "", fmt.Errorf("volume attach timeout")
}
}
}
// waitForDetachedDevice waits for device to be detached
func (c *sioClient) WaitForDetachedDevice(token string) error {
if token == "" {
return fmt.Errorf("invalid detach token")
}
// wait for the token to disappear from the local device list
ticker := time.NewTicker(time.Second)
defer ticker.Stop()
timer := time.NewTimer(30 * time.Second)
defer timer.Stop()
for {
select {
case <-ticker.C:
devMap, err := c.Devs()
if err != nil {
klog.Error(log("failed while waiting for volume to unmap/detach: %v", err))
return err
}
klog.V(4).Info(log("waiting for volume %s to be unmapped/detached", token))
// If the volume id can no longer be found, the detach is complete.
if _, ok := devMap[token]; !ok {
return nil
}
case <-timer.C:
klog.Error(log("timed out while waiting for volume %s to be unmapped/detached", token))
return fmt.Errorf("volume detach timeout")
}
}
}
// ***********************************************************************
// Little Helpers!
// ***********************************************************************
func (c *sioClient) findSystem(sysname string) (sys *siotypes.System, err error) {
if c.sysClient, err = c.client.FindSystem("", sysname, ""); err != nil {
return nil, err
}
systems, err := c.client.GetInstance("")
if err != nil {
klog.Error(log("failed to retrieve instances: %v", err))
return nil, err
}
for _, sys = range systems {
if sys.Name == sysname {
return sys, nil
}
}
klog.Error(log("system %s not found", sysname))
return nil, errors.New("system not found")
}
func (c *sioClient) findProtectionDomain(pdname string) (*siotypes.ProtectionDomain, error) {
c.pdClient = sio.NewProtectionDomain(c.client)
if c.sysClient != nil {
protectionDomain, err := c.sysClient.FindProtectionDomain("", pdname, "")
if err != nil {
klog.Error(log("failed to retrieve protection domains: %v", err))
return nil, err
}
c.pdClient.ProtectionDomain = protectionDomain
return protectionDomain, nil
}
klog.Error(log("protection domain %s not set", pdname))
return nil, errors.New("protection domain not set")
}
func (c *sioClient) findStoragePool(spname string) (*siotypes.StoragePool, error) {
c.spClient = sio.NewStoragePool(c.client)
if c.pdClient != nil {
sp, err := c.pdClient.FindStoragePool("", spname, "")
if err != nil {
klog.Error(log("failed to retrieve storage pool: %v", err))
return nil, err
}
c.spClient.StoragePool = sp
return sp, nil
}
klog.Error(log("storage pool %s not set", spname))
return nil, errors.New("storage pool not set")
}
func (c *sioClient) getVolumes() ([]*siotypes.Volume, error) {
return c.client.GetVolume("", "", "", "", true)
}
func (c *sioClient) getVolumesByID(id sioVolumeID) ([]*siotypes.Volume, error) {
return c.client.GetVolume("", string(id), "", "", true)
}
func (c *sioClient) getVolumesByName(name string) ([]*siotypes.Volume, error) {
return c.client.GetVolume("", "", "", name, true)
}
func (c *sioClient) getSdcPath() string {
return sdcRootPath
}
func (c *sioClient) getSdcCmd() string {
return path.Join(c.getSdcPath(), "drv_cfg")
}

View File

@ -1,248 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scaleio
import (
"errors"
"strconv"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/klog"
siotypes "github.com/codedellemc/goscaleio/types/v1"
)
type storageInterface interface {
CreateVolume(string, int64) (*siotypes.Volume, error)
AttachVolume(string, bool) (string, error)
IsAttached(string) (bool, error)
DetachVolume(string) error
DeleteVolume(string) error
}
type sioMgr struct {
client sioInterface
configData map[string]string
exec mount.Exec
}
func newSioMgr(configs map[string]string, exec mount.Exec) (*sioMgr, error) {
if configs == nil {
return nil, errors.New("missing configuration data")
}
configs[confKey.protectionDomain] = defaultString(configs[confKey.protectionDomain], "default")
configs[confKey.storagePool] = defaultString(configs[confKey.storagePool], "default")
configs[confKey.sdcRootPath] = defaultString(configs[confKey.sdcRootPath], sdcRootPath)
configs[confKey.storageMode] = defaultString(configs[confKey.storageMode], "ThinProvisioned")
mgr := &sioMgr{configData: configs, exec: exec}
return mgr, nil
}
// getClient safely returns an sioInterface
func (m *sioMgr) getClient() (sioInterface, error) {
if m.client == nil {
klog.V(4).Info(log("creating scaleio client"))
configs := m.configData
username := configs[confKey.username]
password := configs[confKey.password]
gateway := configs[confKey.gateway]
b, err := strconv.ParseBool(configs[confKey.sslEnabled])
if err != nil {
klog.Error(log("failed to parse sslEnabled, must be either \"true\" or \"false\""))
return nil, err
}
certsEnabled := b
klog.V(4).Info(log("creating new client for gateway %s", gateway))
client, err := newSioClient(gateway, username, password, certsEnabled, m.exec)
if err != nil {
klog.Error(log("failed to create scaleio client: %v", err))
return nil, err
}
client.sysName = configs[confKey.system]
client.pdName = configs[confKey.protectionDomain]
client.spName = configs[confKey.storagePool]
client.sdcPath = configs[confKey.sdcRootPath]
client.provisionMode = configs[confKey.storageMode]
client.sdcGUID = configs[confKey.sdcGUID]
m.client = client
klog.V(4).Info(log("client created successfully [gateway=%s]", gateway))
}
return m.client, nil
}
// CreateVolume creates a new ScaleIO volume
func (m *sioMgr) CreateVolume(volName string, sizeGB int64) (*siotypes.Volume, error) {
client, err := m.getClient()
if err != nil {
return nil, err
}
klog.V(4).Infof("scaleio: creating volume %s", volName)
vol, err := client.CreateVolume(volName, sizeGB)
if err != nil {
klog.V(4).Infof("scaleio: failed creating volume %s: %v", volName, err)
return nil, err
}
klog.V(4).Infof("scaleio: created volume %s successfully", volName)
return vol, nil
}
// AttachVolume maps a ScaleIO volume to the running node. If multipleMappings
// is true, ScaleIO will allow other SDCs to map to the volume.
func (m *sioMgr) AttachVolume(volName string, multipleMappings bool) (string, error) {
client, err := m.getClient()
if err != nil {
klog.Error(log("attach volume failed: %v", err))
return "", err
}
klog.V(4).Infoln(log("attaching volume %s", volName))
iid, err := client.IID()
if err != nil {
klog.Error(log("failed to get instanceID"))
return "", err
}
klog.V(4).Info(log("attaching volume %s to host instance %s", volName, iid))
devs, err := client.Devs()
if err != nil {
return "", err
}
vol, err := client.FindVolume(volName)
if err != nil {
klog.Error(log("failed to find volume %s: %v", volName, err))
return "", err
}
// handle vol if already attached
if len(vol.MappedSdcInfo) > 0 {
if m.isSdcMappedToVol(iid, vol) {
klog.V(4).Info(log("skippping attachment, volume %s already attached to sdc %s", volName, iid))
return devs[vol.ID], nil
}
}
// attach volume, get deviceName
if err := client.AttachVolume(sioVolumeID(vol.ID), multipleMappings); err != nil {
klog.Error(log("attachment for volume %s failed :%v", volName, err))
return "", err
}
device, err := client.WaitForAttachedDevice(vol.ID)
if err != nil {
klog.Error(log("failed while waiting for device to attach: %v", err))
return "", err
}
klog.V(4).Info(log("volume %s attached successfully as %s to instance %s", volName, device, iid))
return device, nil
}
// IsAttached verifies that the named ScaleIO volume is still attached
func (m *sioMgr) IsAttached(volName string) (bool, error) {
client, err := m.getClient()
if err != nil {
return false, err
}
iid, err := client.IID()
if err != nil {
klog.Error("scaleio: failed to get instanceID")
return false, err
}
vol, err := client.FindVolume(volName)
if err != nil {
return false, err
}
return m.isSdcMappedToVol(iid, vol), nil
}
// DetachVolume detaches the named ScaleIO volume from an instance
func (m *sioMgr) DetachVolume(volName string) error {
client, err := m.getClient()
if err != nil {
return err
}
iid, err := client.IID()
if err != nil {
klog.Error(log("failed to get instanceID: %v", err))
return err
}
vol, err := client.FindVolume(volName)
if err != nil {
return err
}
if !m.isSdcMappedToVol(iid, vol) {
klog.Warning(log(
"skipping detached, vol %s not attached to instance %s",
volName, iid,
))
return nil
}
if err := client.DetachVolume(sioVolumeID(vol.ID)); err != nil {
klog.Error(log("failed to detach vol %s: %v", volName, err))
return err
}
klog.V(4).Info(log("volume %s detached successfully", volName))
return nil
}
// DeleteVolume deletes the named ScaleIO volume
func (m *sioMgr) DeleteVolume(volName string) error {
client, err := m.getClient()
if err != nil {
return err
}
vol, err := client.FindVolume(volName)
if err != nil {
return err
}
if err := client.DeleteVolume(sioVolumeID(vol.ID)); err != nil {
klog.Error(log("failed to delete volume %s: %v", volName, err))
return err
}
klog.V(4).Info(log("deleted volume %s successfully", volName))
return nil
}
// isSdcMappedToVol returns true if the sdc is mapped to the volume
func (m *sioMgr) isSdcMappedToVol(sdcID string, vol *siotypes.Volume) bool {
if len(vol.MappedSdcInfo) == 0 {
klog.V(4).Info(log("no attachment found"))
return false
}
for _, sdcInfo := range vol.MappedSdcInfo {
if sdcInfo.SdcID == sdcID {
return true
}
}
return false
}

View File

@ -1,338 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scaleio
import (
"errors"
"testing"
"time"
"k8s.io/kubernetes/pkg/util/mount"
siotypes "github.com/codedellemc/goscaleio/types/v1"
)
var (
fakeSdcID = "test-sdc-123456789"
fakeVolumeName = "test-vol-0001"
fakeVolumeID = "1234567890"
fakeDev = "/dev/testABC"
fakeConfig = map[string]string{
confKey.gateway: "http://sio.gateway:1234",
confKey.sslEnabled: "false",
confKey.system: "scaleio",
confKey.volumeName: "sio-0001",
confKey.secretName: "sio-secret",
confKey.username: "c2lvdXNlcgo=", // siouser
confKey.password: "c2lvcGFzc3dvcmQK", // siopassword
}
)
func newTestMgr(t *testing.T) *sioMgr {
mgr, err := newSioMgr(fakeConfig, mount.NewFakeExec(nil))
if err != nil {
t.Error(err)
}
mgr.client = newFakeSio()
return mgr
}
func TestMgrNew(t *testing.T) {
mgr, err := newSioMgr(fakeConfig, mount.NewFakeExec(nil))
if err != nil {
t.Fatal(err)
}
if mgr.configData == nil {
t.Fatal("configuration data not set")
}
if mgr.configData[confKey.volumeName] != "sio-0001" {
t.Errorf("expecting %s, got %s", "sio-0001", mgr.configData[confKey.volumeName])
}
// check defaults
if mgr.configData[confKey.protectionDomain] != "default" {
t.Errorf("unexpected value for confData[protectionDomain] %s", mgr.configData[confKey.protectionDomain])
}
if mgr.configData[confKey.storagePool] != "default" {
t.Errorf("unexpected value for confData[storagePool] %s", mgr.configData[confKey.storagePool])
}
if mgr.configData[confKey.storageMode] != "ThinProvisioned" {
t.Errorf("unexpected value for confData[storageMode] %s", mgr.configData[confKey.storageMode])
}
}
func TestMgrGetClient(t *testing.T) {
mgr := newTestMgr(t)
_, err := mgr.getClient()
if err != nil {
t.Fatal(err)
}
if mgr.client == nil {
t.Fatal("mgr.client not set")
}
}
func TestMgrCreateVolume(t *testing.T) {
mgr := newTestMgr(t)
vol, err := mgr.CreateVolume("test-vol-0001", 8*1024*1024)
if err != nil {
t.Fatal(err)
}
if vol.Name != "test-vol-0001" {
t.Errorf("unexpected vol.Name %s", vol.Name)
}
}
func TestMgrAttachVolume(t *testing.T) {
mgr := newTestMgr(t)
mgr.CreateVolume("test-vol-0001", 8*1024*1024)
device, err := mgr.AttachVolume("test-vol-0001", false)
if err != nil {
t.Fatal(err)
}
if device != "/dev/testABC" {
t.Errorf("unexpected value for mapped device %s", device)
}
}
func TestMgrAttachVolume_AlreadyAttached(t *testing.T) {
mgr := newTestMgr(t)
mgr.CreateVolume("test-vol-0001", 8*1024*1024)
mgr.AttachVolume("test-vol-0001", false)
dev, err := mgr.AttachVolume("test-vol-0001", false)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
if dev != "/dev/testABC" {
t.Errorf("unexpected value for mapped device %s", dev)
}
}
func TestMgrAttachVolume_VolumeNotFoundError(t *testing.T) {
mgr := newTestMgr(t)
mgr.CreateVolume("test-vol-0001", 8*1024*1024)
_, err := mgr.AttachVolume("test-vol-0002", false)
if err == nil {
t.Error("attachVolume should fail with volume not found error")
}
}
func TestMgrAttachVolume_WaitForAttachError(t *testing.T) {
mgr := newTestMgr(t)
mgr.CreateVolume("test-vol-0001", 8*1024*1024)
go func() {
c := mgr.client.(*fakeSio)
close(c.waitAttachCtrl)
}()
_, err := mgr.AttachVolume("test-vol-0001", false)
if err == nil {
t.Error("attachVolume should fail with attach timeout error")
}
}
func TestMgrDetachVolume(t *testing.T) {
mgr := newTestMgr(t)
mgr.CreateVolume("test-vol-0001", 8*1024*1024)
mgr.AttachVolume("test-vol-0001", false)
if err := mgr.DetachVolume("test-vol-0001"); err != nil {
t.Fatal(err)
}
fakeSio := mgr.client.(*fakeSio)
if len(fakeSio.volume.MappedSdcInfo) != 0 {
t.Errorf("expecting attached sdc to 0, got %d", len(fakeSio.volume.MappedSdcInfo))
}
if len(fakeSio.devs) != 0 {
t.Errorf("expecting local devs to be 0, got %d", len(fakeSio.devs))
}
}
func TestMgrDetachVolume_VolumeNotFound(t *testing.T) {
mgr := newTestMgr(t)
mgr.CreateVolume("test-vol-0001", 8*1024*1024)
mgr.AttachVolume("test-vol-0001", false)
err := mgr.DetachVolume("test-vol-0002")
if err == nil {
t.Fatal("expected a volume not found failure")
}
}
func TestMgrDetachVolume_VolumeNotAttached(t *testing.T) {
mgr := newTestMgr(t)
mgr.CreateVolume("test-vol-0001", 8*1024*1024)
err := mgr.DetachVolume("test-vol-0001")
if err != nil {
t.Fatal(err)
}
}
func TestMgrDetachVolume_VolumeAlreadyDetached(t *testing.T) {
mgr := newTestMgr(t)
mgr.CreateVolume("test-vol-0001", 8*1024*1024)
mgr.AttachVolume("test-vol-0001", false)
mgr.DetachVolume("test-vol-0001")
err := mgr.DetachVolume("test-vol-0001")
if err != nil {
t.Fatal("failed detaching a volume already detached")
}
}
func TestMgrDetachVolume_WaitForDetachError(t *testing.T) {
mgr := newTestMgr(t)
mgr.CreateVolume("test-vol-0001", 8*1024*1024)
mgr.AttachVolume("test-vol-0001", false)
err := mgr.DetachVolume("test-vol-0001")
if err != nil {
t.Error("detachVolume failed")
}
}
func TestMgrDeleteVolume(t *testing.T) {
mgr := newTestMgr(t)
mgr.CreateVolume("test-vol-0001", 8*1024*1024)
err := mgr.DeleteVolume("test-vol-0001")
if err != nil {
t.Fatal(err)
}
sio := mgr.client.(*fakeSio)
if sio.volume != nil {
t.Errorf("volume not nil after delete operation")
}
}
func TestMgrDeleteVolume_VolumeNotFound(t *testing.T) {
mgr := newTestMgr(t)
mgr.CreateVolume("test-vol-0001", 8*1024*1024)
err := mgr.DeleteVolume("test-vol-0002")
if err == nil {
t.Fatal("expected volume not found error")
}
}
// ************************************************************
// Helper Test Types
// ************************************************************
type fakeSio struct {
volume *siotypes.Volume
waitAttachCtrl chan struct{}
waitDetachCtrl chan struct{}
devs map[string]string
isMultiMap bool
}
func newFakeSio() *fakeSio {
return &fakeSio{
waitAttachCtrl: make(chan struct{}),
waitDetachCtrl: make(chan struct{}),
}
}
func (f *fakeSio) FindVolume(volumeName string) (*siotypes.Volume, error) {
if f.volume == nil || f.volume.Name != volumeName {
return nil, errors.New("volume not found")
}
return f.volume, nil
}
func (f *fakeSio) Volume(id sioVolumeID) (*siotypes.Volume, error) {
if f.volume == nil || f.volume.ID != string(id) {
return nil, errors.New("volume not found")
}
return f.volume, nil
}
func (f *fakeSio) CreateVolume(volName string, sizeGB int64) (*siotypes.Volume, error) {
f.volume = &siotypes.Volume{
ID: fakeVolumeID,
Name: volName,
SizeInKb: int(sizeGB),
VolumeType: "test",
}
return f.volume, nil
}
func (f *fakeSio) AttachVolume(id sioVolumeID, multiMaps bool) error {
f.isMultiMap = multiMaps
_, err := f.Volume(id)
if err != nil {
return err
}
f.volume.MappedSdcInfo = []*siotypes.MappedSdcInfo{
{SdcID: fakeSdcID},
}
return nil
}
func (f *fakeSio) DetachVolume(id sioVolumeID) error {
if _, err := f.Volume(id); err != nil {
return err
}
f.volume.MappedSdcInfo = nil
delete(f.devs, f.volume.ID)
return nil
}
func (f *fakeSio) DeleteVolume(id sioVolumeID) error {
if _, err := f.Volume(id); err != nil {
return err
}
f.volume = nil
return nil
}
func (f *fakeSio) IID() (string, error) {
return fakeSdcID, nil
}
func (f *fakeSio) Devs() (map[string]string, error) {
if f.volume == nil {
return nil, errors.New("volume not found")
}
f.devs = map[string]string{
f.volume.ID: fakeDev,
}
return f.devs, nil
}
func (f *fakeSio) GetVolumeRefs(volID sioVolumeID) (int, error) {
if f.volume == nil {
return 0, nil
}
return 1, nil
}
func (f *fakeSio) WaitForAttachedDevice(token string) (string, error) {
select {
case <-time.After(500 * time.Millisecond):
return fakeDev, nil
case <-f.waitAttachCtrl:
return "", errors.New("attached device timeout")
}
}
func (f *fakeSio) WaitForDetachedDevice(token string) error {
select {
case <-time.After(500 * time.Millisecond):
delete(f.devs, f.volume.ID)
return nil
case <-f.waitDetachCtrl:
return errors.New("detach device timeout")
}
}

View File

@ -1,224 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scaleio
import (
"errors"
api "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/utils/keymutex"
)
const (
sioPluginName = "kubernetes.io/scaleio"
sioConfigFileName = "sioconf.dat"
)
type sioPlugin struct {
host volume.VolumeHost
volumeMtx keymutex.KeyMutex
}
// ProbeVolumePlugins is the primary entrypoint for volume plugins.
func ProbeVolumePlugins() []volume.VolumePlugin {
p := &sioPlugin{
host: nil,
}
return []volume.VolumePlugin{p}
}
// *******************
// VolumePlugin Impl
// *******************
var _ volume.VolumePlugin = &sioPlugin{}
func (p *sioPlugin) Init(host volume.VolumeHost) error {
p.host = host
p.volumeMtx = keymutex.NewHashed(0)
return nil
}
func (p *sioPlugin) GetPluginName() string {
return sioPluginName
}
func (p *sioPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
attribs, err := getVolumeSourceAttribs(spec)
if err != nil {
return "", err
}
return attribs.volName, nil
}
func (p *sioPlugin) CanSupport(spec *volume.Spec) bool {
return (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.ScaleIO != nil) ||
(spec.Volume != nil && spec.Volume.ScaleIO != nil)
}
func (p *sioPlugin) IsMigratedToCSI() bool {
return false
}
func (p *sioPlugin) RequiresRemount() bool {
return false
}
func (p *sioPlugin) NewMounter(
spec *volume.Spec,
pod *api.Pod,
_ volume.VolumeOptions) (volume.Mounter, error) {
// extract source info from either ScaleIOVolumeSource or ScaleIOPersistentVolumeSource type
attribs, err := getVolumeSourceAttribs(spec)
if err != nil {
return nil, errors.New(log("mounter failed to extract volume attributes from spec: %v", err))
}
secretName, secretNS, err := getSecretAndNamespaceFromSpec(spec, pod)
if err != nil {
return nil, errors.New(log("failed to get secret name or secretNamespace: %v", err))
}
return &sioVolume{
pod: pod,
spec: spec,
secretName: secretName,
secretNamespace: secretNS,
volSpecName: spec.Name(),
volName: attribs.volName,
podUID: pod.UID,
readOnly: attribs.readOnly,
fsType: attribs.fsType,
plugin: p,
}, nil
}
// NewUnmounter creates a representation of the volume to unmount
func (p *sioPlugin) NewUnmounter(specName string, podUID types.UID) (volume.Unmounter, error) {
klog.V(4).Info(log("Unmounter for %s", specName))
return &sioVolume{
podUID: podUID,
volSpecName: specName,
plugin: p,
}, nil
}
func (p *sioPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
sioVol := &api.Volume{
Name: volumeName,
VolumeSource: api.VolumeSource{
ScaleIO: &api.ScaleIOVolumeSource{},
},
}
return volume.NewSpecFromVolume(sioVol), nil
}
// SupportsMountOption returns true if the volume plugin supports mount options.
// Specifying mount options in a volume plugin that doesn't support
// user-specified mount options will result in an error when creating persistent volumes.
func (p *sioPlugin) SupportsMountOption() bool {
return false
}
// SupportsBulkVolumeVerification checks if the volume plugin type is capable
// of enabling bulk polling of all nodes. This can speed up verification of
// attached volumes by quite a bit, but the underlying plugin must support it.
func (p *sioPlugin) SupportsBulkVolumeVerification() bool {
return false
}
//******************************
// PersistentVolumePlugin Impl
// *****************************
var _ volume.PersistentVolumePlugin = &sioPlugin{}
func (p *sioPlugin) GetAccessModes() []api.PersistentVolumeAccessMode {
return []api.PersistentVolumeAccessMode{
api.ReadWriteOnce,
api.ReadOnlyMany,
}
}
// ***************************
// DeletableVolumePlugin Impl
//****************************
var _ volume.DeletableVolumePlugin = &sioPlugin{}
func (p *sioPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
attribs, err := getVolumeSourceAttribs(spec)
if err != nil {
klog.Error(log("deleter failed to extract volume attributes from spec: %v", err))
return nil, err
}
secretName, secretNS, err := getSecretAndNamespaceFromSpec(spec, nil)
if err != nil {
return nil, errors.New(log("failed to get secret name or secretNamespace: %v", err))
}
return &sioVolume{
spec: spec,
secretName: secretName,
secretNamespace: secretNS,
volSpecName: spec.Name(),
volName: attribs.volName,
plugin: p,
readOnly: attribs.readOnly,
}, nil
}
// *********************************
// ProvisionableVolumePlugin Impl
// *********************************
var _ volume.ProvisionableVolumePlugin = &sioPlugin{}
func (p *sioPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
klog.V(4).Info(log("creating Provisioner"))
configData := options.Parameters
if configData == nil {
klog.Error(log("provisioner missing parameters, unable to continue"))
return nil, errors.New("option parameters missing")
}
// The secret name can be referenced in a couple of ways:
// options.Parameters["secretRef"] for backward compatibility, or
// options.Parameters["secretName"]
secretName := configData[confKey.secretName]
if secretName == "" {
secretName = configData["secretName"]
configData[confKey.secretName] = secretName
}
secretNS := configData[confKey.secretNamespace]
if secretNS == "" {
secretNS = options.PVC.Namespace
}
return &sioVolume{
configData: configData,
plugin: p,
options: options,
secretName: secretName,
secretNamespace: secretNS,
volSpecName: options.PVName,
}, nil
}

View File

@ -1,337 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scaleio
import (
"encoding/gob"
"errors"
"fmt"
"os"
"path"
"strconv"
"k8s.io/klog"
api "k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/volume"
volutil "k8s.io/kubernetes/pkg/volume/util"
)
type volSourceAttribs struct {
volName,
fsType string
readOnly bool
}
var (
confKey = struct {
gateway,
sslEnabled,
secretName,
system,
protectionDomain,
storagePool,
storageMode,
sdcRootPath,
volumeName,
volSpecName,
fsType,
readOnly,
username,
password,
secretNamespace,
sdcGUID string
}{
gateway: "gateway",
sslEnabled: "sslEnabled",
secretName: "secretRef",
secretNamespace: "secretNamespace",
system: "system",
protectionDomain: "protectionDomain",
storagePool: "storagePool",
storageMode: "storageMode",
sdcRootPath: "sdcRootPath",
volumeName: "volumeName",
volSpecName: "volSpecName",
fsType: "fsType",
readOnly: "readOnly",
username: "username",
password: "password",
sdcGUID: "sdcGUID",
}
sdcGUIDLabelName = "scaleio.sdcGUID"
sdcRootPath = "/opt/emc/scaleio/sdc/bin"
secretNotFoundErr = errors.New("secret not found")
configMapNotFoundErr = errors.New("configMap not found")
gatewayNotProvidedErr = errors.New("ScaleIO gateway not provided")
secretRefNotProvidedErr = errors.New("secret ref not provided")
systemNotProvidedErr = errors.New("ScaleIO system not provided")
storagePoolNotProvidedErr = errors.New("ScaleIO storage pool not provided")
protectionDomainNotProvidedErr = errors.New("ScaleIO protection domain not provided")
)
// mapVolumeSpec maps attributes from either ScaleIOVolumeSource or ScaleIOPersistentVolumeSource to config
func mapVolumeSpec(config map[string]string, spec *volume.Spec) {
if source, err := getScaleIOPersistentVolumeSourceFromSpec(spec); err == nil {
config[confKey.gateway] = source.Gateway
config[confKey.system] = source.System
config[confKey.volumeName] = source.VolumeName
config[confKey.sslEnabled] = strconv.FormatBool(source.SSLEnabled)
config[confKey.protectionDomain] = source.ProtectionDomain
config[confKey.storagePool] = source.StoragePool
config[confKey.storageMode] = source.StorageMode
config[confKey.fsType] = source.FSType
config[confKey.readOnly] = strconv.FormatBool(source.ReadOnly)
}
if source, err := getScaleIOVolumeSourceFromSpec(spec); err == nil {
config[confKey.gateway] = source.Gateway
config[confKey.system] = source.System
config[confKey.volumeName] = source.VolumeName
config[confKey.sslEnabled] = strconv.FormatBool(source.SSLEnabled)
config[confKey.protectionDomain] = source.ProtectionDomain
config[confKey.storagePool] = source.StoragePool
config[confKey.storageMode] = source.StorageMode
config[confKey.fsType] = source.FSType
config[confKey.readOnly] = strconv.FormatBool(source.ReadOnly)
}
//optionals
applyConfigDefaults(config)
}
func validateConfigs(config map[string]string) error {
if config[confKey.gateway] == "" {
return gatewayNotProvidedErr
}
if config[confKey.secretName] == "" {
return secretRefNotProvidedErr
}
if config[confKey.system] == "" {
return systemNotProvidedErr
}
if config[confKey.storagePool] == "" {
return storagePoolNotProvidedErr
}
if config[confKey.protectionDomain] == "" {
return protectionDomainNotProvidedErr
}
return nil
}
// applyConfigDefaults applies known defaults to the incoming spec for dynamic PVCs.
func applyConfigDefaults(config map[string]string) {
b, err := strconv.ParseBool(config[confKey.sslEnabled])
if err != nil {
klog.Warning(log("failed to parse param sslEnabled, setting it to false"))
b = false
}
config[confKey.sslEnabled] = strconv.FormatBool(b)
config[confKey.storageMode] = defaultString(config[confKey.storageMode], "ThinProvisioned")
config[confKey.fsType] = defaultString(config[confKey.fsType], "xfs")
b, err = strconv.ParseBool(config[confKey.readOnly])
if err != nil {
klog.Warning(log("failed to parse param readOnly, setting it to false"))
b = false
}
config[confKey.readOnly] = strconv.FormatBool(b)
}
func defaultString(val, defVal string) string {
if val == "" {
return defVal
}
return val
}
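// Illustrative sketch, not part of the original source: given only the
// required fields, applyConfigDefaults fills in the optional keys with the
// fallbacks above (sslEnabled=false, storageMode=ThinProvisioned, fsType=xfs,
// readOnly=false). This function is hypothetical and only demonstrates the
// post-conditions.
func exampleApplyDefaults() map[string]string {
	cfg := map[string]string{
		confKey.gateway:    "https://sio-gw/",
		confKey.system:     "sio",
		confKey.secretName: "sio-secret",
	}
	applyConfigDefaults(cfg)
	// cfg[confKey.storageMode] == "ThinProvisioned"
	// cfg[confKey.fsType] == "xfs"
	// cfg[confKey.sslEnabled] == "false" && cfg[confKey.readOnly] == "false"
	return cfg
}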
// loadConfig loads configuration data from a file on disk
func loadConfig(configName string) (map[string]string, error) {
klog.V(4).Info(log("loading config file %s", configName))
file, err := os.Open(configName)
if err != nil {
klog.Error(log("failed to open config file %s: %v", configName, err))
return nil, err
}
defer file.Close()
data := map[string]string{}
if err := gob.NewDecoder(file).Decode(&data); err != nil {
klog.Error(log("failed to parse config data %s: %v", configName, err))
return nil, err
}
applyConfigDefaults(data)
if err := validateConfigs(data); err != nil {
klog.Error(log("failed to load ConfigMap %s: %v", err))
return nil, err
}
return data, nil
}
// saveConfig saves the configuration data to local disk
func saveConfig(configName string, data map[string]string) error {
klog.V(4).Info(log("saving config file %s", configName))
dir := path.Dir(configName)
if _, err := os.Stat(dir); err != nil {
if !os.IsNotExist(err) {
return err
}
klog.V(4).Info(log("creating config dir for config data: %s", dir))
if err := os.MkdirAll(dir, 0750); err != nil {
klog.Error(log("failed to create config data dir %v", err))
return err
}
}
file, err := os.Create(configName)
if err != nil {
klog.V(4).Info(log("failed to save config data file %s: %v", configName, err))
return err
}
defer file.Close()
if err := gob.NewEncoder(file).Encode(data); err != nil {
klog.Error(log("failed to save config %s: %v", configName, err))
return err
}
klog.V(4).Info(log("config data file saved successfully as %s", configName))
return nil
}
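// Illustrative sketch, not part of the original source: the cached config is
// a plain map[string]string serialized with encoding/gob, so a save/load
// cycle is lossless. loadConfig also re-applies defaults and validates, so
// the map must carry every required key; the path below is hypothetical.
func exampleConfigRoundTrip() (map[string]string, error) {
	name := "/tmp/scaleio-example/sio.conf" // hypothetical location
	in := map[string]string{
		confKey.gateway:          "https://sio-gw/",
		confKey.system:           "sio",
		confKey.secretName:       "sio-secret",
		confKey.protectionDomain: "pd-1",
		confKey.storagePool:      "sp-1",
	}
	if err := saveConfig(name, in); err != nil {
		return nil, err
	}
	return loadConfig(name)
}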
// attachSecret loads secret object and attaches to configData
func attachSecret(plug *sioPlugin, namespace string, configData map[string]string) error {
// load secret
secretRefName := configData[confKey.secretName]
kubeClient := plug.host.GetKubeClient()
secretMap, err := volutil.GetSecretForPV(namespace, secretRefName, sioPluginName, kubeClient)
if err != nil {
klog.Error(log("failed to get secret: %v", err))
return secretNotFoundErr
}
// merge secret data
for key, val := range secretMap {
configData[key] = val
}
return nil
}
// attachSdcGUID injects the sdc guid node label value into config
func attachSdcGUID(plug *sioPlugin, conf map[string]string) error {
guid, err := getSdcGUIDLabel(plug)
if err != nil {
return err
}
conf[confKey.sdcGUID] = guid
return nil
}
// getSdcGUIDLabel fetches the scaleio.sdcGuid node label
// associated with the node executing this code.
func getSdcGUIDLabel(plug *sioPlugin) (string, error) {
nodeLabels, err := plug.host.GetNodeLabels()
if err != nil {
return "", err
}
label, ok := nodeLabels[sdcGUIDLabelName]
if !ok {
klog.V(4).Info(log("node label %s not found", sdcGUIDLabelName))
return "", nil
}
klog.V(4).Info(log("found node label %s=%s", sdcGUIDLabelName, label))
return label, nil
}
// getVolumeSourceFromSpec safely extracts ScaleIOVolumeSource or ScaleIOPersistentVolumeSource from spec
func getVolumeSourceFromSpec(spec *volume.Spec) (interface{}, error) {
if spec.Volume != nil && spec.Volume.ScaleIO != nil {
return spec.Volume.ScaleIO, nil
}
if spec.PersistentVolume != nil &&
spec.PersistentVolume.Spec.ScaleIO != nil {
return spec.PersistentVolume.Spec.ScaleIO, nil
}
return nil, fmt.Errorf("ScaleIO not defined in spec")
}
func getVolumeSourceAttribs(spec *volume.Spec) (*volSourceAttribs, error) {
attribs := new(volSourceAttribs)
if pvSource, err := getScaleIOPersistentVolumeSourceFromSpec(spec); err == nil {
attribs.volName = pvSource.VolumeName
attribs.fsType = pvSource.FSType
attribs.readOnly = pvSource.ReadOnly
} else if pSource, err := getScaleIOVolumeSourceFromSpec(spec); err == nil {
attribs.volName = pSource.VolumeName
attribs.fsType = pSource.FSType
attribs.readOnly = pSource.ReadOnly
} else {
msg := log("failed to get ScaleIOVolumeSource or ScaleIOPersistentVolumeSource from spec")
klog.Error(msg)
return nil, errors.New(msg)
}
return attribs, nil
}
func getScaleIOPersistentVolumeSourceFromSpec(spec *volume.Spec) (*api.ScaleIOPersistentVolumeSource, error) {
source, err := getVolumeSourceFromSpec(spec)
if err != nil {
return nil, err
}
if val, ok := source.(*api.ScaleIOPersistentVolumeSource); ok {
return val, nil
}
return nil, fmt.Errorf("spec is not a valid ScaleIOPersistentVolume type")
}
func getScaleIOVolumeSourceFromSpec(spec *volume.Spec) (*api.ScaleIOVolumeSource, error) {
source, err := getVolumeSourceFromSpec(spec)
if err != nil {
return nil, err
}
if val, ok := source.(*api.ScaleIOVolumeSource); ok {
return val, nil
}
return nil, fmt.Errorf("spec is not a valid ScaleIOVolume type")
}
func getSecretAndNamespaceFromSpec(spec *volume.Spec, pod *api.Pod) (secretName string, secretNS string, err error) {
if source, err := getScaleIOVolumeSourceFromSpec(spec); err == nil {
secretName = source.SecretRef.Name
if pod != nil {
secretNS = pod.Namespace
}
} else if source, err := getScaleIOPersistentVolumeSourceFromSpec(spec); err == nil {
if source.SecretRef != nil {
secretName = source.SecretRef.Name
secretNS = source.SecretRef.Namespace
if secretNS == "" && pod != nil {
secretNS = pod.Namespace
}
}
} else {
return "", "", errors.New("failed to get ScaleIOVolumeSource or ScaleIOPersistentVolumeSource")
}
return secretName, secretNS, nil
}
func log(msg string, parts ...interface{}) string {
return fmt.Sprintf(fmt.Sprintf("scaleio: %s", msg), parts...)
}
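// Illustrative sketch, not part of the original source: every message in this
// package goes through log(), so output is uniformly prefixed, e.g. the call
// below returns "scaleio: attach failed for vol-1".
func exampleLogPrefix() string {
	return log("attach failed for %s", "vol-1")
}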

View File

@@ -1,224 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scaleio
import (
"encoding/gob"
"os"
"path"
"reflect"
"testing"
api "k8s.io/api/core/v1"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/volume"
)
var (
vol = &api.Volume{
Name: testSioVolName,
VolumeSource: api.VolumeSource{
ScaleIO: &api.ScaleIOVolumeSource{
Gateway: "http://test.scaleio:1111",
System: "sio",
ProtectionDomain: "defaultPD",
StoragePool: "defaultSP",
VolumeName: "test-vol",
FSType: "ext4",
SecretRef: &api.LocalObjectReference{Name: "test-secret"},
},
},
}
config = map[string]string{
confKey.system: "sio",
confKey.gateway: "http://sio/",
confKey.volSpecName: testSioVolName,
confKey.volumeName: "sio-vol",
confKey.secretName: "sio-secret",
confKey.protectionDomain: "defaultPD",
confKey.storagePool: "deraultSP",
confKey.fsType: "xfs",
confKey.readOnly: "true",
}
testConfigFile = "conf.dat"
)
func TestUtilMapVolumeSource(t *testing.T) {
data := make(map[string]string)
mapVolumeSpec(data, volume.NewSpecFromVolume(vol))
if data[confKey.gateway] != "http://test.scaleio:1111" {
t.Error("Unexpected gateway value")
}
if data[confKey.system] != "sio" {
t.Error("Unexpected system value")
}
if data[confKey.protectionDomain] != "defaultPD" {
t.Error("Unexpected protection domain value")
}
if data[confKey.storagePool] != "defaultSP" {
t.Error("Unexpected storage pool value")
}
if data[confKey.volumeName] != "test-vol" {
t.Error("Unexpected volume name value")
}
if data[confKey.fsType] != "ext4" {
t.Error("Unexpected fstype value")
}
if data[confKey.sslEnabled] != "false" {
t.Error("Unexpected sslEnabled value")
}
if data[confKey.readOnly] != "false" {
t.Error("Unexpected readOnly value: ", data[confKey.readOnly])
}
}
func TestUtilValidateConfigs(t *testing.T) {
data := map[string]string{
confKey.secretName: "sio-secret",
confKey.system: "sio",
}
if err := validateConfigs(data); err != gatewayNotProvidedErr {
t.Error("Expecting error for missing gateway, but did not get it")
}
}
func TestUtilApplyConfigDefaults(t *testing.T) {
data := map[string]string{
confKey.system: "sio",
confKey.gateway: "http://sio/",
confKey.volumeName: "sio-vol",
confKey.secretName: "test-secret",
}
applyConfigDefaults(data)
if data[confKey.gateway] != "http://sio/" {
t.Error("Unexpected gateway value")
}
if data[confKey.system] != "sio" {
t.Error("Unexpected system value")
}
if data[confKey.protectionDomain] != "" {
t.Error("Unexpected protection domain value")
}
if data[confKey.storagePool] != "" {
t.Error("Unexpected storage pool value")
}
if data[confKey.volumeName] != "sio-vol" {
t.Error("Unexpected volume name value")
}
if data[confKey.fsType] != "xfs" {
t.Error("Unexpected fstype value")
}
if data[confKey.storageMode] != "ThinProvisioned" {
t.Error("Unexpected storage mode value")
}
if data[confKey.secretName] != "test-secret" {
t.Error("Unexpected secret ref value")
}
if data[confKey.sslEnabled] != "false" {
t.Error("Unexpected sslEnabled value")
}
if data[confKey.readOnly] != "false" {
t.Error("Unexpected readOnly value: ", data[confKey.readOnly])
}
}
func TestUtilDefaultString(t *testing.T) {
if defaultString("", "foo") != "foo" {
t.Error("Unexpected value for default value")
}
}
func TestUtilSaveConfig(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("scaleio-test")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
config := path.Join(tmpDir, testConfigFile)
data := map[string]string{
confKey.gateway: "https://test-gateway/",
confKey.secretName: "sio-secret",
confKey.sslEnabled: "false",
}
if err := saveConfig(config, data); err != nil {
t.Fatalf("failed while saving data: %v", err)
}
file, err := os.Open(config)
if err != nil {
t.Fatalf("failed to open conf file %s: %v", config, err)
}
defer file.Close()
dataRcvd := map[string]string{}
if err := gob.NewDecoder(file).Decode(&dataRcvd); err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(data, dataRcvd) {
t.Error("we got problem, config data not the same")
}
}
func TestUtilAttachSecret(t *testing.T) {
plugMgr, tmpDir := newPluginMgr(t, makeScaleIOSecret(testSecret, testns))
defer os.RemoveAll(tmpDir)
plug, err := plugMgr.FindPluginByName(sioPluginName)
if err != nil {
t.Errorf("Can't find the plugin %v", sioPluginName)
}
sioPlug, ok := plug.(*sioPlugin)
if !ok {
t.Errorf("Cannot assert plugin to be type sioPlugin")
}
data := make(map[string]string)
for k, v := range config {
data[k] = v
}
if err := attachSecret(sioPlug, "default", data); err != nil {
t.Errorf("failed to setupConfigData %v", err)
}
if data[confKey.username] == "" {
t.Errorf("failed to merge secret")
}
}
func TestUtilLoadConfig(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("scaleio-test")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
configFile := path.Join(tmpDir, sioConfigFileName)
if err := saveConfig(configFile, config); err != nil {
t.Fatalf("failed to save configFile %s error:%v", configFile, err)
}
dataRcvd, err := loadConfig(configFile)
if err != nil {
t.Fatalf("failed to load configFile %s error:%v", configFile, err)
}
if dataRcvd[confKey.gateway] != config[confKey.gateway] ||
dataRcvd[confKey.system] != config[confKey.system] {
t.Fatal("loaded config data not matching saved config data")
}
}

View File

@@ -1,527 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scaleio
import (
"fmt"
"os"
"path"
"strconv"
"strings"
api "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/uuid"
volumehelpers "k8s.io/cloud-provider/volume/helpers"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
utilstrings "k8s.io/utils/strings"
)
type sioVolume struct {
sioMgr *sioMgr
plugin *sioPlugin
pod *api.Pod
podUID types.UID
spec *volume.Spec
secretName string
secretNamespace string
volSpecName string
volName string
readOnly bool
fsType string
options volume.VolumeOptions
configData map[string]string
volume.MetricsNil
}
// *******************
// volume.Volume Impl
var _ volume.Volume = &sioVolume{}
// GetPath returns the path where the volume will be mounted.
func (v *sioVolume) GetPath() string {
return v.plugin.host.GetPodVolumeDir(
v.podUID,
utilstrings.EscapeQualifiedName(sioPluginName),
v.volSpecName)
}
// *************
// Mounter Impl
// *************
var _ volume.Mounter = &sioVolume{}
// CanMount verifies that the volume can be mounted prior to SetUp.
// A nil error indicates that the volume is ready for mounting.
func (v *sioVolume) CanMount() error {
return nil
}
func (v *sioVolume) SetUp(fsGroup *int64) error {
return v.SetUpAt(v.GetPath(), fsGroup)
}
// SetUpAt bind mounts the disk global mount to the volume path.
func (v *sioVolume) SetUpAt(dir string, fsGroup *int64) error {
v.plugin.volumeMtx.LockKey(v.volSpecName)
defer v.plugin.volumeMtx.UnlockKey(v.volSpecName)
klog.V(4).Info(log("setting up volume for PV.spec %s", v.volSpecName))
if err := v.setSioMgr(); err != nil {
klog.Error(log("setup failed to create scalio manager: %v", err))
return err
}
mounter := v.plugin.host.GetMounter(v.plugin.GetPluginName())
notDevMnt, err := mounter.IsLikelyNotMountPoint(dir)
if err != nil && !os.IsNotExist(err) {
klog.Error(log("IsLikelyNotMountPoint test failed for dir %v", dir))
return err
}
if !notDevMnt {
klog.V(4).Info(log("skipping setup, dir %s already a mount point", v.volName))
return nil
}
// should multiple-mapping be enabled
enableMultiMaps := false
isROM := false
if v.spec.PersistentVolume != nil {
ams := v.spec.PersistentVolume.Spec.AccessModes
for _, am := range ams {
if am == api.ReadOnlyMany {
enableMultiMaps = true
isROM = true
}
}
}
klog.V(4).Info(log("multiple mapping enabled = %v", enableMultiMaps))
volName := v.volName
devicePath, err := v.sioMgr.AttachVolume(volName, enableMultiMaps)
if err != nil {
klog.Error(log("setup of volume %v: %v", v.volSpecName, err))
return err
}
options := []string{}
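// Decide the mount mode before mounting (added commentary, not in the
// original source; in a Go switch the default arm only runs when no case
// matches, regardless of its position): a ReadOnlyMany PV that this pod has
// not marked readOnly still mounts rw; otherwise ReadOnlyMany or an explicit
// readOnly request forces ro.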
switch {
default:
options = append(options, "rw")
case isROM && !v.readOnly:
options = append(options, "rw")
case isROM:
options = append(options, "ro")
case v.readOnly:
options = append(options, "ro")
}
klog.V(4).Info(log("mounting device %s -> %s", devicePath, dir))
if err := os.MkdirAll(dir, 0750); err != nil {
klog.Error(log("failed to create dir %#v: %v", dir, err))
return err
}
klog.V(4).Info(log("setup created mount point directory %s", dir))
diskMounter := util.NewSafeFormatAndMountFromHost(v.plugin.GetPluginName(), v.plugin.host)
err = diskMounter.FormatAndMount(devicePath, dir, v.fsType, options)
if err != nil {
klog.Error(log("mount operation failed during setup: %v", err))
if err := os.Remove(dir); err != nil && !os.IsNotExist(err) {
klog.Error(log("failed to remove dir %s during a failed mount at setup: %v", dir, err))
return err
}
return err
}
if !v.readOnly && fsGroup != nil {
klog.V(4).Info(log("applying value FSGroup ownership"))
volume.SetVolumeOwnership(v, fsGroup)
}
klog.V(4).Info(log("successfully setup PV %s: volume %s mapped as %s mounted at %s", v.volSpecName, v.volName, devicePath, dir))
return nil
}
func (v *sioVolume) GetAttributes() volume.Attributes {
return volume.Attributes{
ReadOnly: v.readOnly,
Managed: !v.readOnly,
SupportsSELinux: true,
}
}
// **********************
// volume.Unmounter Impl
// *********************
var _ volume.Unmounter = &sioVolume{}
// TearDown unmounts the bind mount at the volume path.
func (v *sioVolume) TearDown() error {
return v.TearDownAt(v.GetPath())
}
// TearDownAt unmounts and removes the volume mounted at dir.
func (v *sioVolume) TearDownAt(dir string) error {
v.plugin.volumeMtx.LockKey(v.volSpecName)
defer v.plugin.volumeMtx.UnlockKey(v.volSpecName)
mounter := v.plugin.host.GetMounter(v.plugin.GetPluginName())
dev, _, err := mount.GetDeviceNameFromMount(mounter, dir)
if err != nil {
klog.Errorf(log("failed to get reference count for volume: %s", dir))
return err
}
klog.V(4).Info(log("attempting to unmount %s", dir))
if err := mount.CleanupMountPoint(dir, mounter, false); err != nil {
klog.Error(log("teardown failed while unmounting dir %s: %v ", dir, err))
return err
}
klog.V(4).Info(log("dir %s unmounted successfully", dir))
// detach/unmap
deviceBusy, err := mounter.DeviceOpened(dev)
if err != nil {
klog.Error(log("teardown unable to get status for device %s: %v", dev, err))
return err
}
// Detach volume from node:
// use "last attempt wins" strategy to detach volume from node
// only allow volume to detach when it is not busy (not being used by other pods)
if !deviceBusy {
klog.V(4).Info(log("teardown is attempting to detach/unmap volume for PV %s", v.volSpecName))
if err := v.resetSioMgr(); err != nil {
klog.Error(log("teardown failed, unable to reset scalio mgr: %v", err))
}
volName := v.volName
if err := v.sioMgr.DetachVolume(volName); err != nil {
klog.Warning(log("warning: detaching failed for volume %s: %v", volName, err))
return nil
}
klog.V(4).Infof(log("teardown of volume %v detached successfully", volName))
}
return nil
}
// ********************
// volume.Deleter Impl
// ********************
var _ volume.Deleter = &sioVolume{}
func (v *sioVolume) Delete() error {
klog.V(4).Info(log("deleting pvc %s", v.volSpecName))
if err := v.setSioMgrFromSpec(); err != nil {
klog.Error(log("delete failed while setting sio manager: %v", err))
return err
}
err := v.sioMgr.DeleteVolume(v.volName)
if err != nil {
klog.Error(log("failed to delete volume %s: %v", v.volName, err))
return err
}
klog.V(4).Info(log("successfully deleted PV %s with volume %s", v.volSpecName, v.volName))
return nil
}
// ************************
// volume.Provisioner Impl
// ************************
var _ volume.Provisioner = &sioVolume{}
func (v *sioVolume) Provision(selectedNode *api.Node, allowedTopologies []api.TopologySelectorTerm) (*api.PersistentVolume, error) {
klog.V(4).Info(log("attempting to dynamically provision pvc %v", v.options.PVC.Name))
if !util.AccessModesContainedInAll(v.plugin.GetAccessModes(), v.options.PVC.Spec.AccessModes) {
return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", v.options.PVC.Spec.AccessModes, v.plugin.GetAccessModes())
}
if util.CheckPersistentVolumeClaimModeBlock(v.options.PVC) {
return nil, fmt.Errorf("%s does not support block volume provisioning", v.plugin.GetPluginName())
}
// set up volume attributes
genName := v.generateName("k8svol", 11)
eightGig := int64(8 * volumehelpers.GiB)
capacity := v.options.PVC.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)]
volSizeBytes := capacity.Value()
volSizeGB := int64(volumehelpers.RoundUpToGiB(capacity))
if volSizeBytes == 0 {
return nil, fmt.Errorf("invalid volume size of 0 specified")
}
if volSizeBytes < eightGig {
eightGiBCapacity := resource.NewQuantity(eightGig, resource.BinarySI)
volSizeGB = int64(volumehelpers.RoundUpToGiB(*eightGiBCapacity))
klog.V(4).Info(log("capacity less than 8Gi found, adjusted to %dGi", volSizeGB))
}
// create sio manager
if err := v.setSioMgrFromConfig(); err != nil {
klog.Error(log("provision failed while setting up sio mgr: %v", err))
return nil, err
}
// create volume
volName := genName
vol, err := v.sioMgr.CreateVolume(volName, volSizeGB)
if err != nil {
klog.Error(log("provision failed while creating volume: %v", err))
return nil, err
}
// prepare data for pv
v.configData[confKey.volumeName] = volName
sslEnabled, err := strconv.ParseBool(v.configData[confKey.sslEnabled])
if err != nil {
klog.Warning(log("failed to parse parameter sslEnabled, setting to false"))
sslEnabled = false
}
readOnly, err := strconv.ParseBool(v.configData[confKey.readOnly])
if err != nil {
klog.Warning(log("failed to parse parameter readOnly, setting it to false"))
readOnly = false
}
// describe created pv
pvName := genName
pv := &api.PersistentVolume{
ObjectMeta: meta.ObjectMeta{
Name: pvName,
Namespace: v.options.PVC.Namespace,
Labels: map[string]string{},
Annotations: map[string]string{
util.VolumeDynamicallyCreatedByKey: "scaleio-dynamic-provisioner",
},
},
Spec: api.PersistentVolumeSpec{
PersistentVolumeReclaimPolicy: v.options.PersistentVolumeReclaimPolicy,
AccessModes: v.options.PVC.Spec.AccessModes,
Capacity: api.ResourceList{
api.ResourceName(api.ResourceStorage): resource.MustParse(
fmt.Sprintf("%dGi", volSizeGB),
),
},
PersistentVolumeSource: api.PersistentVolumeSource{
ScaleIO: &api.ScaleIOPersistentVolumeSource{
Gateway: v.configData[confKey.gateway],
SSLEnabled: sslEnabled,
SecretRef: &api.SecretReference{Name: v.secretName, Namespace: v.secretNamespace},
System: v.configData[confKey.system],
ProtectionDomain: v.configData[confKey.protectionDomain],
StoragePool: v.configData[confKey.storagePool],
StorageMode: v.configData[confKey.storageMode],
VolumeName: volName,
FSType: v.configData[confKey.fsType],
ReadOnly: readOnly,
},
},
},
}
if len(v.options.PVC.Spec.AccessModes) == 0 {
pv.Spec.AccessModes = v.plugin.GetAccessModes()
}
klog.V(4).Info(log("provisioner created pv %v and volume %s successfully", pvName, vol.Name))
return pv, nil
}
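// Illustrative sketch, not part of the original source: Provision rounds the
// claim up to whole GiB and enforces the 8GiB floor seen above, so a 100Mi
// claim yields an 8Gi volume. This only mirrors the arithmetic in Provision.
func exampleProvisionSize() int64 {
	capacity := resource.MustParse("100Mi")
	sizeGB := int64(volumehelpers.RoundUpToGiB(capacity)) // 1 GiB after round-up
	if capacity.Value() < int64(8*volumehelpers.GiB) {
		sizeGB = 8 // floor applied for small claims
	}
	return sizeGB
}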
// setSioMgr creates the scaleio manager from cached config data if found;
// otherwise it sets up new config data and creates the manager.
func (v *sioVolume) setSioMgr() error {
klog.V(4).Info(log("setting up sio mgr for spec %s", v.volSpecName))
podDir := v.plugin.host.GetPodPluginDir(v.podUID, sioPluginName)
configName := path.Join(podDir, sioConfigFileName)
if v.sioMgr == nil {
configData, err := loadConfig(configName) // try to load config if exist
if err != nil {
if !os.IsNotExist(err) {
klog.Error(log("failed to load config %s : %v", configName, err))
return err
}
klog.V(4).Info(log("previous config file not found, creating new one"))
// prepare config data
configData = make(map[string]string)
mapVolumeSpec(configData, v.spec)
// additional config data
configData[confKey.secretNamespace] = v.secretNamespace
configData[confKey.secretName] = v.secretName
configData[confKey.volSpecName] = v.volSpecName
if err := validateConfigs(configData); err != nil {
klog.Error(log("config setup failed: %s", err))
return err
}
// persist config
if err := saveConfig(configName, configData); err != nil {
klog.Error(log("failed to save config data: %v", err))
return err
}
}
// merge in secret
if err := attachSecret(v.plugin, v.secretNamespace, configData); err != nil {
klog.Error(log("failed to load secret: %v", err))
return err
}
// merge in Sdc Guid label value
if err := attachSdcGUID(v.plugin, configData); err != nil {
klog.Error(log("failed to retrieve sdc guid: %v", err))
return err
}
mgr, err := newSioMgr(configData, v.plugin.host.GetExec(v.plugin.GetPluginName()))
if err != nil {
klog.Error(log("failed to reset sio manager: %v", err))
return err
}
v.sioMgr = mgr
}
return nil
}
// resetSioMgr creates scaleio manager from existing (cached) config data
func (v *sioVolume) resetSioMgr() error {
podDir := v.plugin.host.GetPodPluginDir(v.podUID, sioPluginName)
configName := path.Join(podDir, sioConfigFileName)
if v.sioMgr == nil {
// load config data from disk
configData, err := loadConfig(configName)
if err != nil {
klog.Error(log("failed to load config data: %v", err))
return err
}
v.secretName = configData[confKey.secretName]
v.secretNamespace = configData[confKey.secretNamespace]
v.volName = configData[confKey.volumeName]
v.volSpecName = configData[confKey.volSpecName]
// attach secret
if err := attachSecret(v.plugin, v.secretNamespace, configData); err != nil {
klog.Error(log("failed to load secret: %v", err))
return err
}
// merge in Sdc Guid label value
if err := attachSdcGUID(v.plugin, configData); err != nil {
klog.Error(log("failed to retrieve sdc guid: %v", err))
return err
}
mgr, err := newSioMgr(configData, v.plugin.host.GetExec(v.plugin.GetPluginName()))
if err != nil {
klog.Error(log("failed to reset scaleio mgr: %v", err))
return err
}
v.sioMgr = mgr
}
return nil
}
// setSioFromConfig sets up scaleio mgr from an available config data map
// designed to be called from dynamic provisioner
func (v *sioVolume) setSioMgrFromConfig() error {
klog.V(4).Info(log("setting scaleio mgr from available config"))
if v.sioMgr == nil {
applyConfigDefaults(v.configData)
v.configData[confKey.volSpecName] = v.volSpecName
if err := validateConfigs(v.configData); err != nil {
klog.Error(log("config data setup failed: %s", err))
return err
}
// copy config and attach secret
data := map[string]string{}
for k, v := range v.configData {
data[k] = v
}
if err := attachSecret(v.plugin, v.secretNamespace, data); err != nil {
klog.Error(log("failed to load secret: %v", err))
return err
}
mgr, err := newSioMgr(data, v.plugin.host.GetExec(v.plugin.GetPluginName()))
if err != nil {
klog.Error(log("failed while setting scaleio mgr from config: %v", err))
return err
}
v.sioMgr = mgr
}
return nil
}
// setSioMgrFromSpec sets the scaleio manager from a spec object.
// The spec may be complete or incomplete depending on lifecycle phase.
func (v *sioVolume) setSioMgrFromSpec() error {
klog.V(4).Info(log("setting sio manager from spec"))
if v.sioMgr == nil {
// get config data from the spec volume source
configData := map[string]string{}
mapVolumeSpec(configData, v.spec)
// additional config
configData[confKey.secretNamespace] = v.secretNamespace
configData[confKey.secretName] = v.secretName
configData[confKey.volSpecName] = v.volSpecName
if err := validateConfigs(configData); err != nil {
klog.Error(log("config setup failed: %s", err))
return err
}
// attach secret object to config data
if err := attachSecret(v.plugin, v.secretNamespace, configData); err != nil {
klog.Error(log("failed to load secret: %v", err))
return err
}
mgr, err := newSioMgr(configData, v.plugin.host.GetExec(v.plugin.GetPluginName()))
if err != nil {
klog.Error(log("failed to reset sio manager: %v", err))
return err
}
v.sioMgr = mgr
}
return nil
}
func (v *sioVolume) generateName(prefix string, size int) string {
return fmt.Sprintf("%s-%s", prefix, strings.Replace(string(uuid.NewUUID()), "-", "", -1)[0:size])
}
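// Illustrative sketch, not part of the original source: generateName produces
// values such as "k8svol-0123456789a", i.e. the prefix plus the first 11
// characters of a dash-stripped UUID; Provision uses one such value for both
// the PV name and the backing ScaleIO volume name.
func exampleGenerateName(v *sioVolume) string {
	return v.generateName("k8svol", 11)
}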

View File

@@ -1,535 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scaleio
import (
"fmt"
"os"
"path"
"strings"
"testing"
"k8s.io/klog"
api "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
fakeclient "k8s.io/client-go/kubernetes/fake"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
)
var (
testSioSystem = "sio"
testSioPD = "default"
testSioVol = "vol-0001"
testns = "default"
testSecret = "sio-secret"
testSioVolName = fmt.Sprintf("%s%s%s", testns, "-", testSioVol)
podUID = types.UID("sio-pod")
)
func newPluginMgr(t *testing.T, apiObject runtime.Object) (*volume.VolumePluginMgr, string) {
tmpDir, err := utiltesting.MkTmpdir("scaleio-test")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
fakeClient := fakeclient.NewSimpleClientset(apiObject)
host := volumetest.NewFakeVolumeHostWithNodeLabels(
tmpDir,
fakeClient,
nil,
map[string]string{sdcGUIDLabelName: "abc-123"},
)
plugMgr := &volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, host)
return plugMgr, tmpDir
}
func makeScaleIOSecret(name, namespace string) *api.Secret {
return &api.Secret{
ObjectMeta: meta.ObjectMeta{
Name: name,
Namespace: namespace,
UID: "1234567890",
},
Type: api.SecretType("kubernetes.io/scaleio"),
Data: map[string][]byte{
"username": []byte("username"),
"password": []byte("password"),
},
}
}
func TestVolumeCanSupport(t *testing.T) {
plugMgr, tmpDir := newPluginMgr(t, makeScaleIOSecret(testSecret, testns))
defer os.RemoveAll(tmpDir)
plug, err := plugMgr.FindPluginByName(sioPluginName)
if err != nil {
t.Errorf("Can't find the plugin %s by name", sioPluginName)
}
if plug.GetPluginName() != "kubernetes.io/scaleio" {
t.Errorf("Wrong name: %s", plug.GetPluginName())
}
if !plug.CanSupport(
&volume.Spec{
Volume: &api.Volume{
VolumeSource: api.VolumeSource{
ScaleIO: &api.ScaleIOVolumeSource{},
},
},
},
) {
t.Errorf("Expected true for CanSupport LibStorage VolumeSource")
}
if !plug.CanSupport(
&volume.Spec{
PersistentVolume: &api.PersistentVolume{
Spec: api.PersistentVolumeSpec{
PersistentVolumeSource: api.PersistentVolumeSource{
ScaleIO: &api.ScaleIOPersistentVolumeSource{},
},
},
},
},
) {
t.Errorf("Expected true for CanSupport LibStorage PersistentVolumeSource")
}
}
func TestVolumeGetAccessModes(t *testing.T) {
plugMgr, tmpDir := newPluginMgr(t, makeScaleIOSecret(testSecret, testns))
defer os.RemoveAll(tmpDir)
plug, err := plugMgr.FindPersistentPluginByName(sioPluginName)
if err != nil {
t.Errorf("Can't find the plugin %v", sioPluginName)
}
if !containsMode(plug.GetAccessModes(), api.ReadWriteOnce) {
t.Errorf("Expected two AccessModeTypes: %s or %s", api.ReadWriteOnce, api.ReadOnlyMany)
}
}
func containsMode(modes []api.PersistentVolumeAccessMode, mode api.PersistentVolumeAccessMode) bool {
for _, m := range modes {
if m == mode {
return true
}
}
return false
}
func TestVolumeMounterUnmounter(t *testing.T) {
plugMgr, tmpDir := newPluginMgr(t, makeScaleIOSecret(testSecret, testns))
defer os.RemoveAll(tmpDir)
plug, err := plugMgr.FindPluginByName(sioPluginName)
if err != nil {
t.Errorf("Can't find the plugin %v", sioPluginName)
}
sioPlug, ok := plug.(*sioPlugin)
if !ok {
t.Errorf("Cannot assert plugin to be type sioPlugin")
}
vol := &api.Volume{
Name: testSioVolName,
VolumeSource: api.VolumeSource{
ScaleIO: &api.ScaleIOVolumeSource{
Gateway: "http://test.scaleio:1111",
System: testSioSystem,
ProtectionDomain: testSioPD,
StoragePool: "default",
VolumeName: testSioVol,
FSType: "ext4",
SecretRef: &api.LocalObjectReference{Name: testSecret},
ReadOnly: false,
},
},
}
sioMounter, err := sioPlug.NewMounter(
volume.NewSpecFromVolume(vol),
&api.Pod{ObjectMeta: meta.ObjectMeta{UID: podUID, Namespace: testns}},
volume.VolumeOptions{},
)
if err != nil {
t.Fatalf("Failed to make a new Mounter: %v", err)
}
if sioMounter == nil {
t.Fatal("Got a nil Mounter")
}
sio := newFakeSio()
sioVol := sioMounter.(*sioVolume)
if err := sioVol.setSioMgr(); err != nil {
t.Fatalf("failed to create sio mgr: %v", err)
}
sioVol.sioMgr.client = sio
sioVol.sioMgr.CreateVolume(testSioVol, 8) //create vol ahead of time
volPath := path.Join(tmpDir, fmt.Sprintf("pods/%s/volumes/kubernetes.io~scaleio/%s", podUID, testSioVolName))
path := sioMounter.GetPath()
if path != volPath {
t.Errorf("Got unexpected path: %s", path)
}
if err := sioMounter.SetUp(nil); err != nil {
t.Errorf("Expected success, got: %v", err)
}
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) {
t.Errorf("SetUp() failed, volume path not created: %s", path)
} else {
t.Errorf("SetUp() failed: %v", err)
}
}
if sio.isMultiMap {
t.Errorf("SetUp() - expecting multiple volume disabled by default")
}
// did we read sdcGUID label
if _, ok := sioVol.sioMgr.configData[confKey.sdcGUID]; !ok {
t.Errorf("Expected to find node label scaleio.sdcGUID, but did not find it")
}
// rebuild spec
builtSpec, err := sioPlug.ConstructVolumeSpec(volume.NewSpecFromVolume(vol).Name(), path)
if err != nil {
t.Errorf("ConstructVolumeSpec failed %v", err)
}
if builtSpec.Name() != vol.Name {
t.Errorf("Unexpected spec name %s", builtSpec.Name())
}
// unmount
sioUnmounter, err := sioPlug.NewUnmounter(volume.NewSpecFromVolume(vol).Name(), podUID)
if err != nil {
t.Fatalf("Failed to make a new Unmounter: %v", err)
}
if sioUnmounter == nil {
t.Fatal("Got a nil Unmounter")
}
sioVol = sioUnmounter.(*sioVolume)
if err := sioVol.resetSioMgr(); err != nil {
t.Fatalf("failed to reset sio mgr: %v", err)
}
sioVol.sioMgr.client = sio
if err := sioUnmounter.TearDown(); err != nil {
t.Errorf("Expected success, got: %v", err)
}
// is mount point gone ?
if _, err := os.Stat(path); err == nil {
t.Errorf("TearDown() failed, volume path still exists: %s", path)
} else if !os.IsNotExist(err) {
t.Errorf("TearDown() failed: %v", err)
}
// are we still mapped
if sio.volume.MappedSdcInfo != nil {
t.Errorf("expected SdcMappedInfo to be nil, volume may still be mapped")
}
}
func TestVolumeProvisioner(t *testing.T) {
plugMgr, tmpDir := newPluginMgr(t, makeScaleIOSecret(testSecret, testns))
defer os.RemoveAll(tmpDir)
plug, err := plugMgr.FindPluginByName(sioPluginName)
if err != nil {
t.Fatalf("Can't find the plugin %v", sioPluginName)
}
sioPlug, ok := plug.(*sioPlugin)
if !ok {
t.Fatal("Cannot assert plugin to be type sioPlugin")
}
options := volume.VolumeOptions{
ClusterName: "testcluster",
PVC: volumetest.CreateTestPVC("100Mi", []api.PersistentVolumeAccessMode{api.ReadWriteOnce}),
PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete,
}
options.PVC.Name = "testpvc"
options.PVC.Namespace = testns
options.PVC.Spec.AccessModes = []api.PersistentVolumeAccessMode{
api.ReadOnlyMany,
}
options.Parameters = map[string]string{
confKey.gateway: "http://test.scaleio:11111",
confKey.system: "sio",
confKey.protectionDomain: testSioPD,
confKey.storagePool: "default",
confKey.secretName: testSecret,
}
provisioner, err := sioPlug.NewProvisioner(options)
if err != nil {
t.Fatalf("failed to create new provisioner: %v", err)
}
if provisioner == nil {
t.Fatal("got a nil provisioner")
}
sio := newFakeSio()
sioVol := provisioner.(*sioVolume)
if err := sioVol.setSioMgrFromConfig(); err != nil {
t.Fatalf("failed to create scaleio mgr from config: %v", err)
}
sioVol.sioMgr.client = sio
spec, err := provisioner.Provision(nil, nil)
if err != nil {
t.Fatalf("call to Provision() failed: %v", err)
}
if spec.Namespace != testns {
t.Fatalf("unexpected namespace %v", spec.Namespace)
}
if spec.Spec.ScaleIO.SecretRef == nil {
t.Fatalf("unexpected nil value for spec.SecretRef")
}
if spec.Spec.ScaleIO.SecretRef.Name != testSecret ||
spec.Spec.ScaleIO.SecretRef.Namespace != testns {
t.Fatalf("spec.SecretRef is not being set properly")
}
spec.Spec.ClaimRef = &api.ObjectReference{Namespace: testns}
// validate provision
actualSpecName := spec.Name
actualVolName := spec.Spec.PersistentVolumeSource.ScaleIO.VolumeName
if !strings.HasPrefix(actualSpecName, "k8svol-") {
t.Errorf("expecting volume name to start with k8svol-, got %s", actualSpecName)
}
vol, err := sio.FindVolume(actualVolName)
if err != nil {
t.Fatalf("failed getting volume %v: %v", actualVolName, err)
}
if vol.Name != actualVolName {
t.Errorf("expected volume name to be %s, got %s", actualVolName, vol.Name)
}
if vol.SizeInKb != 8*1024*1024 {
klog.V(4).Info(log("unexpected volume size"))
}
// mount dynamic vol
sioMounter, err := sioPlug.NewMounter(
volume.NewSpecFromPersistentVolume(spec, false),
&api.Pod{ObjectMeta: meta.ObjectMeta{UID: podUID, Namespace: testns}},
volume.VolumeOptions{},
)
if err != nil {
t.Fatalf("Failed to make a new Mounter: %v", err)
}
sioVol = sioMounter.(*sioVolume)
if err := sioVol.setSioMgr(); err != nil {
t.Fatalf("failed to create sio mgr: %v", err)
}
sioVol.sioMgr.client = sio
if err := sioMounter.SetUp(nil); err != nil {
t.Fatalf("Expected success, got: %v", err)
}
// did we read sdcGUID label
if _, ok := sioVol.sioMgr.configData[confKey.sdcGUID]; !ok {
t.Errorf("Expected to find node label scaleio.sdcGUID, but did not find it")
}
// isMultiMap applied
if !sio.isMultiMap {
t.Errorf("SetUp() expecting attached volume with multi-mapping")
}
// teardown dynamic vol
sioUnmounter, err := sioPlug.NewUnmounter(spec.Name, podUID)
if err != nil {
t.Fatalf("Failed to make a new Unmounter: %v", err)
}
sioVol = sioUnmounter.(*sioVolume)
if err := sioVol.resetSioMgr(); err != nil {
t.Fatalf("failed to reset sio mgr: %v", err)
}
sioVol.sioMgr.client = sio
if err := sioUnmounter.TearDown(); err != nil {
t.Errorf("Expected success, got: %v", err)
}
// test deleter
deleter, err := sioPlug.NewDeleter(volume.NewSpecFromPersistentVolume(spec, false))
if err != nil {
t.Fatalf("failed to create a deleter %v", err)
}
sioVol = deleter.(*sioVolume)
if err := sioVol.setSioMgrFromSpec(); err != nil {
t.Fatalf("failed to set sio mgr: %v", err)
}
sioVol.sioMgr.client = sio
if err := deleter.Delete(); err != nil {
t.Fatalf("failed while deleting vol: %v", err)
}
path := deleter.GetPath()
if _, err := os.Stat(path); err == nil {
t.Errorf("TearDown() failed, volume path still exists: %s", path)
} else if !os.IsNotExist(err) {
t.Errorf("Deleter did not delete path %v: %v", path, err)
}
}
func TestVolumeProvisionerWithIncompleteConfig(t *testing.T) {
plugMgr, tmpDir := newPluginMgr(t, makeScaleIOSecret(testSecret, testns))
defer os.RemoveAll(tmpDir)
plug, err := plugMgr.FindPluginByName(sioPluginName)
if err != nil {
t.Fatalf("Can't find the plugin %v", sioPluginName)
}
sioPlug, ok := plug.(*sioPlugin)
if !ok {
t.Fatal("Cannot assert plugin to be type sioPlugin")
}
options := volume.VolumeOptions{
ClusterName: "testcluster",
PVName: "pvc-sio-dynamic-vol",
PVC: volumetest.CreateTestPVC("100Mi", []api.PersistentVolumeAccessMode{api.ReadWriteOnce}),
PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete,
}
options.PVC.Namespace = testns
options.PVC.Spec.AccessModes = []api.PersistentVolumeAccessMode{
api.ReadWriteOnce,
}
// incomplete options, test should fail
_, err = sioPlug.NewProvisioner(options)
if err == nil {
t.Fatal("expected failure due to incomplete options")
}
}
func TestVolumeProvisionerWithZeroCapacity(t *testing.T) {
plugMgr, tmpDir := newPluginMgr(t, makeScaleIOSecret(testSecret, testns))
defer os.RemoveAll(tmpDir)
plug, err := plugMgr.FindPluginByName(sioPluginName)
if err != nil {
t.Fatalf("Can't find the plugin %v", sioPluginName)
}
sioPlug, ok := plug.(*sioPlugin)
if !ok {
t.Fatal("Cannot assert plugin to be type sioPlugin")
}
options := volume.VolumeOptions{
ClusterName: "testcluster",
PVName: "pvc-sio-dynamic-vol",
PVC: volumetest.CreateTestPVC("0Mi", []api.PersistentVolumeAccessMode{api.ReadWriteOnce}),
PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete,
}
options.PVC.Namespace = testns
options.PVC.Spec.AccessModes = []api.PersistentVolumeAccessMode{
api.ReadWriteOnce,
}
options.Parameters = map[string]string{
confKey.gateway: "http://test.scaleio:11111",
confKey.system: "sio",
confKey.protectionDomain: testSioPD,
confKey.storagePool: "default",
confKey.secretName: "sio-secret",
}
provisioner, _ := sioPlug.NewProvisioner(options)
sio := newFakeSio()
sioVol := provisioner.(*sioVolume)
if err := sioVol.setSioMgrFromConfig(); err != nil {
t.Fatalf("failed to create scaleio mgr from config: %v", err)
}
sioVol.sioMgr.client = sio
_, err = provisioner.Provision(nil, nil)
if err == nil {
t.Fatalf("call to Provision() should fail with invalid capacity")
}
}
func TestVolumeProvisionerWithSecretNamespace(t *testing.T) {
plugMgr, tmpDir := newPluginMgr(t, makeScaleIOSecret("sio-sec", "sio-ns"))
defer os.RemoveAll(tmpDir)
plug, err := plugMgr.FindPluginByName(sioPluginName)
if err != nil {
t.Fatalf("Can't find the plugin %v", sioPluginName)
}
sioPlug, ok := plug.(*sioPlugin)
if !ok {
t.Fatal("Cannot assert plugin to be type sioPlugin")
}
options := volume.VolumeOptions{
ClusterName: "testcluster",
PVName: "pvc-sio-dynamic-vol",
PVC: volumetest.CreateTestPVC("100Mi", []api.PersistentVolumeAccessMode{api.ReadWriteOnce}),
PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete,
}
options.PVC.Spec.AccessModes = []api.PersistentVolumeAccessMode{
api.ReadWriteOnce,
}
options.PVC.Namespace = "pvc-ns"
options.Parameters = map[string]string{
confKey.gateway: "http://test.scaleio:11111",
confKey.system: "sio",
confKey.protectionDomain: testSioPD,
confKey.storagePool: "default",
confKey.secretName: "sio-sec",
confKey.secretNamespace: "sio-ns",
}
provisioner, _ := sioPlug.NewProvisioner(options)
sio := newFakeSio()
sioVol := provisioner.(*sioVolume)
if err := sioVol.setSioMgrFromConfig(); err != nil {
t.Fatalf("failed to create scaleio mgr from config: %v", err)
}
sioVol.sioMgr.client = sio
spec, err := sioVol.Provision(nil, nil)
if err != nil {
t.Fatalf("call to Provision() failed: %v", err)
}
if spec.GetObjectMeta().GetNamespace() != "pvc-ns" {
t.Fatalf("unexpected spec.namespace %s", spec.GetObjectMeta().GetNamespace())
}
if spec.Spec.ScaleIO.SecretRef.Name != "sio-sec" {
t.Fatalf("unexpected spec.ScaleIOPersistentVolume.SecretRef.Name %v", spec.Spec.ScaleIO.SecretRef.Name)
}
if spec.Spec.ScaleIO.SecretRef.Namespace != "sio-ns" {
t.Fatalf("unexpected spec.ScaleIOPersistentVolume.SecretRef.Namespace %v", spec.Spec.ScaleIO.SecretRef.Namespace)
}
}

View File

@@ -1,65 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"storageos.go",
"storageos_util.go",
],
importpath = "k8s.io/kubernetes/pkg/volume/storageos",
deps = [
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/cloud-provider/volume/helpers:go_default_library",
"//vendor/github.com/storageos/go-api:go_default_library",
"//vendor/github.com/storageos/go-api/types:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
"//vendor/k8s.io/utils/strings:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"storageos_test.go",
"storageos_util_test.go",
],
embed = [":go_default_library"],
deps = [
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/client-go/util/testing:go_default_library",
"//vendor/github.com/storageos/go-api/types:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@@ -1,6 +0,0 @@
# See the OWNERS docs at https://go.k8s.io/owners
maintainers:
- croomes
- rusenask
- chira001

View File

@@ -1,19 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package storageos contains the internal representation of StorageOS
// PersistentDisk volumes.
package storageos // import "k8s.io/kubernetes/pkg/volume/storageos"

View File

@@ -1,773 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storageos
import (
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"strings"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
volumehelpers "k8s.io/cloud-provider/volume/helpers"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
utilstrings "k8s.io/utils/strings"
)
// ProbeVolumePlugins is the primary entrypoint for volume plugins.
func ProbeVolumePlugins() []volume.VolumePlugin {
return []volume.VolumePlugin{&storageosPlugin{nil}}
}
type storageosPlugin struct {
host volume.VolumeHost
}
var _ volume.VolumePlugin = &storageosPlugin{}
var _ volume.PersistentVolumePlugin = &storageosPlugin{}
var _ volume.DeletableVolumePlugin = &storageosPlugin{}
var _ volume.ProvisionableVolumePlugin = &storageosPlugin{}
const (
storageosPluginName = "kubernetes.io/storageos"
defaultDeviceDir = "/var/lib/storageos/volumes"
defaultAPIAddress = "tcp://localhost:5705"
defaultAPIUser = "storageos"
defaultAPIPassword = "storageos"
defaultAPIVersion = "1"
defaultFSType = "ext4"
defaultNamespace = "default"
)
func getPath(uid types.UID, volNamespace string, volName string, pvName string, host volume.VolumeHost) string {
if len(volNamespace) != 0 && len(volName) != 0 && strings.Count(volName, ".") == 0 {
return host.GetPodVolumeDir(uid, utilstrings.EscapeQualifiedName(storageosPluginName), pvName+"."+volNamespace+"."+volName)
}
return host.GetPodVolumeDir(uid, utilstrings.EscapeQualifiedName(storageosPluginName), pvName)
}
func (plugin *storageosPlugin) Init(host volume.VolumeHost) error {
plugin.host = host
return nil
}
func (plugin *storageosPlugin) GetPluginName() string {
return storageosPluginName
}
func (plugin *storageosPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
volumeSource, _, err := getVolumeSource(spec)
if err != nil {
return "", err
}
return fmt.Sprintf("%s/%s", volumeSource.VolumeNamespace, volumeSource.VolumeName), nil
}
func (plugin *storageosPlugin) CanSupport(spec *volume.Spec) bool {
return (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.StorageOS != nil) ||
(spec.Volume != nil && spec.Volume.StorageOS != nil)
}
func (plugin *storageosPlugin) IsMigratedToCSI() bool {
return false
}
func (plugin *storageosPlugin) RequiresRemount() bool {
return false
}
func (plugin *storageosPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
return []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
v1.ReadOnlyMany,
}
}
func (plugin *storageosPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
apiCfg, err := getAPICfg(spec, pod, plugin.host.GetKubeClient())
if err != nil {
return nil, err
}
return plugin.newMounterInternal(spec, pod, apiCfg, &storageosUtil{}, plugin.host.GetMounter(plugin.GetPluginName()), plugin.host.GetExec(plugin.GetPluginName()))
}
func (plugin *storageosPlugin) newMounterInternal(spec *volume.Spec, pod *v1.Pod, apiCfg *storageosAPIConfig, manager storageosManager, mounter mount.Interface, exec mount.Exec) (volume.Mounter, error) {
volName, volNamespace, fsType, readOnly, err := getVolumeInfoFromSpec(spec)
if err != nil {
return nil, err
}
return &storageosMounter{
storageos: &storageos{
podUID: pod.UID,
podNamespace: pod.GetNamespace(),
pvName: spec.Name(),
volName: volName,
volNamespace: volNamespace,
fsType: fsType,
readOnly: readOnly,
apiCfg: apiCfg,
manager: manager,
mounter: mounter,
exec: exec,
plugin: plugin,
MetricsProvider: volume.NewMetricsStatFS(getPath(pod.UID, volNamespace, volName, spec.Name(), plugin.host)),
},
diskMounter: &mount.SafeFormatAndMount{Interface: mounter, Exec: exec},
mountOptions: util.MountOptionFromSpec(spec),
}, nil
}
func (plugin *storageosPlugin) NewUnmounter(pvName string, podUID types.UID) (volume.Unmounter, error) {
return plugin.newUnmounterInternal(pvName, podUID, &storageosUtil{}, plugin.host.GetMounter(plugin.GetPluginName()), plugin.host.GetExec(plugin.GetPluginName()))
}
func (plugin *storageosPlugin) newUnmounterInternal(pvName string, podUID types.UID, manager storageosManager, mounter mount.Interface, exec mount.Exec) (volume.Unmounter, error) {
// Parse volume namespace & name from mountpoint if mounted
volNamespace, volName, err := getVolumeInfo(pvName, podUID, plugin.host)
if err != nil {
return nil, err
}
return &storageosUnmounter{
storageos: &storageos{
podUID: podUID,
pvName: pvName,
volName: volName,
volNamespace: volNamespace,
manager: manager,
mounter: mounter,
exec: exec,
plugin: plugin,
MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, volNamespace, volName, pvName, plugin.host)),
},
}, nil
}
func (plugin *storageosPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.StorageOS == nil {
return nil, fmt.Errorf("spec.PersistentVolumeSource.StorageOS is nil")
}
class, err := util.GetClassForVolume(plugin.host.GetKubeClient(), spec.PersistentVolume)
if err != nil {
return nil, err
}
var adminSecretName, adminSecretNamespace string
for k, v := range class.Parameters {
switch strings.ToLower(k) {
case "adminsecretname":
adminSecretName = v
case "adminsecretnamespace":
adminSecretNamespace = v
}
}
apiCfg, err := parsePVSecret(adminSecretNamespace, adminSecretName, plugin.host.GetKubeClient())
if err != nil {
return nil, fmt.Errorf("failed to get admin secret from [%q/%q]: %v", adminSecretNamespace, adminSecretName, err)
}
return plugin.newDeleterInternal(spec, apiCfg, &storageosUtil{})
}
func (plugin *storageosPlugin) newDeleterInternal(spec *volume.Spec, apiCfg *storageosAPIConfig, manager storageosManager) (volume.Deleter, error) {
return &storageosDeleter{
storageosMounter: &storageosMounter{
storageos: &storageos{
pvName: spec.Name(),
volName: spec.PersistentVolume.Spec.StorageOS.VolumeName,
volNamespace: spec.PersistentVolume.Spec.StorageOS.VolumeNamespace,
apiCfg: apiCfg,
manager: manager,
plugin: plugin,
},
},
pvUID: spec.PersistentVolume.UID,
}, nil
}
func (plugin *storageosPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
return plugin.newProvisionerInternal(options, &storageosUtil{})
}
func (plugin *storageosPlugin) newProvisionerInternal(options volume.VolumeOptions, manager storageosManager) (volume.Provisioner, error) {
return &storageosProvisioner{
storageosMounter: &storageosMounter{
storageos: &storageos{
manager: manager,
plugin: plugin,
},
},
options: options,
}, nil
}
func (plugin *storageosPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
volNamespace, volName, err := getVolumeFromRef(volumeName)
if err != nil {
volNamespace = defaultNamespace
volName = volumeName
}
storageosVolume := &v1.Volume{
Name: volumeName,
VolumeSource: v1.VolumeSource{
StorageOS: &v1.StorageOSVolumeSource{
VolumeName: volName,
VolumeNamespace: volNamespace,
},
},
}
return volume.NewSpecFromVolume(storageosVolume), nil
}
func (plugin *storageosPlugin) SupportsMountOption() bool {
return true
}
func (plugin *storageosPlugin) SupportsBulkVolumeVerification() bool {
return false
}
func getVolumeSource(spec *volume.Spec) (*v1.StorageOSVolumeSource, bool, error) {
if spec.Volume != nil && spec.Volume.StorageOS != nil {
return spec.Volume.StorageOS, spec.Volume.StorageOS.ReadOnly, nil
}
return nil, false, fmt.Errorf("Spec does not reference a StorageOS volume type")
}
func getPersistentVolumeSource(spec *volume.Spec) (*v1.StorageOSPersistentVolumeSource, bool, error) {
if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.StorageOS != nil {
return spec.PersistentVolume.Spec.StorageOS, spec.ReadOnly, nil
}
return nil, false, fmt.Errorf("Spec does not reference a StorageOS persistent volume type")
}
// storageosManager is the abstract interface to StorageOS volume ops.
type storageosManager interface {
// Connects to the StorageOS API using the supplied configuration.
NewAPI(apiCfg *storageosAPIConfig) error
// Creates a StorageOS volume.
CreateVolume(provisioner *storageosProvisioner) (*storageosVolume, error)
// Attaches the disk to the kubelet's host machine.
AttachVolume(mounter *storageosMounter) (string, error)
// Attaches the device to the host at a mount path.
AttachDevice(mounter *storageosMounter, deviceMountPath string) error
// Detaches the disk from the kubelet's host machine.
DetachVolume(unmounter *storageosUnmounter, dir string) error
// Mounts the disk on the Kubelet's host machine.
MountVolume(mounter *storageosMounter, mnt, dir string) error
// Unmounts the disk from the Kubelet's host machine.
UnmountVolume(unmounter *storageosUnmounter) error
// Deletes the storageos volume. All data will be lost.
DeleteVolume(deleter *storageosDeleter) error
// Gets the node's device path.
DeviceDir(mounter *storageosMounter) string
}
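// Note (added commentary, not in the original source): SetUp below drives the
// manager in a fixed order - AttachDevice to expose the device at the global
// mount path, AttachVolume to surface it as a block device, MountVolume to
// mount it globally, then a bind mount into the pod via SetUpAt. TearDown
// removes the pod bind mount first and detaches only once the device is no
// longer in use.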
// storageos volumes represent a bare host directory mount of a StorageOS export.
type storageos struct {
podUID types.UID
podNamespace string
pvName string
volName string
volNamespace string
secretName string
readOnly bool
description string
pool string
fsType string
sizeGB int
labels map[string]string
apiCfg *storageosAPIConfig
manager storageosManager
mounter mount.Interface
exec mount.Exec
plugin *storageosPlugin
volume.MetricsProvider
}
type storageosMounter struct {
*storageos
// The directory containing the StorageOS devices
deviceDir string
// Interface used to mount the file or block device
diskMounter *mount.SafeFormatAndMount
mountOptions []string
}
var _ volume.Mounter = &storageosMounter{}
func (b *storageosMounter) GetAttributes() volume.Attributes {
return volume.Attributes{
ReadOnly: b.readOnly,
Managed: !b.readOnly,
SupportsSELinux: true,
}
}
// Checks prior to mount operations to verify that the required components (binaries, etc.)
// to mount the volume are available on the underlying node.
// If not, it returns an error
func (b *storageosMounter) CanMount() error {
return nil
}
// SetUp attaches the disk and bind mounts to the volume path.
func (b *storageosMounter) SetUp(fsGroup *int64) error {
// Need a namespace to find the volume, try pod's namespace if not set.
if b.volNamespace == "" {
klog.V(2).Infof("Setting StorageOS volume namespace to pod namespace: %s", b.podNamespace)
b.volNamespace = b.podNamespace
}
targetPath := makeGlobalPDName(b.plugin.host, b.pvName, b.volNamespace, b.volName)
// Attach the device to the host.
if err := b.manager.AttachDevice(b, targetPath); err != nil {
klog.Errorf("Failed to attach device at %s: %s", targetPath, err.Error())
return err
}
// Attach the StorageOS volume as a block device
devicePath, err := b.manager.AttachVolume(b)
if err != nil {
klog.Errorf("Failed to attach StorageOS volume %s: %s", b.volName, err.Error())
return err
}
// Mount the loop device into the plugin's disk global mount dir.
err = b.manager.MountVolume(b, devicePath, targetPath)
if err != nil {
return err
}
klog.V(4).Infof("Successfully mounted StorageOS volume %s into global mount directory", b.volName)
// Bind mount the volume into the pod
return b.SetUpAt(b.GetPath(), fsGroup)
}
// SetUpAt bind mounts the disk global mount to the given volume path.
func (b *storageosMounter) SetUpAt(dir string, fsGroup *int64) error {
notMnt, err := b.mounter.IsLikelyNotMountPoint(dir)
klog.V(4).Infof("StorageOS volume set up: %s %v %v", dir, !notMnt, err)
if err != nil && !os.IsNotExist(err) {
klog.Errorf("Cannot validate mount point: %s %v", dir, err)
return err
}
if !notMnt {
return nil
}
if err = os.MkdirAll(dir, 0750); err != nil {
klog.Errorf("mkdir failed on disk %s (%v)", dir, err)
return err
}
// Perform a bind mount to the full path to allow duplicate mounts of the same PD.
options := []string{"bind"}
if b.readOnly {
options = append(options, "ro")
}
mountOptions := util.JoinMountOptions(b.mountOptions, options)
globalPDPath := makeGlobalPDName(b.plugin.host, b.pvName, b.volNamespace, b.volName)
klog.V(4).Infof("Attempting to bind mount to pod volume at %s", dir)
err = b.mounter.Mount(globalPDPath, dir, "", mountOptions)
if err != nil {
notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
if mntErr != nil {
klog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
return err
}
if !notMnt {
if mntErr = b.mounter.Unmount(dir); mntErr != nil {
klog.Errorf("Failed to unmount: %v", mntErr)
return err
}
notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
if mntErr != nil {
klog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
return err
}
if !notMnt {
klog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", dir)
return err
}
}
os.Remove(dir)
klog.Errorf("Mount of disk %s failed: %v", dir, err)
return err
}
if !b.readOnly {
volume.SetVolumeOwnership(b, fsGroup)
}
klog.V(4).Infof("StorageOS volume setup complete on %s", dir)
return nil
}
func makeGlobalPDName(host volume.VolumeHost, pvName, volNamespace, volName string) string {
return path.Join(host.GetPluginDir(utilstrings.EscapeQualifiedName(storageosPluginName)), mount.MountsInGlobalPDPath, pvName+"."+volNamespace+"."+volName)
}
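For orientation, a minimal sketch of the global mount path composed above. It assumes the plugin dir is /var/lib/kubelet/plugins (a typical kubelet default, not taken from this file) and that mount.MountsInGlobalPDPath is the "mounts" path segment; EscapeQualifiedName turns the "/" in the plugin name into "~".

package main

import (
	"fmt"
	"path"
	"strings"
)

func main() {
	pluginDir := "/var/lib/kubelet/plugins" // assumption: typical kubelet plugin dir
	// EscapeQualifiedName("kubernetes.io/storageos") -> "kubernetes.io~storageos"
	escaped := strings.Replace("kubernetes.io/storageos", "/", "~", -1)
	// pvName.volNamespace.volName, as in makeGlobalPDName above.
	fmt.Println(path.Join(pluginDir, escaped, "mounts", "vol1-pvname.ns1.vol1"))
	// /var/lib/kubelet/plugins/kubernetes.io~storageos/mounts/vol1-pvname.ns1.vol1
}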
// Given the pod id and PV name, finds the volume's namespace and name from
// the PV name or from the pod's volume mount directory. Volumes are mounted
// as pvName.volNamespace.volName, but k8s will specify only the pvName to
// unmount.
// Returns empty volNamespace/volName if the volume is not mounted.
func getVolumeInfo(pvName string, podUID types.UID, host volume.VolumeHost) (string, string, error) {
if volNamespace, volName, err := getVolumeFromRef(pvName); err == nil {
return volNamespace, volName, nil
}
volumeDir := filepath.Dir(host.GetPodVolumeDir(podUID, utilstrings.EscapeQualifiedName(storageosPluginName), pvName))
files, err := ioutil.ReadDir(volumeDir)
if err != nil {
return "", "", fmt.Errorf("Could not read mounts from pod volume dir: %s", err)
}
for _, f := range files {
if f.Mode().IsDir() && strings.HasPrefix(f.Name(), pvName+".") {
if volNamespace, volName, err := getVolumeFromRef(f.Name()); err == nil {
return volNamespace, volName, nil
}
}
}
return "", "", fmt.Errorf("Could not get info from unmounted pv %q at %q", pvName, volumeDir)
}
// Splits the volume ref on "." to return the volNamespace and volName. This
// is safe as neither namespaces nor volume names allow "." in their names.
func getVolumeFromRef(ref string) (volNamespace string, volName string, err error) {
refParts := strings.Split(ref, ".")
switch len(refParts) {
case 2:
return refParts[0], refParts[1], nil
case 3:
return refParts[1], refParts[2], nil
}
return "", "", fmt.Errorf("ref not in format volNamespace.volName or pvName.volNamespace.volName")
}
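A standalone sketch of the ref parsing above (illustrative only, mirroring the deleted helper): a two-part ref carries volNamespace.volName, while a three-part ref from a pod mount directory carries pvName.volNamespace.volName.

package main

import (
	"fmt"
	"strings"
)

// getVolumeFromRef mirrors the logic of the helper above.
func getVolumeFromRef(ref string) (volNamespace, volName string, err error) {
	refParts := strings.Split(ref, ".")
	switch len(refParts) {
	case 2:
		return refParts[0], refParts[1], nil
	case 3:
		return refParts[1], refParts[2], nil
	}
	return "", "", fmt.Errorf("ref not in format volNamespace.volName or pvName.volNamespace.volName")
}

func main() {
	// "ns1.vol1" -> volNamespace "ns1", volName "vol1"
	ns, name, _ := getVolumeFromRef("ns1.vol1")
	fmt.Println(ns, name)
	// "vol1-pvname.ns1.vol1" -> volNamespace "ns1", volName "vol1"
	ns, name, _ = getVolumeFromRef("vol1-pvname.ns1.vol1")
	fmt.Println(ns, name)
}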
// GetPath returns the path to the user-specific mount of a StorageOS volume
func (storageosVolume *storageos) GetPath() string {
return getPath(storageosVolume.podUID, storageosVolume.volNamespace, storageosVolume.volName, storageosVolume.pvName, storageosVolume.plugin.host)
}
type storageosUnmounter struct {
*storageos
}
var _ volume.Unmounter = &storageosUnmounter{}
func (b *storageosUnmounter) GetPath() string {
return getPath(b.podUID, b.volNamespace, b.volName, b.pvName, b.plugin.host)
}
// Unmounts the bind mount, and detaches the disk only if the PD
// resource was the last reference to that disk on the kubelet.
func (b *storageosUnmounter) TearDown() error {
if len(b.volNamespace) == 0 || len(b.volName) == 0 {
klog.Warningf("volNamespace: %q, volName: %q not set, skipping TearDown", b.volNamespace, b.volName)
return fmt.Errorf("pvName not specified for TearDown, waiting for next sync loop")
}
// Unmount from pod
mountPath := b.GetPath()
err := b.TearDownAt(mountPath)
if err != nil {
klog.Errorf("Unmount from pod failed: %v", err)
return err
}
// Find device name from global mount
globalPDPath := makeGlobalPDName(b.plugin.host, b.pvName, b.volNamespace, b.volName)
devicePath, _, err := mount.GetDeviceNameFromMount(b.mounter, globalPDPath)
if err != nil {
klog.Errorf("Detach failed when getting device from global mount: %v", err)
return err
}
// Unmount from plugin's disk global mount dir.
err = b.TearDownAt(globalPDPath)
if err != nil {
klog.Errorf("Detach failed during unmount: %v", err)
return err
}
// Detach loop device
err = b.manager.DetachVolume(b, devicePath)
if err != nil {
klog.Errorf("Detach device %s failed for volume %s: %v", devicePath, b.pvName, err)
return err
}
klog.V(4).Infof("Successfully unmounted StorageOS volume %s and detached devices", b.pvName)
return nil
}
// TearDownAt unmounts the bind mount at the given dir and removes the mount
// reference from the StorageOS API.
func (b *storageosUnmounter) TearDownAt(dir string) error {
if err := mount.CleanupMountPoint(dir, b.mounter, false); err != nil {
klog.V(4).Infof("Unmounted StorageOS volume %s failed with: %v", b.pvName, err)
}
if err := b.manager.UnmountVolume(b); err != nil {
klog.V(4).Infof("Mount reference for volume %s could not be removed from StorageOS: %v", b.pvName, err)
}
return nil
}
type storageosDeleter struct {
*storageosMounter
pvUID types.UID
}
var _ volume.Deleter = &storageosDeleter{}
func (d *storageosDeleter) GetPath() string {
return getPath(d.podUID, d.volNamespace, d.volName, d.pvName, d.plugin.host)
}
func (d *storageosDeleter) Delete() error {
return d.manager.DeleteVolume(d)
}
type storageosProvisioner struct {
*storageosMounter
options volume.VolumeOptions
}
var _ volume.Provisioner = &storageosProvisioner{}
func (c *storageosProvisioner) Provision(selectedNode *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (*v1.PersistentVolume, error) {
if !util.AccessModesContainedInAll(c.plugin.GetAccessModes(), c.options.PVC.Spec.AccessModes) {
return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", c.options.PVC.Spec.AccessModes, c.plugin.GetAccessModes())
}
if util.CheckPersistentVolumeClaimModeBlock(c.options.PVC) {
return nil, fmt.Errorf("%s does not support block volume provisioning", c.plugin.GetPluginName())
}
var adminSecretName, adminSecretNamespace string
// Apply ProvisionerParameters (case-insensitive). We leave validation of
// the values to the cloud provider.
for k, v := range c.options.Parameters {
switch strings.ToLower(k) {
case "adminsecretname":
adminSecretName = v
case "adminsecretnamespace":
adminSecretNamespace = v
case "volumenamespace":
c.volNamespace = v
case "description":
c.description = v
case "pool":
c.pool = v
case "fstype":
c.fsType = v
default:
return nil, fmt.Errorf("invalid option %q for volume plugin %s", k, c.plugin.GetPluginName())
}
}
// Set from PVC
c.podNamespace = c.options.PVC.Namespace
c.volName = c.options.PVName
if c.volNamespace == "" {
c.volNamespace = c.options.PVC.Namespace
}
c.labels = make(map[string]string)
for k, v := range c.options.PVC.Labels {
c.labels[k] = v
}
capacity := c.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
var err error
c.sizeGB, err = volumehelpers.RoundUpToGiBInt(capacity)
if err != nil {
return nil, err
}
apiCfg, err := parsePVSecret(adminSecretNamespace, adminSecretName, c.plugin.host.GetKubeClient())
if err != nil {
return nil, err
}
c.apiCfg = apiCfg
vol, err := c.manager.CreateVolume(c)
if err != nil {
klog.Errorf("failed to create volume: %v", err)
return nil, err
}
if vol.FSType == "" {
vol.FSType = defaultFSType
}
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: vol.Name,
Labels: map[string]string{},
Annotations: map[string]string{
util.VolumeDynamicallyCreatedByKey: "storageos-dynamic-provisioner",
},
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeReclaimPolicy: c.options.PersistentVolumeReclaimPolicy,
AccessModes: c.options.PVC.Spec.AccessModes,
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", vol.SizeGB)),
},
PersistentVolumeSource: v1.PersistentVolumeSource{
StorageOS: &v1.StorageOSPersistentVolumeSource{
VolumeName: vol.Name,
VolumeNamespace: vol.Namespace,
FSType: vol.FSType,
ReadOnly: false,
SecretRef: &v1.ObjectReference{
Name: adminSecretName,
Namespace: adminSecretNamespace,
},
},
},
MountOptions: c.options.MountOptions,
},
}
if len(c.options.PVC.Spec.AccessModes) == 0 {
pv.Spec.AccessModes = c.plugin.GetAccessModes()
}
if len(vol.Labels) != 0 {
if pv.Labels == nil {
pv.Labels = make(map[string]string)
}
for k, v := range vol.Labels {
pv.Labels[k] = v
}
}
return pv, nil
}
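The provisioner rounds the requested capacity up to whole GiB before creating the volume (via the volumehelpers call above). A minimal sketch of that rounding using only the resource package; the 100Mi request used in the test further down rounds up to 1 GiB.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	const GiB = int64(1024 * 1024 * 1024)
	capacity := resource.MustParse("100Mi")
	// Round up to whole GiB, as RoundUpToGiBInt does: 100Mi -> 1 GiB.
	sizeGB := (capacity.Value() + GiB - 1) / GiB
	fmt.Printf("%dGi\n", sizeGB) // 1Gi
}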
// Returns StorageOS volume name, namespace, fstype and readonly from spec
func getVolumeInfoFromSpec(spec *volume.Spec) (string, string, string, bool, error) {
if spec.PersistentVolume != nil {
source, readOnly, err := getPersistentVolumeSource(spec)
if err != nil {
return "", "", "", false, err
}
return source.VolumeName, source.VolumeNamespace, source.FSType, readOnly, nil
}
if spec.Volume != nil {
source, readOnly, err := getVolumeSource(spec)
if err != nil {
return "", "", "", false, err
}
return source.VolumeName, source.VolumeNamespace, source.FSType, readOnly, nil
}
return "", "", "", false, fmt.Errorf("spec not Volume or PersistentVolume")
}
// Returns the API config if a secret is set, otherwise nil so that defaults
// can be attempted.
func getAPICfg(spec *volume.Spec, pod *v1.Pod, kubeClient clientset.Interface) (*storageosAPIConfig, error) {
if spec.PersistentVolume != nil {
source, _, err := getPersistentVolumeSource(spec)
if err != nil {
return nil, err
}
if source.SecretRef == nil {
return nil, nil
}
return parsePVSecret(source.SecretRef.Namespace, source.SecretRef.Name, kubeClient)
}
if spec.Volume != nil {
source, _, err := getVolumeSource(spec)
if err != nil {
return nil, err
}
if source.SecretRef == nil {
return nil, nil
}
return parsePodSecret(pod, source.SecretRef.Name, kubeClient)
}
return nil, fmt.Errorf("spec not Volume or PersistentVolume")
}
func parsePodSecret(pod *v1.Pod, secretName string, kubeClient clientset.Interface) (*storageosAPIConfig, error) {
secret, err := util.GetSecretForPod(pod, secretName, kubeClient)
if err != nil {
klog.Errorf("failed to get secret from [%q/%q]", pod.Namespace, secretName)
return nil, fmt.Errorf("failed to get secret from [%q/%q]", pod.Namespace, secretName)
}
return parseAPIConfig(secret)
}
// Important: Only to be called with data from a PV to avoid secrets being
// loaded from a user-supplied namespace.
func parsePVSecret(namespace, secretName string, kubeClient clientset.Interface) (*storageosAPIConfig, error) {
secret, err := util.GetSecretForPV(namespace, secretName, storageosPluginName, kubeClient)
if err != nil {
klog.Errorf("failed to get secret from [%q/%q]", namespace, secretName)
return nil, fmt.Errorf("failed to get secret from [%q/%q]", namespace, secretName)
}
return parseAPIConfig(secret)
}
// Parse API configuration from parameters or secret
func parseAPIConfig(params map[string]string) (*storageosAPIConfig, error) {
if len(params) == 0 {
return nil, fmt.Errorf("empty API config")
}
c := &storageosAPIConfig{}
for name, data := range params {
switch strings.ToLower(name) {
case "apiaddress":
c.apiAddr = data
case "apiusername":
c.apiUser = data
case "apipassword":
c.apiPass = data
case "apiversion":
c.apiVersion = data
}
}
return c, nil
}
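Keys in the secret are matched case-insensitively, so apiAddress, APIADDRESS, and apiaddress are all accepted. An illustrative, self-contained run of the same switch (note the test secret further down uses the key apiAddr, which this switch would not match, so that address would stay empty):

package main

import (
	"fmt"
	"strings"
)

type storageosAPIConfig struct {
	apiAddr, apiUser, apiPass, apiVersion string
}

// parseAPIConfig mirrors the helper above: keys are matched case-insensitively.
func parseAPIConfig(params map[string]string) (*storageosAPIConfig, error) {
	if len(params) == 0 {
		return nil, fmt.Errorf("empty API config")
	}
	c := &storageosAPIConfig{}
	for name, data := range params {
		switch strings.ToLower(name) {
		case "apiaddress":
			c.apiAddr = data
		case "apiusername":
			c.apiUser = data
		case "apipassword":
			c.apiPass = data
		case "apiversion":
			c.apiVersion = data
		}
	}
	return c, nil
}

func main() {
	cfg, _ := parseAPIConfig(map[string]string{
		"apiAddress":  "tcp://localhost:5705",
		"apiUsername": "storageos",
		"apiPassword": "storageos",
	})
	fmt.Println(cfg.apiAddr, cfg.apiUser)
}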

View File

@@ -1,381 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storageos
import (
"fmt"
"os"
"path"
"testing"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes/fake"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
)
func TestCanSupport(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("storageos_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/storageos")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
if plug.GetPluginName() != "kubernetes.io/storageos" {
t.Errorf("Wrong name: %s", plug.GetPluginName())
}
if !plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{StorageOS: &v1.StorageOSVolumeSource{}}}}) {
t.Errorf("Expected true")
}
if !plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{StorageOS: &v1.StorageOSPersistentVolumeSource{}}}}}) {
t.Errorf("Expected true")
}
}
func TestGetAccessModes(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("storageos_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPersistentPluginByName("kubernetes.io/storageos")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
if !volumetest.ContainsAccessMode(plug.GetAccessModes(), v1.ReadWriteOnce) || !volumetest.ContainsAccessMode(plug.GetAccessModes(), v1.ReadOnlyMany) {
t.Errorf("Expected two AccessModeTypes: %s and %s", v1.ReadWriteOnce, v1.ReadOnlyMany)
}
}
type fakePDManager struct {
api apiImplementer
attachCalled bool
attachDeviceCalled bool
detachCalled bool
mountCalled bool
unmountCalled bool
createCalled bool
deleteCalled bool
}
func (fake *fakePDManager) NewAPI(apiCfg *storageosAPIConfig) error {
fake.api = fakeAPI{}
return nil
}
func (fake *fakePDManager) CreateVolume(p *storageosProvisioner) (*storageosVolume, error) {
fake.createCalled = true
labels := make(map[string]string)
labels["fakepdmanager"] = "yes"
return &storageosVolume{
Name: "test-storageos-name",
Namespace: "test-storageos-namespace",
Pool: "test-storageos-pool",
SizeGB: 100,
Labels: labels,
FSType: "ext2",
}, nil
}
func (fake *fakePDManager) AttachVolume(b *storageosMounter) (string, error) {
fake.attachCalled = true
return "", nil
}
func (fake *fakePDManager) AttachDevice(b *storageosMounter, dir string) error {
fake.attachDeviceCalled = true
return nil
}
func (fake *fakePDManager) DetachVolume(b *storageosUnmounter, loopDevice string) error {
fake.detachCalled = true
return nil
}
func (fake *fakePDManager) MountVolume(b *storageosMounter, mntDevice, deviceMountPath string) error {
fake.mountCalled = true
return nil
}
func (fake *fakePDManager) UnmountVolume(b *storageosUnmounter) error {
fake.unmountCalled = true
return nil
}
func (fake *fakePDManager) DeleteVolume(d *storageosDeleter) error {
fake.deleteCalled = true
if d.volName != "test-storageos-name" {
return fmt.Errorf("Deleter got unexpected volume name: %s", d.volName)
}
return nil
}
func (fake *fakePDManager) DeviceDir(mounter *storageosMounter) string {
return defaultDeviceDir
}
func TestPlugin(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("storageos_test")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/storageos")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
secretName := "very-secret"
spec := &v1.Volume{
Name: "vol1-pvname",
VolumeSource: v1.VolumeSource{
StorageOS: &v1.StorageOSVolumeSource{
VolumeName: "vol1",
VolumeNamespace: "ns1",
FSType: "ext3",
SecretRef: &v1.LocalObjectReference{
Name: secretName,
},
},
},
}
client := fake.NewSimpleClientset()
client.CoreV1().Secrets("default").Create(&v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: secretName,
Namespace: "default",
},
Type: "kubernetes.io/storageos",
Data: map[string][]byte{
"apiUsername": []byte("storageos"),
"apiPassword": []byte("storageos"),
"apiAddr": []byte("tcp://localhost:5705"),
}})
plug.(*storageosPlugin).host = volumetest.NewFakeVolumeHost(tmpDir, client, nil)
// Test Mounter
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid"), Namespace: "default"}}
fakeManager := &fakePDManager{}
apiCfg, err := parsePodSecret(pod, secretName, plug.(*storageosPlugin).host.GetKubeClient())
if err != nil {
t.Errorf("Couldn't get secret from %v/%v", pod.Namespace, secretName)
}
mounter, err := plug.(*storageosPlugin).newMounterInternal(volume.NewSpecFromVolume(spec), pod, apiCfg, fakeManager, &mount.FakeMounter{}, mount.NewFakeExec(nil))
if err != nil {
t.Fatalf("Failed to make a new Mounter: %v", err)
}
if mounter == nil {
t.Fatalf("Got a nil Mounter")
}
expectedPath := path.Join(tmpDir, "pods/poduid/volumes/kubernetes.io~storageos/vol1-pvname.ns1.vol1")
volPath := mounter.GetPath()
if volPath != expectedPath {
t.Errorf("Expected path: '%s' got: '%s'", expectedPath, volPath)
}
if err := mounter.SetUp(nil); err != nil {
t.Errorf("Expected success, got: %v", err)
}
if _, err := os.Stat(volPath); err != nil {
if os.IsNotExist(err) {
t.Errorf("SetUp() failed, volume path not created: %s", volPath)
} else {
t.Errorf("SetUp() failed: %v", err)
}
}
if !fakeManager.attachDeviceCalled {
t.Errorf("AttachDevice not called")
}
if !fakeManager.attachCalled {
t.Errorf("Attach not called")
}
if !fakeManager.mountCalled {
t.Errorf("Mount not called")
}
// Test Unmounter
fakeManager = &fakePDManager{}
unmounter, err := plug.(*storageosPlugin).newUnmounterInternal("vol1-pvname", types.UID("poduid"), fakeManager, &mount.FakeMounter{}, mount.NewFakeExec(nil))
if err != nil {
t.Errorf("Failed to make a new Unmounter: %v", err)
}
if unmounter == nil {
t.Errorf("Got a nil Unmounter")
}
volPath = unmounter.GetPath()
if volPath != expectedPath {
t.Errorf("Expected path: '%s' got: '%s'", expectedPath, volPath)
}
if err := unmounter.TearDown(); err != nil {
t.Errorf("Expected success, got: %v", err)
}
if _, err := os.Stat(volPath); err == nil {
t.Errorf("TearDown() failed, volume path still exists: %s", volPath)
} else if !os.IsNotExist(err) {
t.Errorf("TearDown() failed: %v", err)
}
if !fakeManager.unmountCalled {
t.Errorf("Unmount not called")
}
if !fakeManager.detachCalled {
t.Errorf("Detach not called")
}
// Test Provisioner
fakeManager = &fakePDManager{}
mountOptions := []string{"sync", "noatime"}
options := volume.VolumeOptions{
PVC: volumetest.CreateTestPVC("100Mi", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}),
// PVName: "test-volume-name",
PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete,
Parameters: map[string]string{
"VolumeNamespace": "test-volume-namespace",
"adminSecretName": secretName,
},
MountOptions: mountOptions,
}
provisioner, err := plug.(*storageosPlugin).newProvisionerInternal(options, fakeManager)
if err != nil {
t.Errorf("newProvisionerInternal() failed: %v", err)
}
persistentSpec, err := provisioner.Provision(nil, nil)
if err != nil {
t.Fatalf("Provision() failed: %v", err)
}
if persistentSpec.Spec.PersistentVolumeSource.StorageOS.VolumeName != "test-storageos-name" {
t.Errorf("Provision() returned unexpected volume Name: %s, expected test-storageos-name", persistentSpec.Spec.PersistentVolumeSource.StorageOS.VolumeName)
}
if persistentSpec.Spec.PersistentVolumeSource.StorageOS.VolumeNamespace != "test-storageos-namespace" {
t.Errorf("Provision() returned unexpected volume Namespace: %s", persistentSpec.Spec.PersistentVolumeSource.StorageOS.VolumeNamespace)
}
cap := persistentSpec.Spec.Capacity[v1.ResourceStorage]
size := cap.Value()
if size != 100*1024*1024*1024 {
t.Errorf("Provision() returned unexpected volume size: %v", size)
}
if persistentSpec.Spec.PersistentVolumeSource.StorageOS.FSType != "ext2" {
t.Errorf("Provision() returned unexpected volume FSType: %s", persistentSpec.Spec.PersistentVolumeSource.StorageOS.FSType)
}
if len(persistentSpec.Spec.MountOptions) != 2 {
t.Errorf("Provision() returned unexpected volume mount options: %v", persistentSpec.Spec.MountOptions)
}
if persistentSpec.Labels["fakepdmanager"] != "yes" {
t.Errorf("Provision() returned unexpected labels: %v", persistentSpec.Labels)
}
if !fakeManager.createCalled {
t.Errorf("Create not called")
}
// Test Deleter
fakeManager = &fakePDManager{}
volSpec := &volume.Spec{
PersistentVolume: persistentSpec,
}
deleter, err := plug.(*storageosPlugin).newDeleterInternal(volSpec, apiCfg, fakeManager)
if err != nil {
t.Errorf("newDeleterInternal() failed: %v", err)
}
err = deleter.Delete()
if err != nil {
t.Errorf("Deleter() failed: %v", err)
}
if !fakeManager.deleteCalled {
t.Errorf("Delete not called")
}
}
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("storageos_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "pvA",
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
StorageOS: &v1.StorageOSPersistentVolumeSource{VolumeName: "pvA", VolumeNamespace: "vnsA", ReadOnly: false},
},
ClaimRef: &v1.ObjectReference{
Name: "claimA",
},
},
}
claim := &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: "claimA",
Namespace: "nsA",
},
Spec: v1.PersistentVolumeClaimSpec{
VolumeName: "pvA",
},
Status: v1.PersistentVolumeClaimStatus{
Phase: v1.ClaimBound,
},
}
client := fake.NewSimpleClientset(pv, claim)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, client, nil))
plug, _ := plugMgr.FindPluginByName(storageosPluginName)
// readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
spec := volume.NewSpecFromPersistentVolume(pv, true)
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "nsA", UID: types.UID("poduid")}}
fakeManager := &fakePDManager{}
fakeConfig := &fakeConfig{}
apiCfg := fakeConfig.GetAPIConfig()
mounter, err := plug.(*storageosPlugin).newMounterInternal(spec, pod, apiCfg, fakeManager, &mount.FakeMounter{}, mount.NewFakeExec(nil))
if err != nil {
t.Fatalf("error creating a new internal mounter:%v", err)
}
if !mounter.GetAttributes().ReadOnly {
t.Errorf("Expected true for mounter.IsReadOnly")
}
}

View File

@@ -1,399 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storageos
import (
"errors"
"fmt"
"os"
"path"
"strings"
"k8s.io/kubernetes/pkg/util/mount"
storageosapi "github.com/storageos/go-api"
storageostypes "github.com/storageos/go-api/types"
"k8s.io/klog"
)
const (
losetupPath = "losetup"
modeBlock deviceType = iota
modeFile
modeUnsupported
ErrDeviceNotFound = "device not found"
ErrDeviceNotSupported = "device not supported"
ErrNotAvailable = "not available"
)
type deviceType int
// storageosVolume describes a provisioned volume
type storageosVolume struct {
ID string
Name string
Namespace string
Description string
Pool string
SizeGB int
Labels map[string]string
FSType string
}
type storageosAPIConfig struct {
apiAddr string
apiUser string
apiPass string
apiVersion string
}
type apiImplementer interface {
Volume(namespace string, ref string) (*storageostypes.Volume, error)
VolumeCreate(opts storageostypes.VolumeCreateOptions) (*storageostypes.Volume, error)
VolumeMount(opts storageostypes.VolumeMountOptions) error
VolumeUnmount(opts storageostypes.VolumeUnmountOptions) error
VolumeDelete(opt storageostypes.DeleteOptions) error
Node(ref string) (*storageostypes.Node, error)
}
// storageosUtil is the utility structure to interact with the StorageOS API.
type storageosUtil struct {
api apiImplementer
}
func (u *storageosUtil) NewAPI(apiCfg *storageosAPIConfig) error {
if u.api != nil {
return nil
}
if apiCfg == nil {
apiCfg = &storageosAPIConfig{
apiAddr: defaultAPIAddress,
apiUser: defaultAPIUser,
apiPass: defaultAPIPassword,
apiVersion: defaultAPIVersion,
}
klog.V(4).Infof("Using default StorageOS API settings: addr %s, version: %s", apiCfg.apiAddr, defaultAPIVersion)
}
api, err := storageosapi.NewVersionedClient(apiCfg.apiAddr, defaultAPIVersion)
if err != nil {
return err
}
api.SetAuth(apiCfg.apiUser, apiCfg.apiPass)
u.api = api
return nil
}
// Creates a new StorageOS volume and makes it available as a device within
// /var/lib/storageos/volumes.
func (u *storageosUtil) CreateVolume(p *storageosProvisioner) (*storageosVolume, error) {
if err := u.NewAPI(p.apiCfg); err != nil {
return nil, err
}
if p.labels == nil {
p.labels = make(map[string]string)
}
opts := storageostypes.VolumeCreateOptions{
Name: p.volName,
Size: p.sizeGB,
Description: p.description,
Pool: p.pool,
FSType: p.fsType,
Namespace: p.volNamespace,
Labels: p.labels,
}
vol, err := u.api.VolumeCreate(opts)
if err != nil {
klog.Errorf("volume create failed for volume %q (%v)", opts.Name, err)
return nil, err
}
return &storageosVolume{
ID: vol.ID,
Name: vol.Name,
Namespace: vol.Namespace,
Description: vol.Description,
Pool: vol.Pool,
FSType: vol.FSType,
SizeGB: int(vol.Size),
Labels: vol.Labels,
}, nil
}
// Attach exposes a volume on the host as a block device. StorageOS uses a
// global namespace, so if the volume exists, it should already be available as
// a device within `/var/lib/storageos/volumes/<id>`.
//
// Depending on the host capabilities, the device may be either a block device
// or a file device. Block devices can be used directly, but file devices must
// be made accessible as a block device before use.
func (u *storageosUtil) AttachVolume(b *storageosMounter) (string, error) {
if err := u.NewAPI(b.apiCfg); err != nil {
return "", err
}
// Get the node's device path from the API, falling back to the default if
// not set on the node.
if b.deviceDir == "" {
b.deviceDir = u.DeviceDir(b)
}
vol, err := u.api.Volume(b.volNamespace, b.volName)
if err != nil {
klog.Warningf("volume retrieve failed for volume %q with namespace %q (%v)", b.volName, b.volNamespace, err)
return "", err
}
// Clear any existing mount reference from the API. These may be leftover
// from previous mounts where the unmount operation couldn't get access to
// the API credentials.
if vol.Mounted {
opts := storageostypes.VolumeUnmountOptions{
Name: vol.Name,
Namespace: vol.Namespace,
}
if err := u.api.VolumeUnmount(opts); err != nil {
klog.Warningf("Couldn't clear existing StorageOS mount reference: %v", err)
}
}
srcPath := path.Join(b.deviceDir, vol.ID)
dt, err := pathDeviceType(srcPath)
if err != nil {
klog.Warningf("volume source path %q for volume %q not ready (%v)", srcPath, b.volName, err)
return "", err
}
switch dt {
case modeBlock:
return srcPath, nil
case modeFile:
return attachFileDevice(srcPath, b.exec)
default:
return "", fmt.Errorf(ErrDeviceNotSupported)
}
}
// Detach detaches a volume from the host. This is only needed when NBD is not
// enabled and loop devices are used to simulate a block device.
func (u *storageosUtil) DetachVolume(b *storageosUnmounter, devicePath string) error {
if !isLoopDevice(devicePath) {
return nil
}
if _, err := os.Stat(devicePath); os.IsNotExist(err) {
return nil
}
return removeLoopDevice(devicePath, b.exec)
}
// AttachDevice attaches the volume device to the host at a given mount path.
func (u *storageosUtil) AttachDevice(b *storageosMounter, deviceMountPath string) error {
if err := u.NewAPI(b.apiCfg); err != nil {
return err
}
opts := storageostypes.VolumeMountOptions{
Name: b.volName,
Namespace: b.volNamespace,
FsType: b.fsType,
Mountpoint: deviceMountPath,
Client: b.plugin.host.GetHostName(),
}
if err := u.api.VolumeMount(opts); err != nil {
return err
}
return nil
}
// Mount mounts the volume on the host.
func (u *storageosUtil) MountVolume(b *storageosMounter, mntDevice, deviceMountPath string) error {
notMnt, err := b.mounter.IsLikelyNotMountPoint(deviceMountPath)
if err != nil {
if os.IsNotExist(err) {
if err = os.MkdirAll(deviceMountPath, 0750); err != nil {
return err
}
notMnt = true
} else {
return err
}
}
if err = os.MkdirAll(deviceMountPath, 0750); err != nil {
klog.Errorf("mkdir failed on disk %s (%v)", deviceMountPath, err)
return err
}
options := []string{}
if b.readOnly {
options = append(options, "ro")
}
if notMnt {
err = b.diskMounter.FormatAndMount(mntDevice, deviceMountPath, b.fsType, options)
if err != nil {
os.Remove(deviceMountPath)
return err
}
}
return err
}
// Unmount removes the mount reference from the volume allowing it to be
// re-mounted elsewhere.
func (u *storageosUtil) UnmountVolume(b *storageosUnmounter) error {
if err := u.NewAPI(b.apiCfg); err != nil {
// We can't always get the config we need, so allow the unmount to
// succeed even if we can't remove the mount reference from the API.
klog.V(4).Infof("Could not remove mount reference in the StorageOS API as no credentials available to the unmount operation")
return nil
}
opts := storageostypes.VolumeUnmountOptions{
Name: b.volName,
Namespace: b.volNamespace,
Client: b.plugin.host.GetHostName(),
}
return u.api.VolumeUnmount(opts)
}
// Deletes a StorageOS volume. Assumes it has already been unmounted and detached.
func (u *storageosUtil) DeleteVolume(d *storageosDeleter) error {
if err := u.NewAPI(d.apiCfg); err != nil {
return err
}
// Deletes must be forced as the StorageOS API will not normally delete
// volumes that it thinks are mounted. We can't be sure the unmount was
// registered via the API so we trust k8s to only delete volumes it knows
// are unmounted.
opts := storageostypes.DeleteOptions{
Name: d.volName,
Namespace: d.volNamespace,
Force: true,
}
return u.api.VolumeDelete(opts)
}
// Get the node's device path from the API, falling back to the default if not
// specified.
func (u *storageosUtil) DeviceDir(b *storageosMounter) string {
ctrl, err := u.api.Node(b.plugin.host.GetHostName())
if err != nil {
klog.Warningf("node device path lookup failed: %v", err)
return defaultDeviceDir
}
if ctrl == nil || ctrl.DeviceDir == "" {
klog.Warningf("node device path not set, using default: %s", defaultDeviceDir)
return defaultDeviceDir
}
return ctrl.DeviceDir
}
// pathDeviceType returns the deviceType for a path.
func pathDeviceType(path string) (deviceType, error) {
fi, err := os.Stat(path)
if err != nil {
return modeUnsupported, err
}
switch mode := fi.Mode(); {
case mode&os.ModeDevice != 0:
return modeBlock, nil
case mode.IsRegular():
return modeFile, nil
default:
return modeUnsupported, nil
}
}
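A self-contained sketch of the same classification: a regular file (like a StorageOS volume blob when NBD is unavailable) stats as modeFile and must be attached via losetup, while a device node would stat as modeBlock.

package main

import (
	"fmt"
	"io/ioutil"
	"os"
)

// classify reproduces the mode checks in pathDeviceType above.
func classify(path string) string {
	fi, err := os.Stat(path)
	if err != nil {
		return "unsupported"
	}
	switch mode := fi.Mode(); {
	case mode&os.ModeDevice != 0:
		return "block"
	case mode.IsRegular():
		return "file"
	default:
		return "unsupported"
	}
}

func main() {
	f, err := ioutil.TempFile("", "storageos-vol")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	fmt.Println(classify(f.Name())) // file
}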
// attachFileDevice takes a path to a regular file and makes it available as an
// attached block device.
func attachFileDevice(path string, exec mount.Exec) (string, error) {
blockDevicePath, err := getLoopDevice(path, exec)
if err != nil && err.Error() != ErrDeviceNotFound {
return "", err
}
// If no existing loop device for the path, create one
if blockDevicePath == "" {
klog.V(4).Infof("Creating device for path: %s", path)
blockDevicePath, err = makeLoopDevice(path, exec)
if err != nil {
return "", err
}
}
return blockDevicePath, nil
}
// Returns the full path to the loop device associated with the given path.
func getLoopDevice(path string, exec mount.Exec) (string, error) {
_, err := os.Stat(path)
if os.IsNotExist(err) {
return "", errors.New(ErrNotAvailable)
}
if err != nil {
return "", fmt.Errorf("not attachable: %v", err)
}
args := []string{"-j", path}
out, err := exec.Run(losetupPath, args...)
if err != nil {
klog.V(2).Infof("Failed device discover command for path %s: %v", path, err)
return "", err
}
return parseLosetupOutputForDevice(out)
}
func makeLoopDevice(path string, exec mount.Exec) (string, error) {
args := []string{"-f", "-P", "--show", path}
out, err := exec.Run(losetupPath, args...)
if err != nil {
klog.V(2).Infof("Failed device create command for path %s: %v", path, err)
return "", err
}
return parseLosetupOutputForDevice(out)
}
func removeLoopDevice(device string, exec mount.Exec) error {
args := []string{"-d", device}
out, err := exec.Run(losetupPath, args...)
if err != nil {
if !strings.Contains(string(out), "No such device or address") {
return err
}
}
return nil
}
func isLoopDevice(device string) bool {
return strings.HasPrefix(device, "/dev/loop")
}
func parseLosetupOutputForDevice(output []byte) (string, error) {
if len(output) == 0 {
return "", errors.New(ErrDeviceNotFound)
}
// losetup returns device in the format:
// /dev/loop1: [0073]:148662 (/var/lib/storageos/volumes/308f14af-cf0a-08ff-c9c3-b48104318e05)
device := strings.TrimSpace(strings.SplitN(string(output), ":", 2)[0])
if len(device) == 0 {
return "", errors.New(ErrDeviceNotFound)
}
return device, nil
}
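Illustrative only: running the sample line from the comment above through the same SplitN logic recovers the device path.

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Sample `losetup -j` output, as in the comment above.
	out := "/dev/loop1: [0073]:148662 (/var/lib/storageos/volumes/308f14af-cf0a-08ff-c9c3-b48104318e05)"
	device := strings.TrimSpace(strings.SplitN(out, ":", 2)[0])
	fmt.Println(device) // /dev/loop1
}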

View File

@@ -1,236 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storageos
import (
"fmt"
"os"
storageostypes "github.com/storageos/go-api/types"
"k8s.io/api/core/v1"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
"testing"
)
var testAPISecretName = "storageos-api"
var testVolName = "storageos-test-vol"
var testPVName = "storageos-test-pv"
var testNamespace = "storageos-test-namespace"
var testSize = 1
var testDesc = "testdescription"
var testPool = "testpool"
var testFSType = "ext2"
var testVolUUID = "01c43d34-89f8-83d3-422b-43536a0f25e6"
type fakeConfig struct {
apiAddr string
apiUser string
apiPass string
apiVersion string
}
func (c fakeConfig) GetAPIConfig() *storageosAPIConfig {
return &storageosAPIConfig{
apiAddr: "http://5.6.7.8:9999",
apiUser: "abc",
apiPass: "123",
apiVersion: "10",
}
}
func TestClient(t *testing.T) {
util := storageosUtil{}
cfg := fakeConfig{}
err := util.NewAPI(cfg.GetAPIConfig())
if err != nil {
t.Fatalf("error getting api config: %v", err)
}
if util.api == nil {
t.Errorf("client() unexpectedly returned nil")
}
}
type fakeAPI struct{}
func (f fakeAPI) Volume(namespace string, ref string) (*storageostypes.Volume, error) {
if namespace == testNamespace && ref == testVolName {
return &storageostypes.Volume{
ID: "01c43d34-89f8-83d3-422b-43536a0f25e6",
Name: ref,
Pool: "default",
Namespace: namespace,
Size: 5,
}, nil
}
return nil, fmt.Errorf("not found")
}
func (f fakeAPI) VolumeCreate(opts storageostypes.VolumeCreateOptions) (*storageostypes.Volume, error) {
// Append a label from the api
labels := opts.Labels
labels["labelfromapi"] = "apilabel"
return &storageostypes.Volume{
ID: testVolUUID,
Name: opts.Name,
Namespace: opts.Namespace,
Description: opts.Description,
Pool: opts.Pool,
Size: opts.Size,
FSType: opts.FSType,
Labels: labels,
}, nil
}
func (f fakeAPI) VolumeMount(opts storageostypes.VolumeMountOptions) error {
return nil
}
func (f fakeAPI) VolumeUnmount(opts storageostypes.VolumeUnmountOptions) error {
return nil
}
func (f fakeAPI) VolumeDelete(opts storageostypes.DeleteOptions) error {
return nil
}
func (f fakeAPI) Node(ref string) (*storageostypes.Node, error) {
return &storageostypes.Node{}, nil
}
func TestCreateVolume(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("storageos_test")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, _ := plugMgr.FindPluginByName("kubernetes.io/storageos")
// Use real util with stubbed api
util := &storageosUtil{}
util.api = fakeAPI{}
labels := map[string]string{
"labelA": "valueA",
"labelB": "valueB",
}
options := volume.VolumeOptions{
PVName: testPVName,
PVC: volumetest.CreateTestPVC(fmt.Sprintf("%dGi", testSize), []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}),
PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete,
}
provisioner := &storageosProvisioner{
storageosMounter: &storageosMounter{
storageos: &storageos{
pvName: testPVName,
volName: testVolName,
volNamespace: testNamespace,
sizeGB: testSize,
pool: testPool,
description: testDesc,
fsType: testFSType,
labels: labels,
manager: util,
plugin: plug.(*storageosPlugin),
},
},
options: options,
}
vol, err := util.CreateVolume(provisioner)
if err != nil {
t.Errorf("CreateVolume() returned error: %v", err)
}
if vol == nil {
t.Fatalf("CreateVolume() vol is empty")
}
if vol.ID == "" {
t.Error("CreateVolume() vol ID is empty")
}
if vol.Name != testVolName {
t.Errorf("CreateVolume() returned unexpected Name %s", vol.Name)
}
if vol.Namespace != testNamespace {
t.Errorf("CreateVolume() returned unexpected Namespace %s", vol.Namespace)
}
if vol.Pool != testPool {
t.Errorf("CreateVolume() returned unexpected Pool %s", vol.Pool)
}
if vol.FSType != testFSType {
t.Errorf("CreateVolume() returned unexpected FSType %s", vol.FSType)
}
if vol.SizeGB != testSize {
t.Errorf("CreateVolume() returned unexpected Size %d", vol.SizeGB)
}
if len(vol.Labels) == 0 {
t.Error("CreateVolume() Labels are empty")
} else {
var val string
var ok bool
for k, v := range labels {
if val, ok = vol.Labels[k]; !ok {
t.Errorf("CreateVolume() Label %s not set", k)
}
if val != v {
t.Errorf("CreateVolume() returned unexpected Label value %s", val)
}
}
if val, ok = vol.Labels["labelfromapi"]; !ok {
t.Error("CreateVolume() Label from api not set")
}
if val != "apilabel" {
t.Errorf("CreateVolume() returned unexpected Label value %s", val)
}
}
}
func TestAttachVolume(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("storageos_test")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, _ := plugMgr.FindPluginByName("kubernetes.io/storageos")
// Use real util with stubbed api
util := &storageosUtil{}
util.api = fakeAPI{}
mounter := &storageosMounter{
storageos: &storageos{
volName: testVolName,
volNamespace: testNamespace,
manager: util,
mounter: &mount.FakeMounter{},
plugin: plug.(*storageosPlugin),
},
deviceDir: tmpDir,
}
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
}
if mounter == nil {
t.Errorf("Got a nil Mounter")
}
}

View File

@@ -1,161 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere_volume
import (
"fmt"
"path"
"path/filepath"
"strings"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
utilstrings "k8s.io/utils/strings"
)
var _ volume.BlockVolumePlugin = &vsphereVolumePlugin{}
func (plugin *vsphereVolumePlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeName, mapPath string) (*volume.Spec, error) {
pluginDir := plugin.host.GetPluginDir(plugin.GetPluginName())
blkUtil := volumepathhandler.NewBlockVolumePathHandler()
globalMapPathUUID, err := blkUtil.FindGlobalMapPathUUIDFromPod(pluginDir, mapPath, podUID)
if err != nil {
klog.Errorf("Failed to find GlobalMapPathUUID from Pod: %s with error: %+v", podUID, err)
return nil, err
}
klog.V(5).Infof("globalMapPathUUID: %v", globalMapPathUUID)
globalMapPath := filepath.Dir(globalMapPathUUID)
if len(globalMapPath) <= 1 {
return nil, fmt.Errorf("failed to get volume plugin information from globalMapPathUUID: %v", globalMapPathUUID)
}
return getVolumeSpecFromGlobalMapPath(globalMapPath)
}
func getVolumeSpecFromGlobalMapPath(globalMapPath string) (*volume.Spec, error) {
// Construct volume spec from globalMapPath
// globalMapPath example:
// plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumeID}
// plugins/kubernetes.io/vsphere-volume/volumeDevices/[datastore1]\\040volumes/myDisk
volPath := filepath.Base(globalMapPath)
volPath = strings.Replace(volPath, "\\040", "", -1)
if len(volPath) <= 1 {
return nil, fmt.Errorf("failed to get volume path from global path=%s", globalMapPath)
}
block := v1.PersistentVolumeBlock
vsphereVolume := &v1.PersistentVolume{
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
VolumePath: volPath,
},
},
VolumeMode: &block,
},
}
return volume.NewSpecFromPersistentVolume(vsphereVolume, true), nil
}
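A sketch of the path handling above, using the sample layout from the comment: the volume ID is the base name of the global map path, with the mount-escaped space sequence "\\040" removed as in the deleted code.

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

func main() {
	globalMapPath := "plugins/kubernetes.io/vsphere-volume/volumeDevices/volPath1"
	volPath := filepath.Base(globalMapPath)
	volPath = strings.Replace(volPath, "\\040", "", -1)
	fmt.Println(volPath) // volPath1
}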
func (plugin *vsphereVolumePlugin) NewBlockVolumeMapper(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.BlockVolumeMapper, error) {
// If this is called via GenerateUnmapDeviceFunc(), pod is nil.
// Pass empty string as dummy uid since uid isn't used in the case.
var uid types.UID
if pod != nil {
uid = pod.UID
}
return plugin.newBlockVolumeMapperInternal(spec, uid, &VsphereDiskUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
}
func (plugin *vsphereVolumePlugin) newBlockVolumeMapperInternal(spec *volume.Spec, podUID types.UID, manager vdManager, mounter mount.Interface) (volume.BlockVolumeMapper, error) {
volumeSource, _, err := getVolumeSource(spec)
if err != nil {
klog.Errorf("Failed to get Volume source from volume Spec: %+v with error: %+v", *spec, err)
return nil, err
}
volPath := volumeSource.VolumePath
return &vsphereBlockVolumeMapper{
vsphereVolume: &vsphereVolume{
volName: spec.Name(),
podUID: podUID,
volPath: volPath,
manager: manager,
mounter: mounter,
plugin: plugin,
MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, spec.Name(), plugin.host)),
},
}, nil
}
func (plugin *vsphereVolumePlugin) NewBlockVolumeUnmapper(volName string, podUID types.UID) (volume.BlockVolumeUnmapper, error) {
return plugin.newUnmapperInternal(volName, podUID, &VsphereDiskUtil{})
}
func (plugin *vsphereVolumePlugin) newUnmapperInternal(volName string, podUID types.UID, manager vdManager) (volume.BlockVolumeUnmapper, error) {
return &vsphereBlockVolumeUnmapper{
vsphereVolume: &vsphereVolume{
volName: volName,
podUID: podUID,
volPath: volName,
manager: manager,
plugin: plugin,
},
}, nil
}
var _ volume.BlockVolumeMapper = &vsphereBlockVolumeMapper{}
type vsphereBlockVolumeMapper struct {
*vsphereVolume
}
func (v vsphereBlockVolumeMapper) SetUpDevice() (string, error) {
return "", nil
}
func (v vsphereBlockVolumeMapper) MapDevice(devicePath, globalMapPath, volumeMapPath, volumeMapName string, podUID types.UID) error {
return util.MapBlockVolume(devicePath, globalMapPath, volumeMapPath, volumeMapName, podUID)
}
var _ volume.BlockVolumeUnmapper = &vsphereBlockVolumeUnmapper{}
type vsphereBlockVolumeUnmapper struct {
*vsphereVolume
}
func (v *vsphereBlockVolumeUnmapper) TearDownDevice(mapPath, devicePath string) error {
return nil
}
// GetGlobalMapPath returns global map path and error
// path: plugins/kubernetes.io/{PluginName}/volumeDevices/volumePath
func (v *vsphereVolume) GetGlobalMapPath(spec *volume.Spec) (string, error) {
volumeSource, _, err := getVolumeSource(spec)
if err != nil {
return "", err
}
return path.Join(v.plugin.host.GetVolumeDevicePluginDir(vsphereVolumePluginName), string(volumeSource.VolumePath)), nil
}
func (v *vsphereVolume) GetPodDeviceMapPath() (string, string) {
return v.plugin.host.GetPodVolumeDeviceDir(v.podUID, utilstrings.EscapeQualifiedName(vsphereVolumePluginName)), v.volName
}
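For concreteness, the two path families returned above, composed under the assumption of a kubelet root dir (the test fixtures below use the same relative layout):

package main

import (
	"fmt"
	"path"
)

func main() {
	kubeletRoot := "/var/lib/kubelet" // assumption: stands in for the fake volume host root in the tests below
	// GetGlobalMapPath: plugins/kubernetes.io/{PluginName}/volumeDevices/{volumePath}
	fmt.Println(path.Join(kubeletRoot, "plugins/kubernetes.io/vsphere-volume/volumeDevices", "volPath1"))
	// GetPodDeviceMapPath: pods/{podUID}/volumeDevices/kubernetes.io~vsphere-volume
	fmt.Println(path.Join(kubeletRoot, "pods/poduid/volumeDevices/kubernetes.io~vsphere-volume"))
}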

View File

@@ -1,147 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere_volume
import (
"os"
"path"
"testing"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
)
var (
testVolumePath = "volPath1"
testGlobalPath = "plugins/kubernetes.io/vsphere-volume/volumeDevices/volPath1"
testPodPath = "pods/poduid/volumeDevices/kubernetes.io~vsphere-volume"
)
func TestGetVolumeSpecFromGlobalMapPath(t *testing.T) {
// make our test path for fake GlobalMapPath
// /tmp stands in for our pluginDir
// /tmp/testGlobalPathXXXXX/plugins/kubernetes.io/vsphere-volume/volumeDevices/
tmpVDir, err := utiltesting.MkTmpdir("vsphereBlockVolume")
if err != nil {
t.Fatalf("cant' make a temp dir: %s", err)
}
// deferred clean up
defer os.RemoveAll(tmpVDir)
expectedGlobalPath := path.Join(tmpVDir, testGlobalPath)
// Bad Path
badspec, err := getVolumeSpecFromGlobalMapPath("")
if badspec != nil || err == nil {
t.Errorf("Expected not to get spec from GlobalMapPath but did")
}
// Good Path
spec, err := getVolumeSpecFromGlobalMapPath(expectedGlobalPath)
if spec == nil || err != nil {
t.Fatalf("Failed to get spec from GlobalMapPath: %s", err)
}
if spec.PersistentVolume.Spec.VsphereVolume.VolumePath != testVolumePath {
t.Fatalf("Invalid volumePath from GlobalMapPath spec: %s", spec.PersistentVolume.Spec.VsphereVolume.VolumePath)
}
block := v1.PersistentVolumeBlock
specMode := spec.PersistentVolume.Spec.VolumeMode
if specMode == nil {
t.Fatalf("Invalid volumeMode from GlobalMapPath spec: nil, expected: %v", block)
}
if *specMode != block {
t.Errorf("Invalid volumeMode from GlobalMapPath spec: %v expected: %v", *specMode, block)
}
}
func TestGetPodAndPluginMapPaths(t *testing.T) {
tmpVDir, err := utiltesting.MkTmpdir("vsphereBlockVolume")
if err != nil {
t.Fatalf("cant' make a temp dir: %s", err)
}
// deferred clean up
defer os.RemoveAll(tmpVDir)
expectedGlobalPath := path.Join(tmpVDir, testGlobalPath)
expectedPodPath := path.Join(tmpVDir, testPodPath)
spec := getTestVolume(true) // block volume
pluginMgr := volume.VolumePluginMgr{}
pluginMgr.InitPlugins(ProbeVolumePlugins(), nil, volumetest.NewFakeVolumeHost(tmpVDir, nil, nil))
plugin, err := pluginMgr.FindMapperPluginByName(vsphereVolumePluginName)
if err != nil {
os.RemoveAll(tmpVDir)
t.Fatalf("Can't find the plugin by name: %q", vsphereVolumePluginName)
}
if plugin.GetPluginName() != vsphereVolumePluginName {
t.Fatalf("Wrong name: %s", plugin.GetPluginName())
}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
UID: types.UID("poduid"),
},
}
mapper, err := plugin.NewBlockVolumeMapper(spec, pod, volume.VolumeOptions{})
if err != nil {
t.Fatalf("Failed to make a new Mounter: %v", err)
}
if mapper == nil {
t.Fatalf("Got a nil Mounter")
}
// GetGlobalMapPath
globalMapPath, err := mapper.GetGlobalMapPath(spec)
if err != nil || len(globalMapPath) == 0 {
t.Fatalf("Invalid GlobalMapPath from spec: %s", spec.PersistentVolume.Spec.VsphereVolume.VolumePath)
}
if globalMapPath != expectedGlobalPath {
t.Errorf("Failed to get GlobalMapPath: %s %s", globalMapPath, expectedGlobalPath)
}
// GetPodDeviceMapPath
devicePath, volumeName := mapper.GetPodDeviceMapPath()
if devicePath != expectedPodPath {
t.Errorf("Got unexpected pod path: %s, expected %s", devicePath, expectedPodPath)
}
if volumeName != testVolumePath {
t.Errorf("Got unexpected volNamne: %s, expected %s", volumeName, testVolumePath)
}
}
func getTestVolume(isBlock bool) *volume.Spec {
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: testVolumePath,
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
VolumePath: testVolumePath,
},
},
},
}
if isBlock {
blockMode := v1.PersistentVolumeBlock
pv.Spec.VolumeMode = &blockMode
}
return volume.NewSpecFromPersistentVolume(pv, true)
}