add iscsi volume plugin

Signed-off-by: Huamin Chen <hchen@redhat.com>
pull/6/head
Huamin Chen 2015-03-13 17:31:13 -04:00
parent ed2192a61e
commit 7a82af31de
25 changed files with 1104 additions and 6 deletions

View File

@ -6683,6 +6683,32 @@
}
}
},
"v1beta1.ISCSIVolumeSource": {
"id": "v1beta1.ISCSIVolumeSource",
"properties": {
"fsType": {
"type": "string",
"description": "file system type to mount, such as ext4, xfs, ntfs"
},
"iqn": {
"type": "string",
"description": "iSCSI Qualified Name"
},
"lun": {
"type": "integer",
"format": "int32",
"description": "iscsi target lun number"
},
"readOnly": {
"type": "boolean",
"description": "read-only if true, read-write otherwise (false or unspecified)"
},
"targetPortal": {
"type": "string",
"description": "iSCSI target portal"
}
}
},
"v1beta1.HTTPGetAction": {
"id": "v1beta1.HTTPGetAction",
"properties": {
@ -8607,6 +8633,7 @@
"persistentDisk",
"gitRepo",
"secret",
"iscsi",
"nfs"
],
"properties": {
@ -8630,6 +8657,10 @@
"$ref": "v1beta1.GCEPersistentDiskVolumeSource",
"description": "GCE disk resource attached to the host machine on demand"
},
"iscsi": {
"$ref": "v1beta1.ISCSIVolumeSource",
"description": "iSCSI disk attached to host machine on demand"
},
"secret": {
"$ref": "v1beta1.SecretVolumeSource",
"description": "secret to populate volume with"

View File

@ -6679,6 +6679,32 @@
}
}
},
"v1beta2.ISCSIVolumeSource": {
"id": "v1beta2.ISCSIVolumeSource",
"properties": {
"fsType": {
"type": "string",
"description": "file system type to mount, such as ext4, xfs, ntfs"
},
"iqn": {
"type": "string",
"description": "iSCSI Qualified Name"
},
"lun": {
"type": "integer",
"format": "int32",
"description": "iscsi target lun number"
},
"readOnly": {
"type": "boolean",
"description": "read-only if true, read-write otherwise (false or unspecified)"
},
"targetPortal": {
"type": "string",
"description": "iSCSI target portal"
}
}
},
"v1beta2.HTTPGetAction": {
"id": "v1beta2.HTTPGetAction",
"properties": {
@ -8588,6 +8614,7 @@
"persistentDisk",
"gitRepo",
"secret",
"iscsi",
"nfs"
],
"properties": {
@ -8611,6 +8638,10 @@
"$ref": "v1beta2.GCEPersistentDiskVolumeSource",
"description": "GCE disk resource attached to the host machine on demand"
},
"iscsi": {
"$ref": "v1beta2.ISCSIVolumeSource",
"description": "iSCSI disk attached to host machine on demand"
},
"secret": {
"$ref": "v1beta2.SecretVolumeSource",
"description": "secret to populate volume"

View File

@ -7016,6 +7016,32 @@
}
}
},
"v1beta3.ISCSIVolumeSource": {
"id": "v1beta3.ISCSIVolumeSource",
"properties": {
"fsType": {
"type": "string",
"description": "file system type to mount, such as ext4, xfs, ntfs"
},
"iqn": {
"type": "string",
"description": "iSCSI Qualified Name"
},
"lun": {
"type": "integer",
"format": "int32",
"description": "iscsi target lun number"
},
"readOnly": {
"type": "boolean",
"description": "read-only if true, read-write otherwise (false or unspecified)"
},
"targetPortal": {
"type": "string",
"description": "iSCSI target portal"
}
}
},
"v1beta3.HTTPGetAction": {
"id": "v1beta3.HTTPGetAction",
"properties": {
@ -8660,6 +8686,7 @@
"secret",
"nfs",
"hostPath",
"iscsi",
"emptyDir"
],
"properties": {
@ -8687,6 +8714,10 @@
"$ref": "v1beta3.NFSVolumeSource",
"description": "NFS volume that will be mounted in the host machine"
},
"iscsi": {
"$ref": "v1beta3.ISCSIVolumeSource",
"description": "iSCSI disk attached to host machine on demand"
},
"secret": {
"$ref": "v1beta3.SecretVolumeSource",
"description": "secret to populate volume"

View File

@ -29,6 +29,7 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/volume/gce_pd"
"github.com/GoogleCloudPlatform/kubernetes/pkg/volume/git_repo"
"github.com/GoogleCloudPlatform/kubernetes/pkg/volume/host_path"
"github.com/GoogleCloudPlatform/kubernetes/pkg/volume/iscsi"
"github.com/GoogleCloudPlatform/kubernetes/pkg/volume/nfs"
"github.com/GoogleCloudPlatform/kubernetes/pkg/volume/secret"
//Cloud providers
@ -53,7 +54,7 @@ func ProbeVolumePlugins() []volume.VolumePlugin {
allPlugins = append(allPlugins, host_path.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, nfs.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, secret.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, iscsi.ProbeVolumePlugins()...)
return allPlugins
}

View File

@ -175,6 +175,12 @@ func TestExampleObjectSchemas(t *testing.T) {
"claim-02": &api.PersistentVolumeClaim{},
"claim-03": &api.PersistentVolumeClaim{},
},
"../examples/iscsi/v1beta1": {
"iscsi": &api.Pod{},
},
"../examples/iscsi/v1beta3": {
"iscsi": &api.Pod{},
},
}
for path, expected := range cases {
@ -182,7 +188,7 @@ func TestExampleObjectSchemas(t *testing.T) {
err := walkJSONFiles(path, func(name, path string, data []byte) {
expectedType, found := expected[name]
if !found {
t.Errorf("%s does not have a test case defined", path)
t.Errorf("%s: %s does not have a test case defined", path, name)
return
}
tested += 1
@ -210,6 +216,7 @@ func TestReadme(t *testing.T) {
paths := []string{
"../README.md",
"../examples/walkthrough/README.md",
"../examples/iscsi/README.md",
}
for _, path := range paths {

51
examples/iscsi/README.md Normal file
View File

@ -0,0 +1,51 @@
# How to Use it?
Here is my setup for using Kubernetes with iSCSI persistent storage. I use Fedora 21 on the Kubernetes node.
Install iSCSI initiator on the node:
# yum -y install iscsi-initiator-utils
then edit */etc/iscsi/initiatorname.iscsi* and */etc/iscsi/iscsid.conf* to match your iSCSI target configuration.
I mostly follow these [instructions](http://www.server-world.info/en/note?os=Fedora_21&p=iscsi&f=2) to setup iSCSI initiator and these [instructions](http://www.server-world.info/en/note?os=Fedora_21&p=iscsi) to setup iSCSI target.
Once you have installed the iSCSI initiator and a sufficiently recent Kubernetes, you can create a pod based on my example *iscsi.json*. In the pod JSON, you need to provide *targetPortal* (the iSCSI target's **IP** address and *port* if not the default port 3260), the target's *iqn*, *lun*, the type of the filesystem that has been created on the lun, and the *readOnly* boolean.
Once your pod is created, run it on the Kubernetes master:
# cluster/kubectl.sh create -f your_new_pod.json
Here is my command and output:
```console
# cluster/kubectl.sh create -f examples/iscsi/v1beta3/iscsi.json
# cluster/kubectl.sh get pods
POD IP CONTAINER(S) IMAGE(S) HOST LABELS STATUS CREATED
iscsi 172.17.0.5 iscsipd-ro kubernetes/pause fed-minion/10.16.154.75 <none> Running About a minute
iscsipd-rw kubernetes/pause
```
On the Kubernetes node, I got these in mount output
```console
#mount |grep kub
/dev/sdb on /var/lib/kubelet/plugins/kubernetes.io/iscsi/iscsi/10.16.154.81:3260/iqn.2014-12.world.server:storage.target1/lun/0 type ext4 (ro,relatime,stripe=1024,data=ordered)
/dev/sdb on /var/lib/kubelet/pods/4ab78fdc-b927-11e4-ade6-d4bed9b39058/volumes/kubernetes.io~iscsi/iscsipd-ro type ext4 (ro,relatime,stripe=1024,data=ordered)
/dev/sdc on /var/lib/kubelet/plugins/kubernetes.io/iscsi/iscsi/10.16.154.81:3260/iqn.2014-12.world.server:storage.target1/lun/1 type xfs (rw,relatime,attr2,inode64,noquota)
/dev/sdc on /var/lib/kubelet/pods/4ab78fdc-b927-11e4-ade6-d4bed9b39058/volumes/kubernetes.io~iscsi/iscsipd-rw type xfs (rw,relatime,attr2,inode64,noquota)
```
If you ssh to that machine, you can run `docker ps` to see the actual pod.
```console
# docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
cc9bd22d9e9d kubernetes/pause:latest "/pause" 3 minutes ago Up 3 minutes k8s_iscsipd-rw.12d8f0c5_iscsipd.default.etcd_4ab78fdc-b927-11e4-ade6-d4bed9b39058_e3f49dcc
```
Run *docker inspect* and you will see that the containers mounted the host directory into their */mnt/iscsipd* directory.
```console
#docker inspect --format "\{\{\.Volumes\}\}" cc9bd22d9e9d
map[/mnt/iscsipd:/var/lib/kubelet/pods/4ab78fdc-b927-11e4-ade6-d4bed9b39058/volumes/kubernetes.io~iscsi/iscsipd-rw /dev/termination-log:/var/lib/kubelet/pods/4ab78fdc-b927-11e4-ade6-d4bed9b39058/containers/iscsipd-rw/cc9bd22d9e9db3c88a150cadfdccd86e36c463629035b48bdcfc8ec534be8615]
```

View File

@ -0,0 +1,59 @@
{
"id": "iscsipd",
"kind": "Pod",
"apiVersion": "v1beta1",
"desiredState": {
"manifest": {
"version": "v1beta1",
"id": "iscsipd",
"containers": [
{
"name": "iscsipd-ro",
"image": "kubernetes/pause",
"volumeMounts": [
{
"mountPath": "/mnt/iscsipd",
"name": "iscsipd-ro"
}
]
},
{
"name": "iscsipd-rw",
"image": "kubernetes/pause",
"volumeMounts": [
{
"mountPath": "/mnt/iscsipd",
"name": "iscsipd-rw"
}
]
}
],
"volumes": [
{
"name": "iscsipd-ro",
"source": {
"iscsi": {
"targetPortal": "10.16.154.81:3260",
"iqn": "iqn.2014-12.world.server:storage.target01",
"lun": 0,
"fsType": "ext4",
"readOnly": true
}
}
},
{
"name": "iscsipd-rw",
"source": {
"iscsi": {
"targetPortal": "10.16.154.81:3260",
"iqn": "iqn.2014-12.world.server:storage.target01",
"lun": 1,
"fsType": "xfs",
"readOnly": false
}
}
}
]
}
}
}

View File

@ -0,0 +1,54 @@
{
"apiVersion": "v1beta3",
"id": "iscsipd",
"kind": "Pod",
"metadata": {
"name": "iscsi"
},
"spec": {
"containers": [
{
"name": "iscsipd-ro",
"image": "kubernetes/pause",
"volumeMounts": [
{
"mountPath": "/mnt/iscsipd",
"name": "iscsipd-ro"
}
]
},
{
"name": "iscsipd-rw",
"image": "kubernetes/pause",
"volumeMounts": [
{
"mountPath": "/mnt/iscsipd",
"name": "iscsipd-rw"
}
]
}
],
"volumes": [
{
"name": "iscsipd-ro",
"iscsi": {
"targetPortal": "10.16.154.81:3260",
"iqn": "iqn.2014-12.world.server:storage.target01",
"lun": 0,
"fsType": "ext4",
"readOnly": true
}
},
{
"name": "iscsipd-rw",
"iscsi": {
"targetPortal": "10.16.154.81:3260",
"iqn": "iqn.2014-12.world.server:storage.target01",
"lun": 1,
"fsType": "xfs",
"readOnly": false
}
}
]
}
}

View File

@ -173,7 +173,7 @@ func FuzzerFor(t *testing.T, version string, src rand.Source) *fuzz.Fuzzer {
func(vs *api.VolumeSource, c fuzz.Continue) {
// Exactly one of the fields should be set.
//FIXME: the fuzz can still end up nil. What if fuzz allowed me to say that?
fuzzOneOf(c, &vs.HostPath, &vs.EmptyDir, &vs.GCEPersistentDisk, &vs.GitRepo, &vs.Secret, &vs.NFS)
fuzzOneOf(c, &vs.HostPath, &vs.EmptyDir, &vs.GCEPersistentDisk, &vs.GitRepo, &vs.Secret, &vs.NFS, &vs.ISCSI)
},
func(d *api.DNSPolicy, c fuzz.Continue) {
policies := []api.DNSPolicy{api.DNSClusterFirst, api.DNSDefault}

View File

@ -195,6 +195,9 @@ type VolumeSource struct {
Secret *SecretVolumeSource `json:"secret"`
// NFS represents an NFS mount on the host that shares a pod's lifetime
NFS *NFSVolumeSource `json:"nfs"`
// ISCSIVolumeSource represents an ISCSI Disk resource that is attached to a
// kubelet's host machine and then exposed to the pod.
ISCSI *ISCSIVolumeSource `json:"iscsi"`
}
// Similar to VolumeSource but meant for the administrator who creates PVs.
@ -368,6 +371,25 @@ type GCEPersistentDiskVolumeSource struct {
ReadOnly bool `json:"readOnly,omitempty"`
}
// A ISCSI Disk can only be mounted as read/write once.
type ISCSIVolumeSource struct {
// Required: iSCSI target portal
// the portal is either an IP or ip_addr:port if port is other than default (typically TCP ports 860 and 3260)
TargetPortal string `json:"targetPortal,omitempty"`
// Required: target iSCSI Qualified Name
IQN string `json:"iqn,omitempty"`
// Required: iSCSI target lun number
Lun int `json:"lun,omitempty"`
// Required: Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs"
// TODO: how do we prevent errors in the filesystem from compromising the machine
FSType string `json:"fsType,omitempty"`
// Optional: Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
ReadOnly bool `json:"readOnly,omitempty"`
}
// GitRepoVolumeSource represents a volume that is pulled from git when the pod is created.
type GitRepoVolumeSource struct {
// Repository URL

View File

@ -1167,6 +1167,9 @@ func init() {
if err := s.Convert(&in.GCEPersistentDisk, &out.GCEPersistentDisk, 0); err != nil {
return err
}
if err := s.Convert(&in.ISCSI, &out.ISCSI, 0); err != nil {
return err
}
if err := s.Convert(&in.HostPath, &out.HostDir, 0); err != nil {
return err
}
@ -1188,6 +1191,9 @@ func init() {
if err := s.Convert(&in.GCEPersistentDisk, &out.GCEPersistentDisk, 0); err != nil {
return err
}
if err := s.Convert(&in.ISCSI, &out.ISCSI, 0); err != nil {
return err
}
if err := s.Convert(&in.HostDir, &out.HostPath, 0); err != nil {
return err
}

View File

@ -111,6 +111,9 @@ type VolumeSource struct {
Secret *SecretVolumeSource `json:"secret" description:"secret to populate volume with"`
// NFS represents an NFS mount on the host that shares a pod's lifetime
NFS *NFSVolumeSource `json:"nfs" description:"NFS volume that will be mounted in the host machine "`
// ISCSI represents an ISCSI Disk resource that is attached to a
// kubelet's host machine and then exposed to the pod.
ISCSI *ISCSIVolumeSource `json:"iscsi" description:"iSCSI disk attached to host machine on demand"`
}
// Similar to VolumeSource but meant for the administrator who creates PVs.
@ -276,6 +279,25 @@ type GCEPersistentDiskVolumeSource struct {
ReadOnly bool `json:"readOnly,omitempty" description:"read-only if true, read-write otherwise (false or unspecified)"`
}
// A ISCSI Disk can only be mounted as read/write once.
type ISCSIVolumeSource struct {
// Required: iSCSI target portal
// the portal is either an IP or ip_addr:port if port is other than default (typically TCP ports 860 and 3260)
TargetPortal string `json:"targetPortal,omitempty" description:"iSCSI target portal"`
// Required: target iSCSI Qualified Name
IQN string `json:"iqn,omitempty" description:"iSCSI Qualified Name"`
// Required: iSCSI target lun number
Lun int `json:"lun,omitempty" description:"iscsi target lun number"`
// Required: Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs"
// TODO: how do we prevent errors in the filesystem from compromising the machine
FSType string `json:"fsType,omitempty" description:"file system type to mount, such as ext4, xfs, ntfs"`
// Optional: Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
ReadOnly bool `json:"readOnly,omitempty" description:"read-only if true, read-write otherwise (false or unspecified)"`
}
// GitRepoVolumeSource represents a volume that is pulled from git when the pod is created.
type GitRepoVolumeSource struct {
// Repository URL

View File

@ -1091,6 +1091,9 @@ func init() {
if err := s.Convert(&in.GitRepo, &out.GitRepo, 0); err != nil {
return err
}
if err := s.Convert(&in.ISCSI, &out.ISCSI, 0); err != nil {
return err
}
if err := s.Convert(&in.GCEPersistentDisk, &out.GCEPersistentDisk, 0); err != nil {
return err
}
@ -1115,6 +1118,9 @@ func init() {
if err := s.Convert(&in.GCEPersistentDisk, &out.GCEPersistentDisk, 0); err != nil {
return err
}
if err := s.Convert(&in.ISCSI, &out.ISCSI, 0); err != nil {
return err
}
if err := s.Convert(&in.HostDir, &out.HostPath, 0); err != nil {
return err
}

View File

@ -80,6 +80,9 @@ type VolumeSource struct {
Secret *SecretVolumeSource `json:"secret" description:"secret to populate volume"`
// NFS represents an NFS mount on the host that shares a pod's lifetime
NFS *NFSVolumeSource `json:"nfs" description:"NFS volume that will be mounted in the host machine"`
// ISCSI represents an ISCSI Disk resource that is attached to a
// kubelet's host machine and then exposed to the pod.
ISCSI *ISCSIVolumeSource `json:"iscsi" description:"iSCSI disk attached to host machine on demand"`
}
// Similar to VolumeSource but meant for the administrator who creates PVs.
@ -285,6 +288,25 @@ type GitRepoVolumeSource struct {
Revision string `json:"revision" description:"commit hash for the specified revision"`
}
// A ISCSI Disk can only be mounted as read/write once.
type ISCSIVolumeSource struct {
// Required: iSCSI target portal
// the portal is either an IP or ip_addr:port if port is other than default (typically TCP ports 860 and 3260)
TargetPortal string `json:"targetPortal,omitempty" description:"iSCSI target portal"`
// Required: target iSCSI Qualified Name
IQN string `json:"iqn,omitempty" description:"iSCSI Qualified Name"`
// Required: iSCSI target lun number
Lun int `json:"lun,omitempty" description:"iscsi target lun number"`
// Required: Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs"
// TODO: how do we prevent errors in the filesystem from compromising the machine
FSType string `json:"fsType,omitempty" description:"file system type to mount, such as ext4, xfs, ntfs"`
// Optional: Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
ReadOnly bool `json:"readOnly,omitempty" description:"read-only if true, read-write otherwise (false or unspecified)"`
}
// VolumeMount describes a mounting of a Volume within a container.
//
// https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/volumes.md

View File

@ -212,6 +212,9 @@ type VolumeSource struct {
Secret *SecretVolumeSource `json:"secret" description:"secret to populate volume"`
// NFS represents an NFS mount on the host that shares a pod's lifetime
NFS *NFSVolumeSource `json:"nfs" description:"NFS volume that will be mounted in the host machine"`
// ISCSI represents an ISCSI Disk resource that is attached to a
// kubelet's host machine and then exposed to the pod.
ISCSI *ISCSIVolumeSource `json:"iscsi" description:"iSCSI disk attached to host machine on demand"`
}
// Similar to VolumeSource but meant for the administrator who creates PVs.
@ -409,6 +412,25 @@ type NFSVolumeSource struct {
ReadOnly bool `json:"readOnly,omitempty" description:"forces the NFS export to be mounted with read-only permissions"`
}
// A ISCSI Disk can only be mounted as read/write once.
type ISCSIVolumeSource struct {
// Required: iSCSI target portal
// the portal is either an IP or ip_addr:port if port is other than default (typically TCP ports 860 and 3260)
TargetPortal string `json:"targetPortal,omitempty" description:"iSCSI target portal"`
// Required: target iSCSI Qualified Name
IQN string `json:"iqn,omitempty" description:"iSCSI Qualified Name"`
// Required: iSCSI target lun number
Lun int `json:"lun,omitempty" description:"iscsi target lun number"`
// Required: Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs"
// TODO: how do we prevent errors in the filesystem from compromising the machine
FSType string `json:"fsType,omitempty" description:"file system type to mount, such as ext4, xfs, ntfs"`
// Optional: Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
ReadOnly bool `json:"readOnly,omitempty" description:"read-only if true, read-write otherwise (false or unspecified)"`
}
// ContainerPort represents a network port in a single container.
type ContainerPort struct {
// Optional: If specified, this must be a DNS_LABEL. Each named port

View File

@ -1312,6 +1312,31 @@
}
}
},
"v1beta1.ISCSI": {
"id": "v1beta1.ISCSI",
"required": [
"targetPortal",
"iqn"
],
"properties": {
"fsType": {
"type": "string"
},
"lun": {
"type": "integer",
"format": "int32"
},
"targetPortal": {
"type": "string"
},
"readOnly": {
"type": "boolean"
},
"iqn": {
"type": "string"
}
}
},
"v1beta1.HTTPGetAction": {
"id": "v1beta1.HTTPGetAction",
"properties": {
@ -2042,7 +2067,8 @@
"hostDir",
"emptyDir",
"persistentDisk",
"gitRepo"
"gitRepo",
"iscsi"
],
"properties": {
"emptyDir": {
@ -2056,7 +2082,10 @@
},
"persistentDisk": {
"type": "v1beta1.GCEPersistentDisk"
}
},
"iscsi": {
"type": "v1beta1.ISCSI"
}
}
}
}

View File

@ -307,6 +307,10 @@ func validateSource(source *api.VolumeSource) errs.ValidationErrorList {
numVolumes++
allErrs = append(allErrs, validateNFS(source.NFS).Prefix("nfs")...)
}
if source.ISCSI != nil {
numVolumes++
allErrs = append(allErrs, validateISCSIVolumeSource(source.ISCSI).Prefix("iscsi")...)
}
if numVolumes != 1 {
allErrs = append(allErrs, errs.NewFieldInvalid("", source, "exactly 1 volume type is required"))
}
@ -329,6 +333,23 @@ func validateGitRepoVolumeSource(gitRepo *api.GitRepoVolumeSource) errs.Validati
return allErrs
}
// validateISCSIVolumeSource checks that the required iSCSI fields
// (targetPortal, iqn, fsType) are set and that the lun number is within
// the valid range.
func validateISCSIVolumeSource(iscsi *api.ISCSIVolumeSource) errs.ValidationErrorList {
	allErrs := errs.ValidationErrorList{}
	if iscsi.TargetPortal == "" {
		allErrs = append(allErrs, errs.NewFieldRequired("targetPortal"))
	}
	if iscsi.IQN == "" {
		allErrs = append(allErrs, errs.NewFieldRequired("iqn"))
	}
	if iscsi.FSType == "" {
		allErrs = append(allErrs, errs.NewFieldRequired("fsType"))
	}
	if iscsi.Lun < 0 || iscsi.Lun > 255 {
		// Give a concrete reason instead of an empty string so the error is actionable.
		allErrs = append(allErrs, errs.NewFieldInvalid("lun", iscsi.Lun, "must be between 0 and 255"))
	}
	return allErrs
}
func validateGCEPersistentDiskVolumeSource(PD *api.GCEPersistentDiskVolumeSource) errs.ValidationErrorList {
allErrs := errs.ValidationErrorList{}
if PD.PDName == "" {

View File

@ -517,16 +517,19 @@ func TestValidateVolumes(t *testing.T) {
{Name: "empty", VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}},
{Name: "gcepd", VolumeSource: api.VolumeSource{GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{"my-PD", "ext4", 1, false}}},
{Name: "gitrepo", VolumeSource: api.VolumeSource{GitRepo: &api.GitRepoVolumeSource{"my-repo", "hashstring"}}},
{Name: "iscsidisk", VolumeSource: api.VolumeSource{ISCSI: &api.ISCSIVolumeSource{"127.0.0.1", "iqn.2015-02.example.com:test", 1, "ext4", false}}},
{Name: "secret", VolumeSource: api.VolumeSource{Secret: &api.SecretVolumeSource{"my-secret"}}},
}
names, errs := validateVolumes(successCase)
if len(errs) != 0 {
t.Errorf("expected success: %v", errs)
}
if len(names) != len(successCase) || !names.HasAll("abc", "123", "abc-123", "empty", "gcepd", "gitrepo", "secret") {
if len(names) != len(successCase) || !names.HasAll("abc", "123", "abc-123", "empty", "gcepd", "gitrepo", "secret", "iscsidisk") {
t.Errorf("wrong names result: %v", names)
}
emptyVS := api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}
emptyPortal := api.VolumeSource{ISCSI: &api.ISCSIVolumeSource{"", "iqn.2015-02.example.com:test", 1, "ext4", false}}
emptyIQN := api.VolumeSource{ISCSI: &api.ISCSIVolumeSource{"127.0.0.1", "", 1, "ext4", false}}
errorCases := map[string]struct {
V []api.Volume
T errors.ValidationErrorType
@ -536,6 +539,8 @@ func TestValidateVolumes(t *testing.T) {
"name > 63 characters": {[]api.Volume{{Name: strings.Repeat("a", 64), VolumeSource: emptyVS}}, errors.ValidationErrorTypeInvalid, "[0].name"},
"name not a DNS label": {[]api.Volume{{Name: "a.b.c", VolumeSource: emptyVS}}, errors.ValidationErrorTypeInvalid, "[0].name"},
"name not unique": {[]api.Volume{{Name: "abc", VolumeSource: emptyVS}, {Name: "abc", VolumeSource: emptyVS}}, errors.ValidationErrorTypeDuplicate, "[1].name"},
"empty portal": {[]api.Volume{{Name: "badportal", VolumeSource: emptyPortal}}, errors.ValidationErrorTypeRequired, "[0].source.iscsi.targetPortal"},
"empty iqn": {[]api.Volume{{Name: "badiqn", VolumeSource: emptyIQN}}, errors.ValidationErrorTypeRequired, "[0].source.iscsi.iqn"},
}
for k, v := range errorCases {
_, errs := validateVolumes(v.V)

View File

@ -76,3 +76,31 @@ func GetMountRefs(mounter Interface, mountPath string) ([]string, error) {
}
return refs, nil
}
// GetDeviceNameFromMount, given a mount point, finds the device from /proc/mounts.
// It returns the device name, the number of mounts that reference the device,
// and any error from listing the mount points.
func GetDeviceNameFromMount(mounter Interface, mountPath string) (string, int, error) {
	mps, err := mounter.List()
	if err != nil {
		return "", 0, err
	}

	// Find the device name.
	// FIXME if multiple devices mounted on the same mount path, only the first one is returned
	device := ""
	for i := range mps {
		if mps[i].Path == mountPath {
			device = mps[i].Device
			break
		}
	}
	// If nothing is mounted at mountPath, report zero references rather than
	// counting unrelated entries whose Device field happens to be empty.
	if device == "" {
		return "", 0, nil
	}

	// Find all references to the device.
	refCount := 0
	for i := range mps {
		if mps[i].Device == device {
			refCount++
		}
	}
	return device, refCount, nil
}

View File

@ -151,3 +151,32 @@ func setEquivalent(set1, set2 []string) bool {
}
return true
}
// TestGetDeviceNameFromMount verifies that the device backing a mount path is
// located and that every mount of that device is counted as a reference.
func TestGetDeviceNameFromMount(t *testing.T) {
	fm := &FakeMounter{
		MountPoints: []MountPoint{
			{Device: "/dev/disk/by-path/prefix-lun-1", Path: "/mnt/111"},
			{Device: "/dev/disk/by-path/prefix-lun-1", Path: "/mnt/222"},
		},
	}
	cases := []struct {
		mountPath      string
		expectedDevice string
		expectedRefs   int
	}{
		{mountPath: "/mnt/222", expectedDevice: "/dev/disk/by-path/prefix-lun-1", expectedRefs: 2},
	}
	for i, c := range cases {
		device, refs, err := GetDeviceNameFromMount(fm, c.mountPath)
		if err != nil || refs != c.expectedRefs || device != c.expectedDevice {
			t.Errorf("%d. GetDeviceNameFromMount(%s) = (%s, %d), %v; expected (%s,%d), nil", i, c.mountPath, device, refs, err, c.expectedDevice, c.expectedRefs)
		}
	}
}

View File

@ -0,0 +1,112 @@
/*
Copyright 2015 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package iscsi
import (
"os"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util/mount"
"github.com/golang/glog"
)
// Abstract interface to disk operations.
type diskManager interface {
MakeGlobalPDName(disk iscsiDisk) string
// Attaches the disk to the kubelet's host machine.
AttachDisk(disk iscsiDisk) error
// Detaches the disk from the kubelet's host machine.
DetachDisk(disk iscsiDisk, mntPath string) error
}
// diskSetUp is a utility to mount a disk based filesystem for a pod.
// It attaches the disk via the manager's AttachDisk, then bind-mounts the
// global per-disk mount point into the pod's volume directory (volPath).
func diskSetUp(manager diskManager, disk iscsiDisk, volPath string, mounter mount.Interface) error {
	globalPDPath := manager.MakeGlobalPDName(disk)
	// TODO: handle failed mounts here.
	mountpoint, err := mounter.IsMountPoint(volPath)
	if err != nil && !os.IsNotExist(err) {
		glog.Errorf("cannot validate mountpoint: %s", volPath)
		return err
	}
	// Already mounted: setup is idempotent, nothing more to do.
	if mountpoint {
		return nil
	}
	// Attach the device and mount it at the global path before bind-mounting.
	if err := manager.AttachDisk(disk); err != nil {
		glog.Errorf("failed to attach disk")
		return err
	}
	if err := os.MkdirAll(volPath, 0750); err != nil {
		glog.Errorf("failed to mkdir:%s", volPath)
		return err
	}
	// Perform a bind mount to the full path to allow duplicate mounts of the same disk.
	flags := uintptr(0)
	if disk.readOnly {
		flags = mount.FlagReadOnly
	}
	err = mounter.Mount(globalPDPath, volPath, "", mount.FlagBind|flags, "")
	if err != nil {
		glog.Errorf("failed to bind mount:%s", globalPDPath)
		return err
	}
	return nil
}
// diskTearDown is a utility to tear down a disk based filesystem: it unmounts
// the pod's bind mount and, when the last reference to the device is gone,
// detaches the disk from the host.
func diskTearDown(manager diskManager, disk iscsiDisk, volPath string, mounter mount.Interface) error {
	mountpoint, err := mounter.IsMountPoint(volPath)
	if err != nil {
		glog.Errorf("cannot validate mountpoint %s", volPath)
		return err
	}
	if !mountpoint {
		return os.Remove(volPath)
	}

	// Collect references to the device before unmounting so we can tell
	// whether this pod held the last bind mount.
	refs, err := mount.GetMountRefs(mounter, volPath)
	if err != nil {
		glog.Errorf("failed to get reference count %s", volPath)
		return err
	}
	if err := mounter.Unmount(volPath, 0); err != nil {
		glog.Errorf("failed to umount %s", volPath)
		return err
	}
	// If len(refs) is 1, then all bind mounts have been removed, and the
	// remaining reference is the global mount. It is safe to detach.
	if len(refs) == 1 {
		mntPath := refs[0]
		if err := manager.DetachDisk(disk, mntPath); err != nil {
			glog.Errorf("failed to detach disk from %s", mntPath)
			return err
		}
	}
	mountpoint, mntErr := mounter.IsMountPoint(volPath)
	if mntErr != nil {
		glog.Errorf("isMountpoint check failed: %v", mntErr)
		// Fix: return the error from this check; the previous code returned
		// the stale (nil) err here, silently reporting success on failure.
		return mntErr
	}
	if !mountpoint {
		if err := os.Remove(volPath); err != nil {
			return err
		}
	}
	return nil
}

168
pkg/volume/iscsi/iscsi.go Normal file
View File

@ -0,0 +1,168 @@
/*
Copyright 2015 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package iscsi
import (
"strconv"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/types"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util/exec"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util/mount"
"github.com/GoogleCloudPlatform/kubernetes/pkg/volume"
"github.com/golang/glog"
)
// ProbeVolumePlugins is the primary entrypoint for volume plugins: it returns
// the iSCSI plugin, wired with a real exec implementation and no host yet.
func ProbeVolumePlugins() []volume.VolumePlugin {
	plugin := &ISCSIPlugin{host: nil, exe: exec.New()}
	return []volume.VolumePlugin{plugin}
}
type ISCSIPlugin struct {
host volume.VolumeHost
exe exec.Interface
}
var _ volume.VolumePlugin = &ISCSIPlugin{}
const (
ISCSIPluginName = "kubernetes.io/iscsi"
)
func (plugin *ISCSIPlugin) Init(host volume.VolumeHost) {
plugin.host = host
}
func (plugin *ISCSIPlugin) Name() string {
return ISCSIPluginName
}
// CanSupport reports whether this plugin can handle the given volume spec:
// the spec must declare an ISCSI source and the iscsiadm utility must be
// runnable on the host.
func (plugin *ISCSIPlugin) CanSupport(spec *api.Volume) bool {
	if spec.ISCSI == nil {
		return false
	}
	// see if iscsiadm is there
	_, err := plugin.execCommand("iscsiadm", []string{"-h"})
	return err == nil
}
func (plugin *ISCSIPlugin) GetAccessModes() []api.AccessModeType {
return []api.AccessModeType{
api.ReadWriteOnce,
api.ReadOnlyMany,
}
}
func (plugin *ISCSIPlugin) NewBuilder(spec *api.Volume, podRef *api.ObjectReference) (volume.Builder, error) {
// Inject real implementations here, test through the internal function.
return plugin.newBuilderInternal(spec, podRef.UID, &ISCSIUtil{}, mount.New())
}
func (plugin *ISCSIPlugin) newBuilderInternal(spec *api.Volume, podUID types.UID, manager diskManager, mounter mount.Interface) (volume.Builder, error) {
lun := strconv.Itoa(spec.ISCSI.Lun)
return &iscsiDisk{
podUID: podUID,
volName: spec.Name,
portal: spec.ISCSI.TargetPortal,
iqn: spec.ISCSI.IQN,
lun: lun,
fsType: spec.ISCSI.FSType,
readOnly: spec.ISCSI.ReadOnly,
manager: manager,
mounter: mounter,
plugin: plugin,
}, nil
}
func (plugin *ISCSIPlugin) NewCleaner(volName string, podUID types.UID) (volume.Cleaner, error) {
// Inject real implementations here, test through the internal function.
return plugin.newCleanerInternal(volName, podUID, &ISCSIUtil{}, mount.New())
}
func (plugin *ISCSIPlugin) newCleanerInternal(volName string, podUID types.UID, manager diskManager, mounter mount.Interface) (volume.Cleaner, error) {
return &iscsiDisk{
podUID: podUID,
volName: volName,
manager: manager,
mounter: mounter,
plugin: plugin,
}, nil
}
// iscsiDisk represents a single iSCSI-backed volume mounted for a pod.
type iscsiDisk struct {
	volName string    // volume name from the pod spec
	podUID  types.UID // UID of the pod the volume belongs to
	portal  string    // iSCSI target portal (IP, or ip:port for non-default ports)
	iqn     string    // iSCSI Qualified Name of the target
	readOnly bool     // mount read-only when true
	lun     string    // target lun number, stored as a string
	fsType  string    // filesystem type to mount, e.g. "ext4"
	plugin  *ISCSIPlugin
	mounter mount.Interface
	// Utility interface that provides API calls to the provider to attach/detach disks.
	manager diskManager
}
func (iscsi *iscsiDisk) GetPath() string {
name := ISCSIPluginName
// safe to use PodVolumeDir now: volume teardown occurs before pod is cleaned up
return iscsi.plugin.host.GetPodVolumeDir(iscsi.podUID, util.EscapeQualifiedNameForDisk(name), iscsi.volName)
}
func (iscsi *iscsiDisk) SetUp() error {
return iscsi.SetUpAt(iscsi.GetPath())
}
// SetUpAt mounts the iSCSI disk at the given directory.
func (iscsi *iscsiDisk) SetUpAt(dir string) error {
	// diskSetUp checks mountpoints and prevent repeated calls
	err := diskSetUp(iscsi.manager, *iscsi, dir, iscsi.mounter)
	if err != nil {
		glog.Errorf("iscsi: failed to setup")
		return err
	}
	globalPDPath := iscsi.manager.MakeGlobalPDName(*iscsi)
	// make mountpoint rw/ro work as expected
	//FIXME revisit pkg/util/mount and ensure rw/ro is implemented as expected
	mode := "rw"
	if iscsi.readOnly {
		mode = "ro"
	}
	// The remount is best-effort: log a failure instead of silently discarding
	// it, but keep returning nil since the mount itself already succeeded.
	if out, err := iscsi.plugin.execCommand("mount", []string{"-o", "remount," + mode, globalPDPath, dir}); err != nil {
		glog.Errorf("iscsi: remount %s failed: %v, output: %s", dir, err, string(out))
	}
	return nil
}
// Unmounts the bind mount, and detaches the disk only if the disk
// resource was the last reference to that disk on the kubelet.
func (iscsi *iscsiDisk) TearDown() error {
	dir := iscsi.GetPath()
	return iscsi.TearDownAt(dir)
}
// TearDownAt unmounts the bind mount at dir and, via diskTearDown, detaches
// the disk when this was the last reference on the kubelet.
func (iscsi *iscsiDisk) TearDownAt(dir string) error {
	return diskTearDown(iscsi.manager, *iscsi, dir, iscsi.mounter)
}
// execCommand runs the given command through the plugin's exec interface and
// returns its combined stdout/stderr.
func (plugin *ISCSIPlugin) execCommand(command string, args []string) ([]byte, error) {
	return plugin.exe.Command(command, args...).CombinedOutput()
}

View File

@ -0,0 +1,131 @@
/*
Copyright 2015 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package iscsi
import (
"os"
"testing"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/types"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util/mount"
"github.com/GoogleCloudPlatform/kubernetes/pkg/volume"
)
// TestCanSupport verifies the iSCSI plugin registers under its canonical name.
func TestCanSupport(t *testing.T) {
	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", nil, nil))

	plug, err := plugMgr.FindPluginByName("kubernetes.io/iscsi")
	if err != nil {
		// Fatalf, not Errorf: plug is nil here and plug.Name() below would panic.
		t.Fatalf("Can't find the plugin by name")
	}
	if plug.Name() != "kubernetes.io/iscsi" {
		t.Errorf("Wrong name: %s", plug.Name())
	}
}
// fakeDiskManager is a diskManager stub that uses a fixed temp directory in
// place of real iscsiadm attach/detach operations.
type fakeDiskManager struct{}

// MakeGlobalPDName returns the fixed scratch path used by the fake.
func (fake *fakeDiskManager) MakeGlobalPDName(disk iscsiDisk) string {
	return "/tmp/fake_iscsi_path"
}

// AttachDisk simulates an attach by creating the global path.
func (fake *fakeDiskManager) AttachDisk(disk iscsiDisk) error {
	globalPath := disk.manager.MakeGlobalPDName(disk)
	return os.MkdirAll(globalPath, 0750)
}

// DetachDisk simulates a detach by removing the global path.
func (fake *fakeDiskManager) DetachDisk(disk iscsiDisk, mntPath string) error {
	globalPath := disk.manager.MakeGlobalPDName(disk)
	return os.RemoveAll(globalPath)
}
// TestPlugin exercises the full builder/cleaner round trip against the fake
// disk manager: SetUp must create the pod-local path and TearDown must
// remove it.
func TestPlugin(t *testing.T) {
	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", nil, nil))

	plug, err := plugMgr.FindPluginByName("kubernetes.io/iscsi")
	if err != nil {
		// Fatalf: plug is nil and is dereferenced below.
		t.Fatalf("Can't find the plugin by name")
	}
	spec := &api.Volume{
		Name: "vol1",
		VolumeSource: api.VolumeSource{
			ISCSI: &api.ISCSIVolumeSource{
				TargetPortal: "127.0.0.1:3260",
				IQN:          "iqn.2014-12.server:storage.target01",
				FSType:       "ext4",
				Lun:          0,
			},
		},
	}
	builder, err := plug.(*ISCSIPlugin).newBuilderInternal(spec, types.UID("poduid"), &fakeDiskManager{}, &mount.FakeMounter{})
	if err != nil {
		t.Errorf("Failed to make a new Builder: %v", err)
	}
	if builder == nil {
		// Fatal (and no stray %v verb): builder is used below.
		t.Fatal("Got a nil Builder")
	}
	path := builder.GetPath()
	if path != "/tmp/fake/pods/poduid/volumes/kubernetes.io~iscsi/vol1" {
		t.Errorf("Got unexpected path: %s", path)
	}
	if err := builder.SetUp(); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(path); err != nil {
		if os.IsNotExist(err) {
			t.Errorf("SetUp() failed, volume path not created: %s", path)
		} else {
			t.Errorf("SetUp() failed: %v", err)
		}
	}
	cleaner, err := plug.(*ISCSIPlugin).newCleanerInternal("vol1", types.UID("poduid"), &fakeDiskManager{}, &mount.FakeMounter{})
	if err != nil {
		t.Errorf("Failed to make a new Cleaner: %v", err)
	}
	if cleaner == nil {
		// Fatal (and no stray %v verb): cleaner is used below.
		t.Fatal("Got a nil Cleaner")
	}
	if err := cleaner.TearDown(); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(path); err == nil {
		t.Errorf("TearDown() failed, volume path still exists: %s", path)
	} else if !os.IsNotExist(err) {
		t.Errorf("TearDown() failed: %v", err)
	}
}

View File

@ -0,0 +1,156 @@
/*
Copyright 2015 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package iscsi
import (
"errors"
"os"
"path"
"strings"
"time"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util/mount"
"github.com/GoogleCloudPlatform/kubernetes/pkg/volume"
"github.com/golang/glog"
)
// waitForPathToExist stats devicePath up to maxRetries times, sleeping one
// second between attempts. It returns true as soon as the path exists, and
// false if the path never appears or stat fails for a reason other than
// non-existence.
func waitForPathToExist(devicePath string, maxRetries int) bool {
	for i := 0; i < maxRetries; i++ {
		_, err := os.Stat(devicePath)
		if err == nil {
			return true
		}
		if !os.IsNotExist(err) {
			// Permission or I/O errors will not resolve by waiting.
			return false
		}
		// Don't waste a full second after the final failed attempt.
		if i < maxRetries-1 {
			time.Sleep(time.Second)
		}
	}
	return false
}
// getDevicePrefixRefCount returns how many entries in the mounter's mount
// list (i.e. /proc/mounts) have a device path beginning with deviceNamePrefix.
//
// iSCSI creates one disk path per LUN, all sharing the same portal/iqn prefix
// and differing only in the trailing LUN number. Before logging out of a
// target we must know that *no* LUN of that target is still mounted, so the
// count is taken by prefix rather than by exact device match — an exact match
// could log out a target whose other LUNs are still in use.
func getDevicePrefixRefCount(mounter mount.Interface, deviceNamePrefix string) (int, error) {
	mps, err := mounter.List()
	if err != nil {
		return -1, err
	}
	// Tally every mount whose device shares the prefix.
	count := 0
	for _, mp := range mps {
		if strings.HasPrefix(mp.Device, deviceNamePrefix) {
			count++
		}
	}
	return count, nil
}
// makePDNameInternal builds the kubelet-global mount point for a disk, e.g.
// /var/lib/kubelet/plugins/kubernetes.io/iscsi/iscsi/<portal>-iqn-<iqn>-lun-<lun>
func makePDNameInternal(host volume.VolumeHost, portal string, iqn string, lun string) string {
	name := portal + "-iqn-" + iqn + "-lun-" + lun
	return path.Join(host.GetPluginDir(ISCSIPluginName), "iscsi", name)
}
// ISCSIUtil is the production diskManager implementation; it shells out to
// iscsiadm and mounts real devices.
type ISCSIUtil struct{}

// MakeGlobalPDName returns the kubelet-global mount point for the disk.
func (util *ISCSIUtil) MakeGlobalPDName(iscsi iscsiDisk) string {
	return makePDNameInternal(iscsi.plugin.host, iscsi.portal, iscsi.iqn, iscsi.lun)
}
// AttachDisk makes the iSCSI LUN available on this host (discovering and
// logging in to the target if the device node is not already present) and
// mounts it at the kubelet-global path.
func (util *ISCSIUtil) AttachDisk(iscsi iscsiDisk) error {
	// udev creates /dev/disk/by-path/ip-<portal>-iscsi-<iqn>-lun-<lun> for
	// an attached LUN; its presence means the target is already logged in.
	devicePath := strings.Join([]string{"/dev/disk/by-path/ip", iscsi.portal, "iscsi", iscsi.iqn, "lun", iscsi.lun}, "-")
	if !waitForPathToExist(devicePath, 1) {
		// discover iscsi target
		_, err := iscsi.plugin.execCommand("iscsiadm", []string{"-m", "discovery", "-t", "sendtargets", "-p", iscsi.portal})
		if err != nil {
			glog.Errorf("iscsi: failed to sendtargets to portal %s error:%v", iscsi.portal, err)
			return err
		}
		// login to iscsi target
		_, err = iscsi.plugin.execCommand("iscsiadm", []string{"-m", "node", "-p", iscsi.portal, "-T", iscsi.iqn, "--login"})
		if err != nil {
			glog.Errorf("iscsi: failed to attach disk:Error: %v", err)
			return err
		}
		exist := waitForPathToExist(devicePath, 10)
		if !exist {
			return errors.New("Could not attach disk: Timeout after 10s")
		}
	}
	// mount it
	globalPDPath := iscsi.manager.MakeGlobalPDName(iscsi)
	mountpoint, err := iscsi.mounter.IsMountPoint(globalPDPath)
	if err != nil && !os.IsNotExist(err) {
		// Bug fix: the original discarded this error and could act on a
		// meaningless mountpoint answer. A missing path is fine (created below).
		glog.Errorf("iscsi: cannot validate mountpoint %s, error %v", globalPDPath, err)
		return err
	}
	if mountpoint {
		glog.Infof("iscsi: %s already mounted", globalPDPath)
		return nil
	}
	if err := os.MkdirAll(globalPDPath, 0750); err != nil {
		// Include the error value (the original log line dropped it).
		glog.Errorf("iscsi: failed to mkdir %s, error %v", globalPDPath, err)
		return err
	}
	err = iscsi.mounter.Mount(devicePath, globalPDPath, iscsi.fsType, uintptr(0), "")
	if err != nil {
		glog.Errorf("iscsi: failed to mount iscsi volume %s [%s] to %s, error %v", devicePath, iscsi.fsType, globalPDPath, err)
	}
	return err
}
// DetachDisk unmounts the bind mount at mntPath and, when no other mount on
// the host still references the same iSCSI target, logs the initiator out of
// the portal/iqn pair.
func (util *ISCSIUtil) DetachDisk(iscsi iscsiDisk, mntPath string) error {
	// cnt is the number of remaining mounts of this device, including mntPath.
	device, cnt, err := mount.GetDeviceNameFromMount(iscsi.mounter, mntPath)
	if err != nil {
		glog.Errorf("iscsi detach disk: failed to get device from mnt: %s\nError: %v", mntPath, err)
		return err
	}
	if err = iscsi.mounter.Unmount(mntPath, 0); err != nil {
		glog.Errorf("iscsi detach disk: failed to umount: %s\nError: %v", mntPath, err)
		return err
	}
	cnt--
	// if device is no longer used, see if need to logout the target
	if cnt == 0 {
		// The index math below assumes the device path layout produced by
		// AttachDisk: /dev/disk/by-path/ip-<portal>-iscsi-<iqn>-lun-<N>.
		// strip -lun- from device path
		ind := strings.LastIndex(device, "-lun-")
		// NOTE(review): device[:ind-1] drops the final character of the IQN;
		// device[:ind] would be the exact per-target prefix. It still works as
		// a prefix match, but confirm the extra -1 is intentional.
		prefix := device[:(ind - 1)]
		refCount, err := getDevicePrefixRefCount(iscsi.mounter, prefix)
		if err == nil && refCount == 0 {
			// this portal/iqn are no longer referenced, log out
			// extract portal and iqn from device path
			ind1 := strings.LastIndex(device, "-iscsi-")
			portal := device[(len("/dev/disk/by-path/ip-")):ind1]
			iqn := device[ind1+len("-iscsi-") : ind]
			glog.Infof("iscsi: log out target %s iqn %s", portal, iqn)
			// Logout is best-effort: a failure is logged but the unmount above
			// still counts as success, so nil is returned regardless.
			_, err = iscsi.plugin.execCommand("iscsiadm", []string{"-m", "node", "-p", portal, "-T", iqn, "--logout"})
			if err != nil {
				glog.Errorf("iscsi: failed to detach disk Error: %v", err)
			}
		}
	}
	return nil
}

View File

@ -0,0 +1,54 @@
/*
Copyright 2015 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package iscsi
import (
"testing"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util/mount"
)
// TestGetDevicePrefixRefCount checks that all mounts sharing a device-path
// prefix are counted, across distinct LUN suffixes.
func TestGetDevicePrefixRefCount(t *testing.T) {
	mounts := []mount.MountPoint{
		{Device: "/dev/disk/by-path/prefix-lun-1", Path: "/mnt/111"},
		{Device: "/dev/disk/by-path/prefix-lun-1", Path: "/mnt/222"},
		{Device: "/dev/disk/by-path/prefix-lun-0", Path: "/mnt/333"},
		{Device: "/dev/disk/by-path/prefix-lun-0", Path: "/mnt/444"},
	}
	fm := &mount.FakeMounter{MountPoints: mounts}

	tests := []struct {
		devicePrefix string
		expectedRefs int
	}{
		{devicePrefix: "/dev/disk/by-path/prefix", expectedRefs: 4},
	}
	for i, test := range tests {
		refs, err := getDevicePrefixRefCount(fm, test.devicePrefix)
		if err != nil || refs != test.expectedRefs {
			t.Errorf("%d. GetDevicePrefixRefCount(%s) = %d, %v; expected %d, nil", i, test.devicePrefix, refs, err, test.expectedRefs)
		}
	}
}