support fibre channel volume

Signed-off-by: Huamin Chen <hchen@redhat.com>
Huamin Chen 2015-08-11 11:19:29 -04:00
parent 0f8cc8926f
commit ed9a1bbd3a
22 changed files with 1375 additions and 1 deletion


@ -12123,6 +12123,10 @@
"$ref": "v1.CephFSVolumeSource",
"description": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime"
},
"fc": {
"$ref": "v1.FCVolumeSource",
"description": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod."
},
"accessModes": {
"type": "array",
"items": {
@ -12396,6 +12400,37 @@
}
}
},
"v1.FCVolumeSource": {
"id": "v1.FCVolumeSource",
"description": "A Fibre Channel Disk can only be mounted as read/write once.",
"required": [
"targetWWNs",
"lun",
"fsType"
],
"properties": {
"targetWWNs": {
"type": "array",
"items": {
"type": "string"
},
"description": "Required: FC target world wide names (WWNs)"
},
"lun": {
"type": "integer",
"format": "int32",
"description": "Required: FC target lun number"
},
"fsType": {
"type": "string",
"description": "Required: Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\""
},
"readOnly": {
"type": "boolean",
"description": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."
}
}
},
"v1.PersistentVolumeStatus": {
"id": "v1.PersistentVolumeStatus",
"description": "PersistentVolumeStatus is the current status of a persistent volume.",
@ -12606,6 +12641,10 @@
"downwardAPI": {
"$ref": "v1.DownwardAPIVolumeSource",
"description": "DownwardAPI represents downward API about the pod that should populate this volume"
},
"fc": {
"$ref": "v1.FCVolumeSource",
"description": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod."
}
}
},


@ -30,6 +30,7 @@ import (
"k8s.io/kubernetes/pkg/volume/cinder"
"k8s.io/kubernetes/pkg/volume/downwardapi"
"k8s.io/kubernetes/pkg/volume/empty_dir"
"k8s.io/kubernetes/pkg/volume/fc"
"k8s.io/kubernetes/pkg/volume/gce_pd"
"k8s.io/kubernetes/pkg/volume/git_repo"
"k8s.io/kubernetes/pkg/volume/glusterfs"
@ -67,6 +68,7 @@ func ProbeVolumePlugins() []volume.VolumePlugin {
allPlugins = append(allPlugins, cinder.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, cephfs.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, downwardapi.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, fc.ProbeVolumePlugins()...)
return allPlugins
}


@ -340,6 +340,9 @@ func TestExampleObjectSchemas(t *testing.T) {
"cephfs": &api.Pod{},
"cephfs-with-secret": &api.Pod{},
},
"../examples/fibre_channel": {
"fc": &api.Pod{},
},
}
capabilities.SetForTests(capabilities.Capabilities{


@ -0,0 +1,77 @@
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
<!-- BEGIN STRIP_FOR_RELEASE -->
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
width="25" height="25">
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
width="25" height="25">
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
width="25" height="25">
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
width="25" height="25">
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
width="25" height="25">
<h2>PLEASE NOTE: This document applies to the HEAD of the source tree</h2>
If you are using a released version of Kubernetes, you should
refer to the docs that go with that version.
<strong>
The latest 1.0.x release of this document can be found
[here](http://releases.k8s.io/release-1.0/examples/fibre_channel/README.md).
Documentation for other releases can be found at
[releases.k8s.io](http://releases.k8s.io).
</strong>
--
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
## Step 1. Setting up Fibre Channel Target
On your FC SAN Zone manager, allocate and mask LUNs so Kubernetes hosts can access them.
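Zoning is done against the initiator (host-side) WWNs. If you need to look them up on a Kubernetes host, the short sketch below reads them from sysfs. This is an illustration only, not part of this commit; it assumes a Linux host with an FC HBA driver loaded, so that `/sys/class/fc_host` exists.

```go
// Sketch: print this host's FC initiator port WWNs from sysfs.
// Assumes Linux with an FC HBA driver loaded (/sys/class/fc_host present).
package main

import (
	"fmt"
	"io/ioutil"
	"path/filepath"
	"strings"
)

func main() {
	hosts, err := ioutil.ReadDir("/sys/class/fc_host")
	if err != nil {
		fmt.Println("no FC HBA found:", err)
		return
	}
	for _, h := range hosts {
		// port_name holds the initiator WWN, e.g. 0x500a0982991b8dc5
		data, err := ioutil.ReadFile(filepath.Join("/sys/class/fc_host", h.Name(), "port_name"))
		if err != nil {
			continue
		}
		fmt.Printf("%s: %s\n", h.Name(), strings.TrimSpace(string(data)))
	}
}
```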
## Step 2. Creating the Pod with Fibre Channel persistent storage
Once you have installed a Fibre Channel initiator and a Kubernetes build that includes this plugin, you can create a pod based on my example [fc.yaml](fc.yaml). In the pod definition, you need to provide *targetWWNs* (an array of the Fibre Channel target's World Wide Names), the *lun* number, the *fsType* of the filesystem that has been created on the LUN, and an optional *readOnly* boolean.
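If you build API objects in Go rather than YAML, the same volume source can be expressed with the internal `api` types added by this commit. Here is a minimal sketch mirroring [fc.yaml](fc.yaml):

```go
// Sketch: build the fc.yaml pod programmatically with the internal API types.
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
)

func main() {
	lun := 2 // Lun is a *int so an unset lun can be told apart from lun 0
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{Name: "fc"},
		Spec: api.PodSpec{
			Containers: []api.Container{{
				Name:         "fc",
				Image:        "kubernetes/pause",
				VolumeMounts: []api.VolumeMount{{Name: "fc-vol", MountPath: "/mnt/fc"}},
			}},
			Volumes: []api.Volume{{
				Name: "fc-vol",
				VolumeSource: api.VolumeSource{
					FC: &api.FCVolumeSource{
						TargetWWNs: []string{"500a0982991b8dc5", "500a0982891b8dc5"},
						Lun:        &lun,
						FSType:     "ext4",
						ReadOnly:   true,
					},
				},
			}},
		},
	}
	fmt.Printf("%+v\n", pod)
}
```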
Once your pod definition is ready, create the pod from the Kubernetes master:
```console
kubectl create -f ./your_new_pod.json
```
Here is my command and output:
```console
# kubectl create -f examples/fibre_channel/fc.yaml
# kubectl get pods
NAME READY STATUS RESTARTS AGE
fcpd 2/2 Running 0 10m
```
On the Kubernetes host, I got these entries in the `mount` output:
```console
# mount | grep /var/lib/kubelet/plugins/kubernetes.io
/dev/mapper/360a98000324669436c2b45666c567946 on /var/lib/kubelet/plugins/kubernetes.io/fc/500a0982991b8dc5-lun-2 type ext4 (ro,relatime,seclabel,stripe=16,data=ordered)
/dev/mapper/360a98000324669436c2b45666c567944 on /var/lib/kubelet/plugins/kubernetes.io/fc/500a0982991b8dc5-lun-1 type ext4 (rw,relatime,seclabel,stripe=16,data=ordered)
```
If you SSH into that machine, you can run `docker ps` to see the pod's containers.
```console
# docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
090ac457ddc2 kubernetes/pause "/pause" 12 minutes ago Up 12 minutes k8s_fcpd-rw.aae720ec_fcpd_default_4024318f-4121-11e5-a294-e839352ddd54_99eb5415
5e2629cf3e7b kubernetes/pause "/pause" 12 minutes ago Up 12 minutes k8s_fcpd-ro.857720dc_fcpd_default_4024318f-4121-11e5-a294-e839352ddd54_c0175742
2948683253f7 gcr.io/google_containers/pause:0.8.0 "/pause" 12 minutes ago Up 12 minutes k8s_POD.7be6d81d_fcpd_default_4024318f-4121-11e5-a294-e839352ddd54_8d9dd7bf
```
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/fibre_channel/README.md?pixel)]()
<!-- END MUNGE: GENERATED_ANALYTICS -->


@ -0,0 +1,18 @@
apiVersion: v1
kind: Pod
metadata:
name: fc
spec:
containers:
- image: kubernetes/pause
name: fc
volumeMounts:
- name: fc-vol
mountPath: /mnt/fc
volumes:
- name: fc-vol
fc:
targetWWNs: ['500a0982991b8dc5', '500a0982891b8dc5']
lun: 2
fsType: ext4
readOnly: true


@ -551,6 +551,26 @@ func deepCopy_api_ExecAction(in ExecAction, out *ExecAction, c *conversion.Clone
return nil
}
func deepCopy_api_FCVolumeSource(in FCVolumeSource, out *FCVolumeSource, c *conversion.Cloner) error {
if in.TargetWWNs != nil {
out.TargetWWNs = make([]string, len(in.TargetWWNs))
for i := range in.TargetWWNs {
out.TargetWWNs[i] = in.TargetWWNs[i]
}
} else {
out.TargetWWNs = nil
}
if in.Lun != nil {
out.Lun = new(int)
*out.Lun = *in.Lun
} else {
out.Lun = nil
}
out.FSType = in.FSType
out.ReadOnly = in.ReadOnly
return nil
}
func deepCopy_api_GCEPersistentDiskVolumeSource(in GCEPersistentDiskVolumeSource, out *GCEPersistentDiskVolumeSource, c *conversion.Cloner) error {
out.PDName = in.PDName
out.FSType = in.FSType
@ -1267,6 +1287,14 @@ func deepCopy_api_PersistentVolumeSource(in PersistentVolumeSource, out *Persist
} else {
out.CephFS = nil
}
if in.FC != nil {
out.FC = new(FCVolumeSource)
if err := deepCopy_api_FCVolumeSource(*in.FC, out.FC, c); err != nil {
return err
}
} else {
out.FC = nil
}
return nil
}
@ -2155,6 +2183,14 @@ func deepCopy_api_VolumeSource(in VolumeSource, out *VolumeSource, c *conversion
} else {
out.DownwardAPI = nil
}
if in.FC != nil {
out.FC = new(FCVolumeSource)
if err := deepCopy_api_FCVolumeSource(*in.FC, out.FC, c); err != nil {
return err
}
} else {
out.FC = nil
}
return nil
}
@ -2235,6 +2271,7 @@ func init() {
deepCopy_api_EventList,
deepCopy_api_EventSource,
deepCopy_api_ExecAction,
deepCopy_api_FCVolumeSource,
deepCopy_api_GCEPersistentDiskVolumeSource,
deepCopy_api_GitRepoVolumeSource,
deepCopy_api_GlusterfsVolumeSource,


@ -206,6 +206,8 @@ type VolumeSource struct {
// DownwardAPI represents metadata about the pod that should populate this volume
DownwardAPI *DownwardAPIVolumeSource `json:"downwardAPI,omitempty"`
// FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.
FC *FCVolumeSource `json:"fc,omitempty"`
}
// Similar to VolumeSource but meant for the administrator who creates PVs.
@ -235,6 +237,8 @@ type PersistentVolumeSource struct {
Cinder *CinderVolumeSource `json:"cinder,omitempty"`
// CephFS represents a Ceph FS mount on the host that shares a pod's lifetime
CephFS *CephFSVolumeSource `json:"cephfs,omitempty"`
// FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.
FC *FCVolumeSource `json:"fc,omitempty"`
}
type PersistentVolumeClaimVolumeSource struct {
@ -452,6 +456,22 @@ type ISCSIVolumeSource struct {
ReadOnly bool `json:"readOnly,omitempty"`
}
// A Fibre Channel Disk can only be mounted as read/write once.
type FCVolumeSource struct {
// Required: FC target world wide names (WWNs)
TargetWWNs []string `json:"targetWWNs"`
// Required: FC target lun number
Lun *int `json:"lun"`
// Required: Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs"
// TODO: how do we prevent errors in the filesystem from compromising the machine
FSType string `json:"fsType"`
// Optional: Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
ReadOnly bool `json:"readOnly,omitempty"`
}
// AWSElasticBlockStoreVolumeSource represents a Persistent Disk resource in AWS.
//
// An AWS EBS disk must exist and be formatted before mounting to a container.


@ -639,6 +639,29 @@ func convert_api_ExecAction_To_v1_ExecAction(in *api.ExecAction, out *ExecAction
return nil
}
func convert_api_FCVolumeSource_To_v1_FCVolumeSource(in *api.FCVolumeSource, out *FCVolumeSource, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*api.FCVolumeSource))(in)
}
if in.TargetWWNs != nil {
out.TargetWWNs = make([]string, len(in.TargetWWNs))
for i := range in.TargetWWNs {
out.TargetWWNs[i] = in.TargetWWNs[i]
}
} else {
out.TargetWWNs = nil
}
if in.Lun != nil {
out.Lun = new(int)
*out.Lun = *in.Lun
} else {
out.Lun = nil
}
out.FSType = in.FSType
out.ReadOnly = in.ReadOnly
return nil
}
func convert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in *api.GCEPersistentDiskVolumeSource, out *GCEPersistentDiskVolumeSource, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*api.GCEPersistentDiskVolumeSource))(in)
@ -1458,6 +1481,14 @@ func convert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource(in *api.Per
} else {
out.CephFS = nil
}
if in.FC != nil {
out.FC = new(FCVolumeSource)
if err := convert_api_FCVolumeSource_To_v1_FCVolumeSource(in.FC, out.FC, s); err != nil {
return err
}
} else {
out.FC = nil
}
return nil
}
@ -2385,6 +2416,14 @@ func convert_api_VolumeSource_To_v1_VolumeSource(in *api.VolumeSource, out *Volu
} else {
out.DownwardAPI = nil
}
if in.FC != nil {
out.FC = new(FCVolumeSource)
if err := convert_api_FCVolumeSource_To_v1_FCVolumeSource(in.FC, out.FC, s); err != nil {
return err
}
} else {
out.FC = nil
}
return nil
}
@ -3001,6 +3040,29 @@ func convert_v1_ExecAction_To_api_ExecAction(in *ExecAction, out *api.ExecAction
return nil
}
func convert_v1_FCVolumeSource_To_api_FCVolumeSource(in *FCVolumeSource, out *api.FCVolumeSource, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*FCVolumeSource))(in)
}
if in.TargetWWNs != nil {
out.TargetWWNs = make([]string, len(in.TargetWWNs))
for i := range in.TargetWWNs {
out.TargetWWNs[i] = in.TargetWWNs[i]
}
} else {
out.TargetWWNs = nil
}
if in.Lun != nil {
out.Lun = new(int)
*out.Lun = *in.Lun
} else {
out.Lun = nil
}
out.FSType = in.FSType
out.ReadOnly = in.ReadOnly
return nil
}
func convert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource(in *GCEPersistentDiskVolumeSource, out *api.GCEPersistentDiskVolumeSource, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*GCEPersistentDiskVolumeSource))(in)
@ -3820,6 +3882,14 @@ func convert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource(in *Persist
} else {
out.CephFS = nil
}
if in.FC != nil {
out.FC = new(api.FCVolumeSource)
if err := convert_v1_FCVolumeSource_To_api_FCVolumeSource(in.FC, out.FC, s); err != nil {
return err
}
} else {
out.FC = nil
}
return nil
}
@ -4747,6 +4817,14 @@ func convert_v1_VolumeSource_To_api_VolumeSource(in *VolumeSource, out *api.Volu
} else {
out.DownwardAPI = nil
}
if in.FC != nil {
out.FC = new(api.FCVolumeSource)
if err := convert_v1_FCVolumeSource_To_api_FCVolumeSource(in.FC, out.FC, s); err != nil {
return err
}
} else {
out.FC = nil
}
return nil
}
@ -4783,6 +4861,7 @@ func init() {
convert_api_EventSource_To_v1_EventSource,
convert_api_Event_To_v1_Event,
convert_api_ExecAction_To_v1_ExecAction,
convert_api_FCVolumeSource_To_v1_FCVolumeSource,
convert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource,
convert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource,
convert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource,
@ -4897,6 +4976,7 @@ func init() {
convert_v1_EventSource_To_api_EventSource,
convert_v1_Event_To_api_Event,
convert_v1_ExecAction_To_api_ExecAction,
convert_v1_FCVolumeSource_To_api_FCVolumeSource,
convert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource,
convert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource,
convert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource,


@ -587,6 +587,26 @@ func deepCopy_v1_ExecAction(in ExecAction, out *ExecAction, c *conversion.Cloner
return nil
}
func deepCopy_v1_FCVolumeSource(in FCVolumeSource, out *FCVolumeSource, c *conversion.Cloner) error {
if in.TargetWWNs != nil {
out.TargetWWNs = make([]string, len(in.TargetWWNs))
for i := range in.TargetWWNs {
out.TargetWWNs[i] = in.TargetWWNs[i]
}
} else {
out.TargetWWNs = nil
}
if in.Lun != nil {
out.Lun = new(int)
*out.Lun = *in.Lun
} else {
out.Lun = nil
}
out.FSType = in.FSType
out.ReadOnly = in.ReadOnly
return nil
}
func deepCopy_v1_GCEPersistentDiskVolumeSource(in GCEPersistentDiskVolumeSource, out *GCEPersistentDiskVolumeSource, c *conversion.Cloner) error {
out.PDName = in.PDName
out.FSType = in.FSType
@ -1287,6 +1307,14 @@ func deepCopy_v1_PersistentVolumeSource(in PersistentVolumeSource, out *Persiste
} else {
out.CephFS = nil
}
if in.FC != nil {
out.FC = new(FCVolumeSource)
if err := deepCopy_v1_FCVolumeSource(*in.FC, out.FC, c); err != nil {
return err
}
} else {
out.FC = nil
}
return nil
}
@ -2181,6 +2209,14 @@ func deepCopy_v1_VolumeSource(in VolumeSource, out *VolumeSource, c *conversion.
} else {
out.DownwardAPI = nil
}
if in.FC != nil {
out.FC = new(FCVolumeSource)
if err := deepCopy_v1_FCVolumeSource(*in.FC, out.FC, c); err != nil {
return err
}
} else {
out.FC = nil
}
return nil
}
@ -2240,6 +2276,7 @@ func init() {
deepCopy_v1_EventList,
deepCopy_v1_EventSource,
deepCopy_v1_ExecAction,
deepCopy_v1_FCVolumeSource,
deepCopy_v1_GCEPersistentDiskVolumeSource,
deepCopy_v1_GitRepoVolumeSource,
deepCopy_v1_GlusterfsVolumeSource,


@ -252,6 +252,8 @@ type VolumeSource struct {
// DownwardAPI represents downward API about the pod that should populate this volume
DownwardAPI *DownwardAPIVolumeSource `json:"downwardAPI,omitempty"`
// FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.
FC *FCVolumeSource `json:"fc,omitempty"`
}
// PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace.
@ -302,6 +304,8 @@ type PersistentVolumeSource struct {
Cinder *CinderVolumeSource `json:"cinder,omitempty"`
// CephFS represents a Ceph FS mount on the host that shares a pod's lifetime
CephFS *CephFSVolumeSource `json:"cephfs,omitempty"`
// FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.
FC *FCVolumeSource `json:"fc,omitempty"`
}
// PersistentVolume (PV) is a storage resource provisioned by an administrator.
@ -706,6 +710,22 @@ type ISCSIVolumeSource struct {
ReadOnly bool `json:"readOnly,omitempty"`
}
// A Fibre Channel Disk can only be mounted as read/write once.
type FCVolumeSource struct {
// Required: FC target world wide names (WWNs)
TargetWWNs []string `json:"targetWWNs"`
// Required: FC target lun number
Lun *int `json:"lun"`
// Required: Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs"
// TODO: how do we prevent errors in the filesystem from compromising the machine
FSType string `json:"fsType"`
// Optional: Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
ReadOnly bool `json:"readOnly,omitempty"`
}
// ContainerPort represents a network port in a single container.
type ContainerPort struct {
// If specified, this must be an IANA_SVC_NAME and unique within the pod. Each


@ -387,6 +387,18 @@ func (ExecAction) SwaggerDoc() map[string]string {
return map_ExecAction
}
var map_FCVolumeSource = map[string]string{
"": "A Fibre Channel Disk can only be mounted as read/write once.",
"targetWWNs": "Required: FC target world wide names (WWNs)",
"lun": "Required: FC target lun number",
"fsType": "Required: Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\"",
"readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",
}
func (FCVolumeSource) SwaggerDoc() map[string]string {
return map_FCVolumeSource
}
var map_GCEPersistentDiskVolumeSource = map[string]string{
"": "GCEPersistentDiskVolumeSource represents a Persistent Disk resource in Google Compute Engine.\n\nA GCE PD must exist and be formatted before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once.",
"pdName": "Unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#gcepersistentdisk",
@ -844,6 +856,7 @@ var map_PersistentVolumeSource = map[string]string{
"iscsi": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin.",
"cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md",
"cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime",
"fc": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.",
}
func (PersistentVolumeSource) SwaggerDoc() map[string]string {
@ -1376,6 +1389,7 @@ var map_VolumeSource = map[string]string{
"cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md",
"cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime",
"downwardAPI": "DownwardAPI represents downward API about the pod that should populate this volume",
"fc": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.",
}
func (VolumeSource) SwaggerDoc() map[string]string {


@ -390,6 +390,10 @@ func validateSource(source *api.VolumeSource) errs.ValidationErrorList {
numVolumes++
allErrs = append(allErrs, validateDownwardAPIVolumeSource(source.DownwardAPI).Prefix("downwardApi")...)
}
if source.FC != nil {
numVolumes++
allErrs = append(allErrs, validateFCVolumeSource(source.FC).Prefix("fc")...)
}
if numVolumes != 1 {
allErrs = append(allErrs, errs.NewFieldInvalid("", source, "exactly 1 volume type is required"))
}
@ -430,6 +434,25 @@ func validateISCSIVolumeSource(iscsi *api.ISCSIVolumeSource) errs.ValidationErro
return allErrs
}
func validateFCVolumeSource(fc *api.FCVolumeSource) errs.ValidationErrorList {
allErrs := errs.ValidationErrorList{}
if len(fc.TargetWWNs) < 1 {
allErrs = append(allErrs, errs.NewFieldRequired("targetWWNs"))
}
if fc.FSType == "" {
allErrs = append(allErrs, errs.NewFieldRequired("fsType"))
}
if fc.Lun == nil {
allErrs = append(allErrs, errs.NewFieldRequired("lun"))
} else {
if *fc.Lun < 0 || *fc.Lun > 255 {
allErrs = append(allErrs, errs.NewFieldInvalid("lun", fc.Lun, "lun must be between 0 and 255"))
}
}
return allErrs
}
func validateGCEPersistentDiskVolumeSource(PD *api.GCEPersistentDiskVolumeSource) errs.ValidationErrorList {
allErrs := errs.ValidationErrorList{}
if PD.PDName == "" {
@ -624,6 +647,10 @@ func ValidatePersistentVolume(pv *api.PersistentVolume) errs.ValidationErrorList
numVolumes++
allErrs = append(allErrs, validateCinderVolumeSource(pv.Spec.Cinder).Prefix("cinder")...)
}
if pv.Spec.FC != nil {
numVolumes++
allErrs = append(allErrs, validateFCVolumeSource(pv.Spec.FC).Prefix("fc")...)
}
if numVolumes != 1 {
allErrs = append(allErrs, errs.NewFieldInvalid("", pv.Spec.PersistentVolumeSource, "exactly 1 volume type is required"))
}


@ -446,6 +446,7 @@ func TestValidatePersistentVolumeClaim(t *testing.T) {
}
func TestValidateVolumes(t *testing.T) {
lun := 1
successCase := []api.Volume{
{Name: "abc", VolumeSource: api.VolumeSource{HostPath: &api.HostPathVolumeSource{Path: "/mnt/path1"}}},
{Name: "123", VolumeSource: api.VolumeSource{HostPath: &api.HostPathVolumeSource{Path: "/mnt/path2"}}},
@ -486,12 +487,13 @@ func TestValidateVolumes(t *testing.T) {
APIVersion: "v1",
FieldPath: "metadata.labels"}},
}}}},
{Name: "fc", VolumeSource: api.VolumeSource{FC: &api.FCVolumeSource{[]string{"some_wwn"}, &lun, "ext4", false}}},
}
names, errs := validateVolumes(successCase)
if len(errs) != 0 {
t.Errorf("expected success: %v", errs)
}
if len(names) != len(successCase) || !names.HasAll("abc", "123", "abc-123", "empty", "gcepd", "gitrepo", "secret", "iscsidisk", "cinder", "cephfs") {
if len(names) != len(successCase) || !names.HasAll("abc", "123", "abc-123", "empty", "gcepd", "gitrepo", "secret", "iscsidisk", "cinder", "cephfs", "fc") {
t.Errorf("wrong names result: %v", names)
}
emptyVS := api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}
@ -527,6 +529,8 @@ func TestValidateVolumes(t *testing.T) {
APIVersion: "v1",
FieldPath: "metadata.labels"}}},
}}
zeroWWN := api.VolumeSource{FC: &api.FCVolumeSource{TargetWWNs: []string{}, Lun: &lun, FSType: "ext4", ReadOnly: false}}
emptyLun := api.VolumeSource{FC: &api.FCVolumeSource{TargetWWNs: []string{"wwn"}, Lun: nil, FSType: "ext4", ReadOnly: false}}
errorCases := map[string]struct {
V []api.Volume
T errors.ValidationErrorType
@ -549,6 +553,8 @@ func TestValidateVolumes(t *testing.T) {
"dot dot path": {[]api.Volume{{Name: "dotdotpath", VolumeSource: dotDotInPath}}, errors.ValidationErrorTypeInvalid, "[0].source.downwardApi.path", "must not contain \"..\"."},
"dot dot file name": {[]api.Volume{{Name: "dotdotfilename", VolumeSource: dotDotPathName}}, errors.ValidationErrorTypeInvalid, "[0].source.downwardApi.path", "must not start with \"..\"."},
"dot dot first level dirent ": {[]api.Volume{{Name: "dotdotdirfilename", VolumeSource: dotDotFirstLevelDirent}}, errors.ValidationErrorTypeInvalid, "[0].source.downwardApi.path", "must not start with \"..\"."},
"empty wwn": {[]api.Volume{{Name: "badimage", VolumeSource: zeroWWN}}, errors.ValidationErrorTypeRequired, "[0].source.fc.targetWWNs", ""},
"empty lun": {[]api.Volume{{Name: "badimage", VolumeSource: emptyLun}}, errors.ValidationErrorTypeRequired, "[0].source.fc.lun", ""},
}
for k, v := range errorCases {
_, errs := validateVolumes(v.V)


@ -253,6 +253,26 @@ func deepCopy_api_ExecAction(in api.ExecAction, out *api.ExecAction, c *conversi
return nil
}
func deepCopy_api_FCVolumeSource(in api.FCVolumeSource, out *api.FCVolumeSource, c *conversion.Cloner) error {
if in.TargetWWNs != nil {
out.TargetWWNs = make([]string, len(in.TargetWWNs))
for i := range in.TargetWWNs {
out.TargetWWNs[i] = in.TargetWWNs[i]
}
} else {
out.TargetWWNs = nil
}
if in.Lun != nil {
out.Lun = new(int)
*out.Lun = *in.Lun
} else {
out.Lun = nil
}
out.FSType = in.FSType
out.ReadOnly = in.ReadOnly
return nil
}
func deepCopy_api_GCEPersistentDiskVolumeSource(in api.GCEPersistentDiskVolumeSource, out *api.GCEPersistentDiskVolumeSource, c *conversion.Cloner) error {
out.PDName = in.PDName
out.FSType = in.FSType
@ -728,6 +748,14 @@ func deepCopy_api_VolumeSource(in api.VolumeSource, out *api.VolumeSource, c *co
} else {
out.DownwardAPI = nil
}
if in.FC != nil {
out.FC = new(api.FCVolumeSource)
if err := deepCopy_api_FCVolumeSource(*in.FC, out.FC, c); err != nil {
return err
}
} else {
out.FC = nil
}
return nil
}
@ -1281,6 +1309,7 @@ func init() {
deepCopy_api_EnvVar,
deepCopy_api_EnvVarSource,
deepCopy_api_ExecAction,
deepCopy_api_FCVolumeSource,
deepCopy_api_GCEPersistentDiskVolumeSource,
deepCopy_api_GitRepoVolumeSource,
deepCopy_api_GlusterfsVolumeSource,


@ -288,6 +288,29 @@ func convert_api_ExecAction_To_v1_ExecAction(in *api.ExecAction, out *v1.ExecAct
return nil
}
func convert_api_FCVolumeSource_To_v1_FCVolumeSource(in *api.FCVolumeSource, out *v1.FCVolumeSource, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*api.FCVolumeSource))(in)
}
if in.TargetWWNs != nil {
out.TargetWWNs = make([]string, len(in.TargetWWNs))
for i := range in.TargetWWNs {
out.TargetWWNs[i] = in.TargetWWNs[i]
}
} else {
out.TargetWWNs = nil
}
if in.Lun != nil {
out.Lun = new(int)
*out.Lun = *in.Lun
} else {
out.Lun = nil
}
out.FSType = in.FSType
out.ReadOnly = in.ReadOnly
return nil
}
func convert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in *api.GCEPersistentDiskVolumeSource, out *v1.GCEPersistentDiskVolumeSource, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*api.GCEPersistentDiskVolumeSource))(in)
@ -774,6 +797,14 @@ func convert_api_VolumeSource_To_v1_VolumeSource(in *api.VolumeSource, out *v1.V
} else {
out.DownwardAPI = nil
}
if in.FC != nil {
out.FC = new(v1.FCVolumeSource)
if err := convert_api_FCVolumeSource_To_v1_FCVolumeSource(in.FC, out.FC, s); err != nil {
return err
}
} else {
out.FC = nil
}
return nil
}
@ -1037,6 +1068,29 @@ func convert_v1_ExecAction_To_api_ExecAction(in *v1.ExecAction, out *api.ExecAct
return nil
}
func convert_v1_FCVolumeSource_To_api_FCVolumeSource(in *v1.FCVolumeSource, out *api.FCVolumeSource, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*v1.FCVolumeSource))(in)
}
if in.TargetWWNs != nil {
out.TargetWWNs = make([]string, len(in.TargetWWNs))
for i := range in.TargetWWNs {
out.TargetWWNs[i] = in.TargetWWNs[i]
}
} else {
out.TargetWWNs = nil
}
if in.Lun != nil {
out.Lun = new(int)
*out.Lun = *in.Lun
} else {
out.Lun = nil
}
out.FSType = in.FSType
out.ReadOnly = in.ReadOnly
return nil
}
func convert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource(in *v1.GCEPersistentDiskVolumeSource, out *api.GCEPersistentDiskVolumeSource, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*v1.GCEPersistentDiskVolumeSource))(in)
@ -1523,6 +1577,14 @@ func convert_v1_VolumeSource_To_api_VolumeSource(in *v1.VolumeSource, out *api.V
} else {
out.DownwardAPI = nil
}
if in.FC != nil {
out.FC = new(api.FCVolumeSource)
if err := convert_v1_FCVolumeSource_To_api_FCVolumeSource(in.FC, out.FC, s); err != nil {
return err
}
} else {
out.FC = nil
}
return nil
}
@ -2572,6 +2634,7 @@ func init() {
convert_api_EnvVarSource_To_v1_EnvVarSource,
convert_api_EnvVar_To_v1_EnvVar,
convert_api_ExecAction_To_v1_ExecAction,
convert_api_FCVolumeSource_To_v1_FCVolumeSource,
convert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource,
convert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource,
convert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource,
@ -2643,6 +2706,7 @@ func init() {
convert_v1_EnvVarSource_To_api_EnvVarSource,
convert_v1_EnvVar_To_api_EnvVar,
convert_v1_ExecAction_To_api_ExecAction,
convert_v1_FCVolumeSource_To_api_FCVolumeSource,
convert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource,
convert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource,
convert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource,


@ -291,6 +291,26 @@ func deepCopy_v1_ExecAction(in v1.ExecAction, out *v1.ExecAction, c *conversion.
return nil
}
func deepCopy_v1_FCVolumeSource(in v1.FCVolumeSource, out *v1.FCVolumeSource, c *conversion.Cloner) error {
if in.TargetWWNs != nil {
out.TargetWWNs = make([]string, len(in.TargetWWNs))
for i := range in.TargetWWNs {
out.TargetWWNs[i] = in.TargetWWNs[i]
}
} else {
out.TargetWWNs = nil
}
if in.Lun != nil {
out.Lun = new(int)
*out.Lun = *in.Lun
} else {
out.Lun = nil
}
out.FSType = in.FSType
out.ReadOnly = in.ReadOnly
return nil
}
func deepCopy_v1_GCEPersistentDiskVolumeSource(in v1.GCEPersistentDiskVolumeSource, out *v1.GCEPersistentDiskVolumeSource, c *conversion.Cloner) error {
out.PDName = in.PDName
out.FSType = in.FSType
@ -767,6 +787,14 @@ func deepCopy_v1_VolumeSource(in v1.VolumeSource, out *v1.VolumeSource, c *conve
} else {
out.DownwardAPI = nil
}
if in.FC != nil {
out.FC = new(v1.FCVolumeSource)
if err := deepCopy_v1_FCVolumeSource(*in.FC, out.FC, c); err != nil {
return err
}
} else {
out.FC = nil
}
return nil
}
@ -1307,6 +1335,7 @@ func init() {
deepCopy_v1_EnvVar,
deepCopy_v1_EnvVarSource,
deepCopy_v1_ExecAction,
deepCopy_v1_FCVolumeSource,
deepCopy_v1_GCEPersistentDiskVolumeSource,
deepCopy_v1_GitRepoVolumeSource,
deepCopy_v1_GlusterfsVolumeSource,


@ -0,0 +1,112 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fc
import (
"os"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/util/mount"
)
// Abstract interface to disk operations.
type diskManager interface {
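// Makes the global per-disk mount path under the kubelet plugin directory.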
MakeGlobalPDName(disk fcDisk) string
// Attaches the disk to the kubelet's host machine.
AttachDisk(b fcDiskBuilder) error
// Detaches the disk from the kubelet's host machine.
DetachDisk(disk fcDiskCleaner, mntPath string) error
}
// utility to mount a disk based filesystem
func diskSetUp(manager diskManager, b fcDiskBuilder, volPath string, mounter mount.Interface) error {
globalPDPath := manager.MakeGlobalPDName(*b.fcDisk)
// TODO: handle failed mounts here.
noMnt, err := mounter.IsLikelyNotMountPoint(volPath)
if err != nil && !os.IsNotExist(err) {
glog.Errorf("cannot validate mountpoint: %s", volPath)
return err
}
if !noMnt {
return nil
}
if err := manager.AttachDisk(b); err != nil {
glog.Errorf("failed to attach disk")
return err
}
if err := os.MkdirAll(volPath, 0750); err != nil {
glog.Errorf("failed to mkdir:%s", volPath)
return err
}
// Perform a bind mount to the full path to allow duplicate mounts of the same disk.
options := []string{"bind"}
if b.readOnly {
options = append(options, "ro")
}
err = mounter.Mount(globalPDPath, volPath, "", options)
if err != nil {
glog.Errorf("failed to bind mount:%s", globalPDPath)
return err
}
return nil
}
// utility to tear down a disk based filesystem
func diskTearDown(manager diskManager, c fcDiskCleaner, volPath string, mounter mount.Interface) error {
noMnt, err := mounter.IsLikelyNotMountPoint(volPath)
if err != nil {
glog.Errorf("cannot validate mountpoint %s", volPath)
return err
}
if noMnt {
return os.Remove(volPath)
}
refs, err := mount.GetMountRefs(mounter, volPath)
if err != nil {
glog.Errorf("failed to get reference count %s", volPath)
return err
}
if err := mounter.Unmount(volPath); err != nil {
glog.Errorf("failed to unmount %s", volPath)
return err
}
// If len(refs) is 1, then all bind mounts have been removed, and the
// remaining reference is the global mount. It is safe to detach.
if len(refs) == 1 {
mntPath := refs[0]
if err := manager.DetachDisk(c, mntPath); err != nil {
glog.Errorf("failed to detach disk from %s", mntPath)
return err
}
}
noMnt, mntErr := mounter.IsLikelyNotMountPoint(volPath)
if mntErr != nil {
glog.Errorf("isMountpoint check failed: %v", mntErr)
return mntErr
}
if noMnt {
if err := os.Remove(volPath); err != nil {
return err
}
}
return nil
}

pkg/volume/fc/doc.go (new file, 19 lines)

@ -0,0 +1,19 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package fc contains the internal representation of
// Fibre Channel (fc) volumes.
package fc

pkg/volume/fc/fc.go (new file, 198 lines)

@ -0,0 +1,198 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fc
import (
"fmt"
"strconv"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/pkg/util/exec"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
)
// This is the primary entrypoint for volume plugins.
func ProbeVolumePlugins() []volume.VolumePlugin {
return []volume.VolumePlugin{&fcPlugin{nil, exec.New()}}
}
type fcPlugin struct {
host volume.VolumeHost
exe exec.Interface
}
var _ volume.VolumePlugin = &fcPlugin{}
var _ volume.PersistentVolumePlugin = &fcPlugin{}
const (
fcPluginName = "kubernetes.io/fc"
)
func (plugin *fcPlugin) Init(host volume.VolumeHost) {
plugin.host = host
}
func (plugin *fcPlugin) Name() string {
return fcPluginName
}
func (plugin *fcPlugin) CanSupport(spec *volume.Spec) bool {
if (spec.Volume != nil && spec.Volume.FC == nil) || (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.FC == nil) {
return false
}
// TODO: turn this into a func so CanSupport can be unit tested without
// having to make system calls
// see if /sys/class/fc_transport is there, which indicates fc is connected
_, err := plugin.execCommand("ls", []string{"/sys/class/fc_transport"})
if err == nil {
return true
}
return false
}
func (plugin *fcPlugin) GetAccessModes() []api.PersistentVolumeAccessMode {
return []api.PersistentVolumeAccessMode{
api.ReadWriteOnce,
api.ReadOnlyMany,
}
}
func (plugin *fcPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions) (volume.Builder, error) {
// Inject real implementations here, test through the internal function.
return plugin.newBuilderInternal(spec, pod.UID, &FCUtil{}, plugin.host.GetMounter())
}
func (plugin *fcPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface) (volume.Builder, error) {
// fc volumes used directly in a pod have a ReadOnly flag set by the pod author.
// fc volumes used as a PersistentVolume get the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV
var readOnly bool
var fc *api.FCVolumeSource
if spec.Volume != nil && spec.Volume.FC != nil {
fc = spec.Volume.FC
readOnly = fc.ReadOnly
} else {
fc = spec.PersistentVolume.Spec.FC
readOnly = spec.ReadOnly
}
if fc.Lun == nil {
return nil, fmt.Errorf("empty lun")
}
lun := strconv.Itoa(*fc.Lun)
return &fcDiskBuilder{
fcDisk: &fcDisk{
podUID: podUID,
volName: spec.Name(),
wwns: fc.TargetWWNs,
lun: lun,
manager: manager,
mounter: &mount.SafeFormatAndMount{mounter, exec.New()},
io: &osIOHandler{},
plugin: plugin},
fsType: fc.FSType,
readOnly: readOnly,
}, nil
}
func (plugin *fcPlugin) NewCleaner(volName string, podUID types.UID) (volume.Cleaner, error) {
// Inject real implementations here, test through the internal function.
return plugin.newCleanerInternal(volName, podUID, &FCUtil{}, plugin.host.GetMounter())
}
func (plugin *fcPlugin) newCleanerInternal(volName string, podUID types.UID, manager diskManager, mounter mount.Interface) (volume.Cleaner, error) {
return &fcDiskCleaner{&fcDisk{
podUID: podUID,
volName: volName,
manager: manager,
mounter: mounter,
plugin: plugin,
io: &osIOHandler{},
}}, nil
}
func (plugin *fcPlugin) execCommand(command string, args []string) ([]byte, error) {
cmd := plugin.exe.Command(command, args...)
return cmd.CombinedOutput()
}
type fcDisk struct {
volName string
podUID types.UID
portal string
wwns []string
lun string
plugin *fcPlugin
mounter mount.Interface
// Utility interface that provides API calls to the provider to attach/detach disks.
manager diskManager
// io handler interface
io ioHandler
}
func (fc *fcDisk) GetPath() string {
name := fcPluginName
// safe to use PodVolumeDir now: volume teardown occurs before pod is cleaned up
return fc.plugin.host.GetPodVolumeDir(fc.podUID, util.EscapeQualifiedNameForDisk(name), fc.volName)
}
type fcDiskBuilder struct {
*fcDisk
readOnly bool
fsType string
}
var _ volume.Builder = &fcDiskBuilder{}
func (b *fcDiskBuilder) SetUp() error {
return b.SetUpAt(b.GetPath())
}
func (b *fcDiskBuilder) SetUpAt(dir string) error {
// diskSetUp checks mountpoints and prevents repeated calls
err := diskSetUp(b.manager, *b, dir, b.mounter)
if err != nil {
glog.Errorf("fc: failed to setup: %v", err)
}
return err
}
type fcDiskCleaner struct {
*fcDisk
}
var _ volume.Cleaner = &fcDiskCleaner{}
func (b *fcDiskBuilder) IsReadOnly() bool {
return b.readOnly
}
// Unmounts the bind mount, and detaches the disk only if the disk
// resource was the last reference to that disk on the kubelet.
func (c *fcDiskCleaner) TearDown() error {
return c.TearDownAt(c.GetPath())
}
func (c *fcDiskCleaner) TearDownAt(dir string) error {
return diskTearDown(c.manager, *c, dir, c.mounter)
}

pkg/volume/fc/fc_test.go (new file, 252 lines)

@ -0,0 +1,252 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fc
import (
"os"
"testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/client/unversioned/testclient"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
)
func TestCanSupport(t *testing.T) {
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/fc")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
if plug.Name() != "kubernetes.io/fc" {
t.Errorf("Wrong name: %s", plug.Name())
}
if plug.CanSupport(&volume.Spec{Volume: &api.Volume{VolumeSource: api.VolumeSource{}}}) {
t.Errorf("Expected false")
}
}
func TestGetAccessModes(t *testing.T) {
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", nil, nil))
plug, err := plugMgr.FindPersistentPluginByName("kubernetes.io/fc")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
if !contains(plug.GetAccessModes(), api.ReadWriteOnce) || !contains(plug.GetAccessModes(), api.ReadOnlyMany) {
t.Errorf("Expected two AccessModeTypes: %s and %s", api.ReadWriteOnce, api.ReadOnlyMany)
}
}
func contains(modes []api.PersistentVolumeAccessMode, mode api.PersistentVolumeAccessMode) bool {
for _, m := range modes {
if m == mode {
return true
}
}
return false
}
type fakeDiskManager struct {
attachCalled bool
detachCalled bool
}
func (fake *fakeDiskManager) MakeGlobalPDName(disk fcDisk) string {
return "/tmp/fake_fc_path"
}
func (fake *fakeDiskManager) AttachDisk(b fcDiskBuilder) error {
globalPath := b.manager.MakeGlobalPDName(*b.fcDisk)
err := os.MkdirAll(globalPath, 0750)
if err != nil {
return err
}
// Simulate the global mount so that the fakeMounter returns the
// expected number of mounts for the attached disk.
b.mounter.Mount(globalPath, globalPath, b.fsType, nil)
fake.attachCalled = true
return nil
}
func (fake *fakeDiskManager) DetachDisk(c fcDiskCleaner, mntPath string) error {
globalPath := c.manager.MakeGlobalPDName(*c.fcDisk)
err := os.RemoveAll(globalPath)
if err != nil {
return err
}
fake.detachCalled = true
return nil
}
func doTestPlugin(t *testing.T, spec *volume.Spec) {
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/fc")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
fakeManager := &fakeDiskManager{}
fakeMounter := &mount.FakeMounter{}
builder, err := plug.(*fcPlugin).newBuilderInternal(spec, types.UID("poduid"), fakeManager, fakeMounter)
if err != nil {
t.Errorf("Failed to make a new Builder: %v", err)
}
if builder == nil {
t.Errorf("Got a nil Builder: %v")
}
path := builder.GetPath()
if path != "/tmp/fake/pods/poduid/volumes/kubernetes.io~fc/vol1" {
t.Errorf("Got unexpected path: %s", path)
}
if err := builder.SetUp(); err != nil {
t.Errorf("Expected success, got: %v", err)
}
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) {
t.Errorf("SetUp() failed, volume path not created: %s", path)
} else {
t.Errorf("SetUp() failed: %v", err)
}
}
if !fakeManager.attachCalled {
t.Errorf("Attach was not called")
}
fakeManager = &fakeDiskManager{}
cleaner, err := plug.(*fcPlugin).newCleanerInternal("vol1", types.UID("poduid"), fakeManager, fakeMounter)
if err != nil {
t.Errorf("Failed to make a new Cleaner: %v", err)
}
if cleaner == nil {
t.Errorf("Got a nil Cleaner: %v")
}
if err := cleaner.TearDown(); err != nil {
t.Errorf("Expected success, got: %v", err)
}
if _, err := os.Stat(path); err == nil {
t.Errorf("TearDown() failed, volume path still exists: %s", path)
} else if !os.IsNotExist(err) {
t.Errorf("SetUp() failed: %v", err)
}
if !fakeManager.detachCalled {
t.Errorf("Detach was not called")
}
}
func TestPluginVolume(t *testing.T) {
lun := 0
vol := &api.Volume{
Name: "vol1",
VolumeSource: api.VolumeSource{
FC: &api.FCVolumeSource{
TargetWWNs: []string{"some_wwn"},
FSType: "ext4",
Lun: &lun,
},
},
}
doTestPlugin(t, volume.NewSpecFromVolume(vol))
}
func TestPluginPersistentVolume(t *testing.T) {
lun := 0
vol := &api.PersistentVolume{
ObjectMeta: api.ObjectMeta{
Name: "vol1",
},
Spec: api.PersistentVolumeSpec{
PersistentVolumeSource: api.PersistentVolumeSource{
FC: &api.FCVolumeSource{
TargetWWNs: []string{"some_wwn"},
FSType: "ext4",
Lun: &lun,
},
},
},
}
doTestPlugin(t, volume.NewSpecFromPersistentVolume(vol, false))
}
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
lun := 0
pv := &api.PersistentVolume{
ObjectMeta: api.ObjectMeta{
Name: "pvA",
},
Spec: api.PersistentVolumeSpec{
PersistentVolumeSource: api.PersistentVolumeSource{
FC: &api.FCVolumeSource{
TargetWWNs: []string{"some_wwn"},
FSType: "ext4",
Lun: &lun,
},
},
ClaimRef: &api.ObjectReference{
Name: "claimA",
},
},
}
claim := &api.PersistentVolumeClaim{
ObjectMeta: api.ObjectMeta{
Name: "claimA",
Namespace: "nsA",
},
Spec: api.PersistentVolumeClaimSpec{
VolumeName: "pvA",
},
Status: api.PersistentVolumeClaimStatus{
Phase: api.ClaimBound,
},
}
o := testclient.NewObjects(api.Scheme, api.Scheme)
o.Add(pv)
o.Add(claim)
client := &testclient.Fake{}
client.AddReactor("*", "*", testclient.ObjectReaction(o, testapi.Default.RESTMapper()))
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", client, nil))
plug, _ := plugMgr.FindPluginByName(fcPluginName)
// readOnly bool is supplied by persistent-claim volume source when its builder creates other volumes
spec := volume.NewSpecFromPersistentVolume(pv, true)
pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
builder, _ := plug.NewBuilder(spec, pod, volume.VolumeOptions{})
if !builder.IsReadOnly() {
t.Errorf("Expected true for builder.IsReadOnly")
}
}

pkg/volume/fc/fc_util.go (new file, 200 lines)

@ -0,0 +1,200 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fc
import (
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"strings"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/volume"
)
type ioHandler interface {
ReadDir(dirname string) ([]os.FileInfo, error)
Lstat(name string) (os.FileInfo, error)
EvalSymlinks(path string) (string, error)
WriteFile(filename string, data []byte, perm os.FileMode) error
}
type osIOHandler struct{}
func (handler *osIOHandler) ReadDir(dirname string) ([]os.FileInfo, error) {
return ioutil.ReadDir(dirname)
}
func (handler *osIOHandler) Lstat(name string) (os.FileInfo, error) {
return os.Lstat(name)
}
func (handler *osIOHandler) EvalSymlinks(path string) (string, error) {
return filepath.EvalSymlinks(path)
}
func (handler *osIOHandler) WriteFile(filename string, data []byte, perm os.FileMode) error {
return ioutil.WriteFile(filename, data, perm)
}
// given a disk path like /dev/sdx, find the devicemapper parent
func findMultipathDeviceMapper(disk string, io ioHandler) string {
sys_path := "/sys/block/"
if dirs, err := io.ReadDir(sys_path); err == nil {
for _, f := range dirs {
name := f.Name()
if strings.HasPrefix(name, "dm-") {
if _, err1 := io.Lstat(sys_path + name + "/slaves/" + disk); err1 == nil {
return "/dev/" + name
}
}
}
}
return ""
}
// given a wwn and lun, find the device and associated devicemapper parent
func findDisk(wwn, lun string, io ioHandler) (string, string) {
fc_path := "-fc-0x" + wwn + "-lun-" + lun
dev_path := "/dev/disk/by-path/"
if dirs, err := io.ReadDir(dev_path); err == nil {
for _, f := range dirs {
name := f.Name()
if strings.Contains(name, fc_path) {
if disk, err1 := io.EvalSymlinks(dev_path + name); err1 == nil {
arr := strings.Split(disk, "/")
l := len(arr) - 1
dev := arr[l]
dm := findMultipathDeviceMapper(dev, io)
return disk, dm
}
}
}
}
return "", ""
}
func createMultipathConf(path string, io ioHandler) {
if _, err := os.Lstat(path); err != nil {
data := []byte(`defaults {
find_multipaths yes
user_friendly_names yes
}
blacklist {
}
`)
io.WriteFile(path, data, 0664)
}
}
// rescan scsi bus
func scsiHostRescan(io ioHandler) {
scsi_path := "/sys/class/scsi_host/"
if dirs, err := io.ReadDir(scsi_path); err == nil {
for _, f := range dirs {
name := scsi_path + f.Name() + "/scan"
data := []byte("- - -")
io.WriteFile(name, data, 0666)
}
}
}
// make a directory like /var/lib/kubelet/plugins/kubernetes.io/pod/fc/target-lun-0
func makePDNameInternal(host volume.VolumeHost, wwns []string, lun string) string {
return path.Join(host.GetPluginDir(fcPluginName), wwns[0]+"-lun-"+lun)
}
type FCUtil struct{}
func (util *FCUtil) MakeGlobalPDName(fc fcDisk) string {
return makePDNameInternal(fc.plugin.host, fc.wwns, fc.lun)
}
func searchDisk(wwns []string, lun string, io ioHandler) (string, string) {
disk := ""
dm := ""
rescaned := false
// two-phase search:
// first phase: search the existing device paths; if a multipath dm is found, exit the loop
// second phase: otherwise rescan the scsi bus, search again, and return whatever is found
for {
for _, wwn := range wwns {
disk, dm = findDisk(wwn, lun, io)
// if multipath device is found, break
if dm != "" {
break
}
}
// if a dm is found, exit loop
if rescaned || dm != "" {
break
}
// rescan and search again
// create multipath conf if it is not there
createMultipathConf("/etc/multipath.conf", io)
// rescan scsi bus
scsiHostRescan(io)
rescaned = true
}
return disk, dm
}
func (util *FCUtil) AttachDisk(b fcDiskBuilder) error {
devicePath := ""
wwns := b.wwns
lun := b.lun
io := b.io
disk, dm := searchDisk(wwns, lun, io)
// if no disk matches input wwn and lun, exit
if disk == "" && dm == "" {
return fmt.Errorf("no fc disk found")
}
// if multipath devicemapper device is found, use it; otherwise use raw disk
if dm != "" {
devicePath = dm
} else {
devicePath = disk
}
// mount it
globalPDPath := b.manager.MakeGlobalPDName(*b.fcDisk)
noMnt, err := b.mounter.IsLikelyNotMountPoint(globalPDPath)
if err != nil && !os.IsNotExist(err) {
return fmt.Errorf("fc: failed to check mountpoint %s, error: %v", globalPDPath, err)
}
if !noMnt {
glog.Infof("fc: %s already mounted", globalPDPath)
return nil
}
if err := os.MkdirAll(globalPDPath, 0750); err != nil {
return fmt.Errorf("fc: failed to mkdir %s, error: %v", globalPDPath, err)
}
err = b.mounter.Mount(devicePath, globalPDPath, b.fsType, nil)
if err != nil {
return fmt.Errorf("fc: failed to mount fc volume %s [%s] to %s, error %v", devicePath, b.fsType, globalPDPath, err)
}
return err
}
func (util *FCUtil) DetachDisk(c fcDiskCleaner, mntPath string) error {
if err := c.mounter.Unmount(mntPath); err != nil {
return fmt.Errorf("fc detach disk: failed to unmount: %s\nError: %v", mntPath, err)
}
return nil
}


@ -0,0 +1,91 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fc
import (
"os"
"testing"
"time"
)
type fakeFileInfo struct {
name string
}
func (fi *fakeFileInfo) Name() string {
return fi.name
}
func (fi *fakeFileInfo) Size() int64 {
return 0
}
func (fi *fakeFileInfo) Mode() os.FileMode {
return 0777
}
func (fi *fakeFileInfo) ModTime() time.Time {
return time.Now()
}
func (fi *fakeFileInfo) IsDir() bool {
return false
}
func (fi *fakeFileInfo) Sys() interface{} {
return nil
}
type fakeIOHandler struct{}
func (handler *fakeIOHandler) ReadDir(dirname string) ([]os.FileInfo, error) {
switch dirname {
case "/dev/disk/by-path/":
f := &fakeFileInfo{
name: "pci-0000:41:00.0-fc-0x500a0981891b8dc5-lun-0",
}
return []os.FileInfo{f}, nil
case "/sys/block/":
f := &fakeFileInfo{
name: "dm-1",
}
return []os.FileInfo{f}, nil
}
return nil, nil
}
func (handler *fakeIOHandler) Lstat(name string) (os.FileInfo, error) {
return nil, nil
}
func (handler *fakeIOHandler) EvalSymlinks(path string) (string, error) {
return "/dev/sda", nil
}
func (handler *fakeIOHandler) WriteFile(filename string, data []byte, perm os.FileMode) error {
return nil
}
func TestIoHandler(t *testing.T) {
io := &fakeIOHandler{}
wwns := []string{"500a0981891b8dc5"}
lun := "0"
disk, dm := searchDisk(wwns, lun, io)
// if no disk matches input wwn and lun, exit
if disk == "" && dm == "" {
t.Errorf("no fc disk found")
}
}