add rados block device (rbd) volume plugin

Signed-off-by: Huamin Chen <hchen@redhat.com>
pull/6/head
Huamin Chen 2015-04-07 13:22:23 -04:00
parent 9b1fb6dca1
commit 4a800fd10e
22 changed files with 1099 additions and 1 deletions


@ -34,6 +34,7 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/volume/iscsi"
"github.com/GoogleCloudPlatform/kubernetes/pkg/volume/nfs"
"github.com/GoogleCloudPlatform/kubernetes/pkg/volume/persistent_claim"
"github.com/GoogleCloudPlatform/kubernetes/pkg/volume/rbd"
"github.com/GoogleCloudPlatform/kubernetes/pkg/volume/secret"
//Cloud providers
_ "github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider/aws"
@ -62,6 +63,7 @@ func ProbeVolumePlugins() []volume.VolumePlugin {
allPlugins = append(allPlugins, iscsi.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, glusterfs.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, persistent_claim.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, rbd.ProbeVolumePlugins()...)
return allPlugins
}


@ -178,6 +178,10 @@ func TestExampleObjectSchemas(t *testing.T) {
"pod": &api.Pod{},
"replication": &api.ReplicationController{},
},
"../examples/rbd/v1beta3": {
"rbd": &api.Pod{},
"rbd-with-secret": &api.Pod{},
},
}
for path, expected := range cases {

examples/rbd/README.md Normal file

@ -0,0 +1,48 @@
# How to Use it?
Install Ceph on the Kubernetes host. For example, on Fedora 21:
```console
# yum -y install ceph
```
If you don't have a Ceph cluster, you can set up a [containerized Ceph cluster](https://github.com/rootfs/docker-ceph)
Then get the keyring from the Ceph cluster and copy it to */etc/ceph/keyring*.
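For example, assuming the admin keyring sits at Ceph's default location on a monitor host (`<ceph-monitor-host>` is only a placeholder):
```console
# scp <ceph-monitor-host>:/etc/ceph/ceph.client.admin.keyring /etc/ceph/keyring
```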
Once you have installed Ceph and Kubernetes, you can create a pod based on my examples [rbd.json](v1beta3/rbd.json) and [rbd-with-secret.json](v1beta3/rbd-with-secret.json). In the pod JSON, you need to provide the following information.
- *monitors*: Ceph monitors.
- *pool*: The name of the RADOS pool. If not provided, the default *rbd* pool is used.
- *image*: The name of the RBD image that has been created.
- *user*: The RADOS user name. If not provided, the default *admin* is used.
- *keyring*: The path to the keyring file. If not provided, the default */etc/ceph/keyring* is used.
- *secretName*: The name of the authentication secret. If provided, *secretName* overrides *keyring*. See below for how to create a secret.
- *fsType*: The filesystem type (ext4, xfs, etc.) that is formatted on the device.
- *readOnly*: Whether the filesystem is mounted read-only.
# Use Ceph Authentication Secret
If a Ceph authentication secret is used, the secret key must first be base64 encoded, and the encoded string placed in a secret YAML. An example YAML is provided [here](secret/ceph-secret.yaml). Then post the secret through ```kubectl``` with the following command.
```console
# cluster/kubectl.sh create -f examples/rbd/secret/ceph-secret.yaml
```
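The base64 value in the example secret is simply the encoded Ceph key. A minimal sketch of producing it, assuming the *admin* user and a host where the `ceph` CLI can reach the cluster:
```console
# ceph auth get-key client.admin | base64
```
Paste the resulting string into the *key* field of the secret YAML.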
# Get started
Here are my commands:
```console
# cluster/kubectl.sh create -f examples/rbd/v1beta3/rbd.json
# cluster/kubectl.sh get pods
```
On the Kubernetes host, I see the following in the mount output:
```console
# mount | grep kub
/dev/rbd0 on /var/lib/kubelet/plugins/kubernetes.io/rbd/rbd/kube-image-foo type ext4 (ro,relatime,stripe=4096,data=ordered)
/dev/rbd0 on /var/lib/kubelet/pods/ec2166b4-de07-11e4-aaf5-d4bed9b39058/volumes/kubernetes.io~rbd/rbdpd type ext4 (ro,relatime,stripe=4096,data=ordered)
```
If you ssh to that machine, you can run `docker ps` to see the actual pod and `docker inspect` to see the volumes used by the container.
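For example, a quick way to spot the RBD bind mounts (`<container_id>` is only a placeholder taken from the `docker ps` output):
```console
# docker ps
# docker inspect <container_id> | grep rbd
```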
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/rbd/README.md?pixel)]()


@ -0,0 +1,6 @@
apiVersion: v1beta3
kind: Secret
metadata:
name: ceph-secret
data:
key: QVFCMTZWMVZvRjVtRXhBQTVrQ1FzN2JCajhWVUxSdzI2Qzg0SEE9PQ==


@ -0,0 +1,42 @@
{
"apiVersion": "v1beta3",
"id": "rbdpd2",
"kind": "Pod",
"metadata": {
"name": "rbd2"
},
"spec": {
"containers": [
{
"name": "rbd-rw",
"image": "kubernetes/pause",
"volumeMounts": [
{
"mountPath": "/mnt/rbd",
"name": "rbdpd"
}
]
}
],
"volumes": [
{
"name": "rbdpd",
"rbd": {
"monitors": [
"10.16.154.78:6789",
"10.16.154.82:6789",
"10.16.154.83:6789"
],
"pool": "kube",
"image": "foo",
"user": "admin",
"secretRef": {
"name": "ceph-secret"
},
"fsType": "ext4",
"readOnly": true
}
}
]
}
}


@ -0,0 +1,40 @@
{
"apiVersion": "v1beta3",
"id": "rbdpd",
"kind": "Pod",
"metadata": {
"name": "rbd"
},
"spec": {
"containers": [
{
"name": "rbd-rw",
"image": "kubernetes/pause",
"volumeMounts": [
{
"mountPath": "/mnt/rbd",
"name": "rbdpd"
}
]
}
],
"volumes": [
{
"name": "rbdpd",
"rbd": {
"monitors": [
"10.16.154.78:6789",
"10.16.154.82:6789",
"10.16.154.83:6789"
],
"pool": "kube",
"image": "foo",
"user": "admin",
"keyring": "/etc/ceph/keyring",
"fsType": "ext4",
"readOnly": true
}
}
]
}
}


@ -205,6 +205,8 @@ type VolumeSource struct {
Glusterfs *GlusterfsVolumeSource `json:"glusterfs"`
// PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace
PersistentVolumeClaimVolumeSource *PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty"`
// RBD represents a Rados Block Device mount on the host that shares a pod's lifetime
RBD *RBDVolumeSource `json:"rbd"`
}
// Similar to VolumeSource but meant for the administrator who creates PVs.
@ -224,6 +226,8 @@ type PersistentVolumeSource struct {
Glusterfs *GlusterfsVolumeSource `json:"glusterfs"`
// NFS represents an NFS mount on the host that shares a pod's lifetime
NFS *NFSVolumeSource `json:"nfs"`
// RBD represents a Rados Block Device mount on the host that shares a pod's lifetime
RBD *RBDVolumeSource `json:"rbd"`
}
type PersistentVolumeClaimVolumeSource struct {
@ -482,6 +486,30 @@ type GlusterfsVolumeSource struct {
ReadOnly bool `json:"readOnly,omitempty"`
}
// RBDVolumeSource represents a Rados Block Device Mount that lasts the lifetime of a pod
type RBDVolumeSource struct {
// Required: CephMonitors is a collection of Ceph monitors
CephMonitors []string `json:"monitors"`
// Required: RBDImage is the rados image name
RBDImage string `json:"image"`
// Required: Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs"
// TODO: how do we prevent errors in the filesystem from compromising the machine
FSType string `json:"fsType,omitempty"`
// Optional: RBDPool is the rados pool name, default is rbd
RBDPool string `json:"pool"`
// Optional: RadosUser is the rados user name, default is admin
RadosUser string `json:"user"`
// Optional: Keyring is the path to the keyring for RadosUser, default is /etc/ceph/keyring
Keyring string `json:"keyring"`
// Optional: SecretRef is the name of the authentication secret for RadosUser. If provided, it overrides Keyring. Default is empty.
SecretRef *LocalObjectReference `json:"secretRef"`
// Optional: Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
ReadOnly bool `json:"readOnly,omitempty"`
}
// ContainerPort represents a network port in a single container
type ContainerPort struct {
// Optional: If specified, this must be a DNS_LABEL. Each named port


@ -1277,6 +1277,14 @@ func convert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource(in *api.Per
} else {
out.NFS = nil
}
if in.RBD != nil {
out.RBD = new(RBDVolumeSource)
if err := convert_api_RBDVolumeSource_To_v1_RBDVolumeSource(in.RBD, out.RBD, s); err != nil {
return err
}
} else {
out.RBD = nil
}
return nil
}
@ -1604,6 +1612,35 @@ func convert_api_Probe_To_v1_Probe(in *api.Probe, out *Probe, s conversion.Scope
return nil
}
func convert_api_RBDVolumeSource_To_v1_RBDVolumeSource(in *api.RBDVolumeSource, out *RBDVolumeSource, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*api.RBDVolumeSource))(in)
}
if in.CephMonitors != nil {
out.CephMonitors = make([]string, len(in.CephMonitors))
for i := range in.CephMonitors {
out.CephMonitors[i] = in.CephMonitors[i]
}
} else {
out.CephMonitors = nil
}
out.RBDImage = in.RBDImage
out.FSType = in.FSType
out.RBDPool = in.RBDPool
out.RadosUser = in.RadosUser
out.Keyring = in.Keyring
if in.SecretRef != nil {
out.SecretRef = new(LocalObjectReference)
if err := convert_api_LocalObjectReference_To_v1_LocalObjectReference(in.SecretRef, out.SecretRef, s); err != nil {
return err
}
} else {
out.SecretRef = nil
}
out.ReadOnly = in.ReadOnly
return nil
}
func convert_api_RangeAllocation_To_v1_RangeAllocation(in *api.RangeAllocation, out *RangeAllocation, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*api.RangeAllocation))(in)
@ -2235,6 +2272,14 @@ func convert_api_VolumeSource_To_v1_VolumeSource(in *api.VolumeSource, out *Volu
} else {
out.PersistentVolumeClaimVolumeSource = nil
}
if in.RBD != nil {
out.RBD = new(RBDVolumeSource)
if err := convert_api_RBDVolumeSource_To_v1_RBDVolumeSource(in.RBD, out.RBD, s); err != nil {
return err
}
} else {
out.RBD = nil
}
return nil
}
@ -3490,6 +3535,14 @@ func convert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource(in *Persist
} else {
out.NFS = nil
}
if in.RBD != nil {
out.RBD = new(api.RBDVolumeSource)
if err := convert_v1_RBDVolumeSource_To_api_RBDVolumeSource(in.RBD, out.RBD, s); err != nil {
return err
}
} else {
out.RBD = nil
}
return nil
}
@ -3817,6 +3870,35 @@ func convert_v1_Probe_To_api_Probe(in *Probe, out *api.Probe, s conversion.Scope
return nil
}
func convert_v1_RBDVolumeSource_To_api_RBDVolumeSource(in *RBDVolumeSource, out *api.RBDVolumeSource, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*RBDVolumeSource))(in)
}
if in.CephMonitors != nil {
out.CephMonitors = make([]string, len(in.CephMonitors))
for i := range in.CephMonitors {
out.CephMonitors[i] = in.CephMonitors[i]
}
} else {
out.CephMonitors = nil
}
out.RBDImage = in.RBDImage
out.FSType = in.FSType
out.RBDPool = in.RBDPool
out.RadosUser = in.RadosUser
out.Keyring = in.Keyring
if in.SecretRef != nil {
out.SecretRef = new(api.LocalObjectReference)
if err := convert_v1_LocalObjectReference_To_api_LocalObjectReference(in.SecretRef, out.SecretRef, s); err != nil {
return err
}
} else {
out.SecretRef = nil
}
out.ReadOnly = in.ReadOnly
return nil
}
func convert_v1_RangeAllocation_To_api_RangeAllocation(in *RangeAllocation, out *api.RangeAllocation, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*RangeAllocation))(in)
@ -4448,6 +4530,14 @@ func convert_v1_VolumeSource_To_api_VolumeSource(in *VolumeSource, out *api.Volu
} else {
out.PersistentVolumeClaimVolumeSource = nil
}
if in.RBD != nil {
out.RBD = new(api.RBDVolumeSource)
if err := convert_v1_RBDVolumeSource_To_api_RBDVolumeSource(in.RBD, out.RBD, s); err != nil {
return err
}
} else {
out.RBD = nil
}
return nil
}
@ -4533,6 +4623,7 @@ func init() {
convert_api_PodTemplate_To_v1_PodTemplate,
convert_api_Pod_To_v1_Pod,
convert_api_Probe_To_v1_Probe,
convert_api_RBDVolumeSource_To_v1_RBDVolumeSource,
convert_api_RangeAllocation_To_v1_RangeAllocation,
convert_api_ReplicationControllerList_To_v1_ReplicationControllerList,
convert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec,
@ -4642,6 +4733,7 @@ func init() {
convert_v1_PodTemplate_To_api_PodTemplate,
convert_v1_Pod_To_api_Pod,
convert_v1_Probe_To_api_Probe,
convert_v1_RBDVolumeSource_To_api_RBDVolumeSource,
convert_v1_RangeAllocation_To_api_RangeAllocation,
convert_v1_ReplicationControllerList_To_api_ReplicationControllerList,
convert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec,


@ -222,6 +222,8 @@ type VolumeSource struct {
Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" description:"Glusterfs volume that will be mounted on the host machine "`
// PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace
PersistentVolumeClaimVolumeSource *PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty" description:"a reference to a PersistentVolumeClaim in the same namespace"`
// RBD represents a Rados Block Device mount on the host that shares a pod's lifetime
RBD *RBDVolumeSource `json:"rbd" description:"rados block volume that will be mounted on the host machine"`
}
type PersistentVolumeClaimVolumeSource struct {
@ -249,6 +251,8 @@ type PersistentVolumeSource struct {
Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" description:"Glusterfs volume resource provisioned by an admin"`
// NFS represents an NFS mount on the host
NFS *NFSVolumeSource `json:"nfs,omitempty" description:"NFS volume resource provisioned by an admin"`
// RBD represents a Rados Block Device mount on the host that shares a pod's lifetime
RBD *RBDVolumeSource `json:"rbd" description:"rados block volume that will be mounted on the host machine"`
}
type PersistentVolume struct {
@ -385,6 +389,30 @@ type GlusterfsVolumeSource struct {
// StorageMedium defines ways that storage can be allocated to a volume.
type StorageMedium string
// RBDVolumeSource represents a Rados Block Device Mount that lasts the lifetime of a pod
type RBDVolumeSource struct {
// Required: CephMonitors is a collection of Ceph monitors
CephMonitors []string `json:"monitors" description:"a collection of Ceph monitors"`
// Required: RBDImage is the rados image name
RBDImage string `json:"image" description:"rados image name"`
// Required: Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs"
// TODO: how do we prevent errors in the filesystem from compromising the machine
FSType string `json:"fsType,omitempty" description:"file system type to mount, such as ext4, xfs, ntfs"`
// Optional: RBDPool is the rados pool name, default is rbd
RBDPool string `json:"pool" description:"rados pool name; default is rbd; optional"`
// Optional: RadosUser is the rados user name, default is admin
RadosUser string `json:"user" description:"rados user name; default is admin; optional"`
// Optional: Keyring is the path to the keyring for RadosUser, default is /etc/ceph/keyring
Keyring string `json:"keyring" description:"keyring is the path to key ring for rados user; default is /etc/ceph/keyring; optional"`
// Optional: SecretRef is the name of the authentication secret for RadosUser. If provided, it overrides Keyring. Default is empty.
SecretRef *LocalObjectReference `json:"secretRef" description:"name of a secret to authenticate the RBD user; if provided overrides keyring; optional"`
// Optional: Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
ReadOnly bool `json:"readOnly,omitempty" description:"rbd volume to be mounted with read-only permissions"`
}
const (
StorageMediumDefault StorageMedium = "" // use whatever the default is for the node
StorageMediumMemory StorageMedium = "Memory" // use memory (tmpfs)


@ -1239,6 +1239,9 @@ func addConversionFuncs() {
if err := s.Convert(&in.PersistentVolumeClaimVolumeSource, &out.PersistentVolumeClaimVolumeSource, 0); err != nil {
return err
}
if err := s.Convert(&in.RBD, &out.RBD, 0); err != nil {
return err
}
return nil
},
func(in *VolumeSource, out *api.VolumeSource, s conversion.Scope) error {
@ -1272,6 +1275,10 @@ func addConversionFuncs() {
if err := s.Convert(&in.Glusterfs, &out.Glusterfs, 0); err != nil {
return err
}
if err := s.Convert(&in.RBD, &out.RBD, 0); err != nil {
return err
}
return nil
},


@ -133,6 +133,8 @@ type VolumeSource struct {
Glusterfs *GlusterfsVolumeSource `json:"glusterfs" description:"Glusterfs volume that will be mounted on the host machine "`
// PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace
PersistentVolumeClaimVolumeSource *PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty" description:"a reference to a PersistentVolumeClaim in the same namespace"`
// RBD represents a Rados Block Device mount on the host that shares a pod's lifetime
RBD *RBDVolumeSource `json:"rbd" description:"rados block volume that will be mounted on the host machine"`
}
// Similar to VolumeSource but meant for the administrator who creates PVs.
@ -152,6 +154,8 @@ type PersistentVolumeSource struct {
Glusterfs *GlusterfsVolumeSource `json:"glusterfs" description:"Glusterfs volume resource provisioned by an admin"`
// NFS represents an NFS mount on the host
NFS *NFSVolumeSource `json:"nfs" description:"NFS volume resource provisioned by an admin"`
// RBD represents a Rados Block Device mount on the host that shares a pod's lifetime
RBD *RBDVolumeSource `json:"rbd" description:"rados block volume that will be mounted on the host machine"`
}
type PersistentVolumeClaimVolumeSource struct {
@ -372,6 +376,30 @@ type SecretVolumeSource struct {
Target ObjectReference `json:"target" description:"target is a reference to a secret"`
}
// RBDVolumeSource represents a Rados Block Device Mount that lasts the lifetime of a pod
type RBDVolumeSource struct {
// Required: CephMonitors is a collection of Ceph monitors
CephMonitors []string `json:"monitors" description:"a collection of Ceph monitors"`
// Required: RBDImage is the rados image name
RBDImage string `json:"image" description:"rados image name"`
// Required: Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs"
// TODO: how do we prevent errors in the filesystem from compromising the machine
FSType string `json:"fsType,omitempty" description:"file system type to mount, such as ext4, xfs, ntfs"`
// Optional: RBDPool is the rados pool name, default is rbd
RBDPool string `json:"pool" description:"rados pool name; default is rbd; optional"`
// Optional: RadosUser is the rados user name, default is admin
RadosUser string `json:"user" description:"rados user name; default is admin; optional"`
// Optional: Keyring is the path to the keyring for RadosUser, default is /etc/ceph/keyring
Keyring string `json:"keyring" description:"keyring is the path to key ring for rados user; default is /etc/ceph/keyring; optional"`
// Optional: SecretRef is the name of the authentication secret for RadosUser. If provided, it overrides Keyring. Default is empty.
SecretRef *LocalObjectReference `json:"secretRef" description:"name of a secret to authenticate the RBD user; if provided overrides keyring; optional"`
// Optional: Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
ReadOnly bool `json:"readOnly,omitempty" description:"rbd volume to be mounted with read-only permissions"`
}
// ContainerPort represents a network port in a single container
type ContainerPort struct {
// Optional: If specified, this must be a DNS_LABEL. Each named port


@ -1157,6 +1157,9 @@ func addConversionFuncs() {
if err := s.Convert(&in.PersistentVolumeClaimVolumeSource, &out.PersistentVolumeClaimVolumeSource, 0); err != nil {
return err
}
if err := s.Convert(&in.RBD, &out.RBD, 0); err != nil {
return err
}
return nil
},
func(in *VolumeSource, out *api.VolumeSource, s conversion.Scope) error {
@ -1190,6 +1193,9 @@ func addConversionFuncs() {
if err := s.Convert(&in.Glusterfs, &out.Glusterfs, 0); err != nil {
return err
}
if err := s.Convert(&in.RBD, &out.RBD, 0); err != nil {
return err
}
return nil
},


@ -90,6 +90,8 @@ type VolumeSource struct {
Glusterfs *GlusterfsVolumeSource `json:"glusterfs" description:"Glusterfs volume that will be mounted on the host machine "`
// PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace
PersistentVolumeClaimVolumeSource *PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty" description:"a reference to a PersistentVolumeClaim in the same namespace"`
// RBD represents a Rados Block Device mount on the host that shares a pod's lifetime
RBD *RBDVolumeSource `json:"rbd" description:"rados block volume that will be mounted on the host machine"`
}
// Similar to VolumeSource but meant for the administrator who creates PVs.
@ -109,6 +111,8 @@ type PersistentVolumeSource struct {
Glusterfs *GlusterfsVolumeSource `json:"glusterfs" description:"Glusterfs volume resource provisioned by an admin"`
// NFS represents an NFS mount on the host
NFS *NFSVolumeSource `json:"nfs" description:"NFS volume resource provisioned by an admin"`
// RBD represents a Rados Block Device mount on the host that shares a pod's lifetime
RBD *RBDVolumeSource `json:"rbd" description:"rados block volume that will be mounted on the host machine"`
}
type PersistentVolumeClaimVolumeSource struct {
@ -369,6 +373,30 @@ type GlusterfsVolumeSource struct {
ReadOnly bool `json:"readOnly,omitempty" description:"glusterfs volume to be mounted with read-only permissions"`
}
// RBDVolumeSource represents a Rados Block Device Mount that lasts the lifetime of a pod
type RBDVolumeSource struct {
// Required: CephMonitors is a collection of Ceph monitors
CephMonitors []string `json:"monitors" description:"a collection of Ceph monitors"`
// Required: RBDImage is the rados image name
RBDImage string `json:"image" description:"rados image name"`
// Required: Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs"
// TODO: how do we prevent errors in the filesystem from compromising the machine
FSType string `json:"fsType,omitempty" description:"file system type to mount, such as ext4, xfs, ntfs"`
// Optional: RBDPool is the rados pool name, default is rbd
RBDPool string `json:"pool" description:"rados pool name; default is rbd; optional"`
// Optional: RadosUser is the rados user name, default is admin
RadosUser string `json:"user" description:"rados user name; default is admin; optional"`
// Optional: Keyring is the path to the keyring for RadosUser, default is /etc/ceph/keyring
Keyring string `json:"keyring" description:"keyring is the path to key ring for rados user; default is /etc/ceph/keyring; optional"`
// Optional: SecretRef is the name of the authentication secret for RadosUser. If provided, it overrides Keyring. Default is empty.
SecretRef *LocalObjectReference `json:"secretRef" description:"name of a secret to authenticate the RBD user; if provided overrides keyring; optional"`
// Optional: Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
ReadOnly bool `json:"readOnly,omitempty" description:"rbd volume to be mounted with read-only permissions"`
}
// VolumeMount describes a mounting of a Volume within a container.
//
// http://docs.k8s.io/volumes.md


@ -1184,6 +1184,14 @@ func convert_api_PersistentVolumeSource_To_v1beta3_PersistentVolumeSource(in *ap
} else {
out.NFS = nil
}
if in.RBD != nil {
out.RBD = new(RBDVolumeSource)
if err := convert_api_RBDVolumeSource_To_v1beta3_RBDVolumeSource(in.RBD, out.RBD, s); err != nil {
return err
}
} else {
out.RBD = nil
}
return nil
}
@ -1511,6 +1519,35 @@ func convert_api_Probe_To_v1beta3_Probe(in *api.Probe, out *Probe, s conversion.
return nil
}
func convert_api_RBDVolumeSource_To_v1beta3_RBDVolumeSource(in *api.RBDVolumeSource, out *RBDVolumeSource, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*api.RBDVolumeSource))(in)
}
if in.CephMonitors != nil {
out.CephMonitors = make([]string, len(in.CephMonitors))
for i := range in.CephMonitors {
out.CephMonitors[i] = in.CephMonitors[i]
}
} else {
out.CephMonitors = nil
}
out.RBDImage = in.RBDImage
out.FSType = in.FSType
out.RBDPool = in.RBDPool
out.RadosUser = in.RadosUser
out.Keyring = in.Keyring
if in.SecretRef != nil {
out.SecretRef = new(LocalObjectReference)
if err := convert_api_LocalObjectReference_To_v1beta3_LocalObjectReference(in.SecretRef, out.SecretRef, s); err != nil {
return err
}
} else {
out.SecretRef = nil
}
out.ReadOnly = in.ReadOnly
return nil
}
func convert_api_RangeAllocation_To_v1beta3_RangeAllocation(in *api.RangeAllocation, out *RangeAllocation, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*api.RangeAllocation))(in)
@ -2172,6 +2209,14 @@ func convert_api_VolumeSource_To_v1beta3_VolumeSource(in *api.VolumeSource, out
} else {
out.PersistentVolumeClaimVolumeSource = nil
}
if in.RBD != nil {
out.RBD = new(RBDVolumeSource)
if err := convert_api_RBDVolumeSource_To_v1beta3_RBDVolumeSource(in.RBD, out.RBD, s); err != nil {
return err
}
} else {
out.RBD = nil
}
return nil
}
@ -3334,6 +3379,14 @@ func convert_v1beta3_PersistentVolumeSource_To_api_PersistentVolumeSource(in *Pe
} else {
out.NFS = nil
}
if in.RBD != nil {
out.RBD = new(api.RBDVolumeSource)
if err := convert_v1beta3_RBDVolumeSource_To_api_RBDVolumeSource(in.RBD, out.RBD, s); err != nil {
return err
}
} else {
out.RBD = nil
}
return nil
}
@ -3661,6 +3714,35 @@ func convert_v1beta3_Probe_To_api_Probe(in *Probe, out *api.Probe, s conversion.
return nil
}
func convert_v1beta3_RBDVolumeSource_To_api_RBDVolumeSource(in *RBDVolumeSource, out *api.RBDVolumeSource, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*RBDVolumeSource))(in)
}
if in.CephMonitors != nil {
out.CephMonitors = make([]string, len(in.CephMonitors))
for i := range in.CephMonitors {
out.CephMonitors[i] = in.CephMonitors[i]
}
} else {
out.CephMonitors = nil
}
out.RBDImage = in.RBDImage
out.FSType = in.FSType
out.RBDPool = in.RBDPool
out.RadosUser = in.RadosUser
out.Keyring = in.Keyring
if in.SecretRef != nil {
out.SecretRef = new(api.LocalObjectReference)
if err := convert_v1beta3_LocalObjectReference_To_api_LocalObjectReference(in.SecretRef, out.SecretRef, s); err != nil {
return err
}
} else {
out.SecretRef = nil
}
out.ReadOnly = in.ReadOnly
return nil
}
func convert_v1beta3_RangeAllocation_To_api_RangeAllocation(in *RangeAllocation, out *api.RangeAllocation, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*RangeAllocation))(in)
@ -4322,6 +4404,14 @@ func convert_v1beta3_VolumeSource_To_api_VolumeSource(in *VolumeSource, out *api
} else {
out.PersistentVolumeClaimVolumeSource = nil
}
if in.RBD != nil {
out.RBD = new(api.RBDVolumeSource)
if err := convert_v1beta3_RBDVolumeSource_To_api_RBDVolumeSource(in.RBD, out.RBD, s); err != nil {
return err
}
} else {
out.RBD = nil
}
return nil
}
@ -4406,6 +4496,7 @@ func init() {
convert_api_PodTemplate_To_v1beta3_PodTemplate,
convert_api_Pod_To_v1beta3_Pod,
convert_api_Probe_To_v1beta3_Probe,
convert_api_RBDVolumeSource_To_v1beta3_RBDVolumeSource,
convert_api_RangeAllocation_To_v1beta3_RangeAllocation,
convert_api_ReplicationControllerList_To_v1beta3_ReplicationControllerList,
convert_api_ReplicationControllerSpec_To_v1beta3_ReplicationControllerSpec,
@ -4516,6 +4607,7 @@ func init() {
convert_v1beta3_PodTemplate_To_api_PodTemplate,
convert_v1beta3_Pod_To_api_Pod,
convert_v1beta3_Probe_To_api_Probe,
convert_v1beta3_RBDVolumeSource_To_api_RBDVolumeSource,
convert_v1beta3_RangeAllocation_To_api_RangeAllocation,
convert_v1beta3_ReplicationControllerList_To_api_ReplicationControllerList,
convert_v1beta3_ReplicationControllerSpec_To_api_ReplicationControllerSpec,


@ -222,6 +222,8 @@ type VolumeSource struct {
Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" description:"Glusterfs volume that will be mounted on the host machine "`
// PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace
PersistentVolumeClaimVolumeSource *PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty" description:"a reference to a PersistentVolumeClaim in the same namespace"`
// RBD represents a Rados Block Device mount on the host that shares a pod's lifetime
RBD *RBDVolumeSource `json:"rbd" description:"rados block volume that will be mounted on the host machine"`
}
type PersistentVolumeClaimVolumeSource struct {
@ -249,6 +251,8 @@ type PersistentVolumeSource struct {
Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" description:"Glusterfs volume resource provisioned by an admin"`
// NFS represents an NFS mount on the host
NFS *NFSVolumeSource `json:"nfs,omitempty" description:"NFS volume resource provisioned by an admin"`
// RBD represents a Rados Block Device mount on the host that shares a pod's lifetime
RBD *RBDVolumeSource `json:"rbd" description:"rados block volume that will be mounted on the host machine"`
}
type PersistentVolume struct {
@ -385,6 +389,30 @@ type GlusterfsVolumeSource struct {
// StorageMedium defines ways that storage can be allocated to a volume.
type StorageMedium string
// RBDVolumeSource represents a Rados Block Device Mount that lasts the lifetime of a pod
type RBDVolumeSource struct {
// Required: CephMonitors is a collection of Ceph monitors
CephMonitors []string `json:"monitors" description:"a collection of Ceph monitors"`
// Required: RBDImage is the rados image name
RBDImage string `json:"image" description:"rados image name"`
// Required: Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs"
// TODO: how do we prevent errors in the filesystem from compromising the machine
FSType string `json:"fsType,omitempty" description:"file system type to mount, such as ext4, xfs, ntfs"`
// Optional: RBDPool is the rados pool name, default is rbd
RBDPool string `json:"pool" description:"rados pool name; default is rbd; optional"`
// Optional: RadosUser is the rados user name, default is admin
RadosUser string `json:"user" description:"rados user name; default is admin; optional"`
// Optional: Keyring is the path to the keyring for RadosUser, default is /etc/ceph/keyring
Keyring string `json:"keyring" description:"keyring is the path to key ring for rados user; default is /etc/ceph/keyring; optional"`
// Optional: SecretRef is the name of the authentication secret for RadosUser. If provided, it overrides Keyring. Default is empty.
SecretRef *LocalObjectReference `json:"secretRef" description:"name of a secret to authenticate the RBD user; if provided overrides keyring; optional"`
// Optional: Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
ReadOnly bool `json:"readOnly,omitempty" description:"rbd volume to be mounted with read-only permissions"`
}
const (
StorageMediumDefault StorageMedium = "" // use whatever the default is for the node
StorageMediumMemory StorageMedium = "Memory" // use memory (tmpfs)


@ -151,6 +151,7 @@ func (s *SwaggerSchema) validateField(value interface{}, apiVersion, fieldName,
return NewInvalidTypeError(reflect.Array, reflect.TypeOf(value).Kind(), fieldName)
}
var arrType string
glog.Infof("field detail %v", fieldDetails)
if fieldDetails.Items.Ref == nil && fieldDetails.Items.Type == nil {
return NewInvalidTypeError(reflect.Array, reflect.TypeOf(value).Kind(), fieldName)
}


@ -342,10 +342,14 @@ func validateSource(source *api.VolumeSource) errs.ValidationErrorList {
numVolumes++
allErrs = append(allErrs, validatePersistentClaimVolumeSource(source.PersistentVolumeClaimVolumeSource).Prefix("persistentVolumeClaim")...)
}
if source.RBD != nil {
numVolumes++
allErrs = append(allErrs, validateRBD(source.RBD).Prefix("rbd")...)
}
if numVolumes != 1 {
allErrs = append(allErrs, errs.NewFieldInvalid("", source, "exactly 1 volume type is required"))
}
return allErrs
}
@ -451,6 +455,20 @@ func validateGlusterfs(glusterfs *api.GlusterfsVolumeSource) errs.ValidationErro
return allErrs
}
func validateRBD(rbd *api.RBDVolumeSource) errs.ValidationErrorList {
allErrs := errs.ValidationErrorList{}
if len(rbd.CephMonitors) == 0 {
allErrs = append(allErrs, errs.NewFieldRequired("monitors"))
}
if rbd.RBDImage == "" {
allErrs = append(allErrs, errs.NewFieldRequired("image"))
}
if rbd.FSType == "" {
allErrs = append(allErrs, errs.NewFieldRequired("fsType"))
}
return allErrs
}
func ValidatePersistentVolumeName(name string, prefix bool) (bool, string) {
return nameIsDNSSubdomain(name, prefix)
}
@ -496,6 +514,10 @@ func ValidatePersistentVolume(pv *api.PersistentVolume) errs.ValidationErrorList
numVolumes++
allErrs = append(allErrs, validateNFS(pv.Spec.NFS).Prefix("nfs")...)
}
if pv.Spec.RBD != nil {
numVolumes++
allErrs = append(allErrs, validateRBD(pv.Spec.RBD).Prefix("rbd")...)
}
if numVolumes != 1 {
allErrs = append(allErrs, errs.NewFieldInvalid("", pv.Spec.PersistentVolumeSource, "exactly 1 volume type is required"))
}


@ -434,6 +434,7 @@ func TestValidateVolumes(t *testing.T) {
{Name: "iscsidisk", VolumeSource: api.VolumeSource{ISCSI: &api.ISCSIVolumeSource{"127.0.0.1", "iqn.2015-02.example.com:test", 1, "ext4", false}}},
{Name: "secret", VolumeSource: api.VolumeSource{Secret: &api.SecretVolumeSource{"my-secret"}}},
{Name: "glusterfs", VolumeSource: api.VolumeSource{Glusterfs: &api.GlusterfsVolumeSource{"host1", "path", false}}},
{Name: "rbd", VolumeSource: api.VolumeSource{RBD: &api.RBDVolumeSource{CephMonitors: []string{"foo"}, RBDImage: "bar", FSType: "ext4"}}},
}
names, errs := validateVolumes(successCase)
if len(errs) != 0 {
@ -447,6 +448,8 @@ func TestValidateVolumes(t *testing.T) {
emptyIQN := api.VolumeSource{ISCSI: &api.ISCSIVolumeSource{"127.0.0.1", "", 1, "ext4", false}}
emptyHosts := api.VolumeSource{Glusterfs: &api.GlusterfsVolumeSource{"", "path", false}}
emptyPath := api.VolumeSource{Glusterfs: &api.GlusterfsVolumeSource{"host", "", false}}
emptyMon := api.VolumeSource{RBD: &api.RBDVolumeSource{CephMonitors: []string{}, RBDImage: "bar", FSType: "ext4"}}
emptyImage := api.VolumeSource{RBD: &api.RBDVolumeSource{CephMonitors: []string{"foo"}, RBDImage: "", FSType: "ext4"}}
errorCases := map[string]struct {
V []api.Volume
T errors.ValidationErrorType
@ -460,6 +463,8 @@ func TestValidateVolumes(t *testing.T) {
"empty iqn": {[]api.Volume{{Name: "badiqn", VolumeSource: emptyIQN}}, errors.ValidationErrorTypeRequired, "[0].source.iscsi.iqn"},
"empty hosts": {[]api.Volume{{Name: "badhost", VolumeSource: emptyHosts}}, errors.ValidationErrorTypeRequired, "[0].source.glusterfs.endpoints"},
"empty path": {[]api.Volume{{Name: "badpath", VolumeSource: emptyPath}}, errors.ValidationErrorTypeRequired, "[0].source.glusterfs.path"},
"empty mon": {[]api.Volume{{Name: "badmon", VolumeSource: emptyMon}}, errors.ValidationErrorTypeRequired, "[0].source.rbd.monitors"},
"empty image": {[]api.Volume{{Name: "badimage", VolumeSource: emptyImage}}, errors.ValidationErrorTypeRequired, "[0].source.rbd.image"},
}
for k, v := range errorCases {
_, errs := validateVolumes(v.V)


@ -0,0 +1,118 @@
/*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
//
// diskManager interface and diskSetUp/diskTearDown functions abstract commonly used procedures to set up a block volume.
// The rbd volume implements diskManager, calls diskSetUp when creating a volume, and calls diskTearDown inside the volume cleaner.
// TODO: consolidate, refactor, and share diskManager among iSCSI, GCE PD, and RBD
//
package rbd
import (
"os"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util/mount"
"github.com/golang/glog"
)
// Abstract interface to disk operations.
type diskManager interface {
MakeGlobalPDName(disk rbd) string
// Attaches the disk to the kubelet's host machine.
AttachDisk(disk rbd) error
// Detaches the disk from the kubelet's host machine.
DetachDisk(disk rbd, mntPath string) error
}
// utility to mount a disk based filesystem
func diskSetUp(manager diskManager, disk rbd, volPath string, mounter mount.Interface) error {
globalPDPath := manager.MakeGlobalPDName(disk)
// TODO: handle failed mounts here.
mountpoint, err := mounter.IsMountPoint(volPath)
if err != nil && !os.IsNotExist(err) {
glog.Errorf("cannot validate mountpoint: %s", volPath)
return err
}
if mountpoint {
return nil
}
if err := manager.AttachDisk(disk); err != nil {
glog.Errorf("failed to attach disk")
return err
}
if err := os.MkdirAll(volPath, 0750); err != nil {
glog.Errorf("failed to mkdir:%s", volPath)
return err
}
// Perform a bind mount to the full path to allow duplicate mounts of the same disk.
options := []string{"bind"}
if disk.readOnly {
options = append(options, "ro")
}
err = mounter.Mount(globalPDPath, volPath, "", options)
if err != nil {
glog.Errorf("failed to bind mount:%s", globalPDPath)
return err
}
return nil
}
// utility to tear down a disk based filesystem
func diskTearDown(manager diskManager, disk rbd, volPath string, mounter mount.Interface) error {
mountpoint, err := mounter.IsMountPoint(volPath)
if err != nil {
glog.Errorf("cannot validate mountpoint %s", volPath)
return err
}
if !mountpoint {
return os.Remove(volPath)
}
refs, err := mount.GetMountRefs(mounter, volPath)
if err != nil {
glog.Errorf("failed to get reference count %s", volPath)
return err
}
if err := mounter.Unmount(volPath); err != nil {
glog.Errorf("failed to umount %s", volPath)
return err
}
// If len(refs) is 1, then all bind mounts have been removed, and the
// remaining reference is the global mount. It is safe to detach.
if len(refs) == 1 {
mntPath := refs[0]
if err := manager.DetachDisk(disk, mntPath); err != nil {
glog.Errorf("failed to detach disk from %s", mntPath)
return err
}
}
mountpoint, mntErr := mounter.IsMountPoint(volPath)
if mntErr != nil {
glog.Errorf("isMountpoint check failed: %v", mntErr)
return err
}
if !mountpoint {
if err := os.Remove(volPath); err != nil {
return err
}
}
return nil
}

pkg/volume/rbd/rbd.go Normal file

@ -0,0 +1,203 @@
/*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package rbd
import (
"fmt"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/types"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util/exec"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util/mount"
"github.com/GoogleCloudPlatform/kubernetes/pkg/volume"
"github.com/golang/glog"
)
// This is the primary entrypoint for volume plugins.
func ProbeVolumePlugins() []volume.VolumePlugin {
return []volume.VolumePlugin{&RBDPlugin{nil, exec.New()}}
}
type RBDPlugin struct {
host volume.VolumeHost
exe exec.Interface
}
var _ volume.VolumePlugin = &RBDPlugin{}
const (
RBDPluginName = "kubernetes.io/rbd"
)
func (plugin *RBDPlugin) Init(host volume.VolumeHost) {
plugin.host = host
}
func (plugin *RBDPlugin) Name() string {
return RBDPluginName
}
func (plugin *RBDPlugin) CanSupport(spec *volume.Spec) bool {
if spec.VolumeSource.RBD == nil {
return false
}
// see if rbd is there
_, err := plugin.execCommand("rbd", []string{"-h"})
if err == nil {
return true
}
return false
}
func (plugin *RBDPlugin) GetAccessModes() []api.PersistentVolumeAccessMode {
return []api.PersistentVolumeAccessMode{
api.ReadWriteOnce,
api.ReadOnlyMany,
}
}
func (plugin *RBDPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions, mounter mount.Interface) (volume.Builder, error) {
secret := ""
if spec.VolumeSource.RBD.SecretRef != nil {
kubeClient := plugin.host.GetKubeClient()
if kubeClient == nil {
return nil, fmt.Errorf("Cannot get kube client")
}
secretName, err := kubeClient.Secrets(pod.Namespace).Get(spec.VolumeSource.RBD.SecretRef.Name)
if err != nil {
glog.Errorf("Couldn't get secret %v/%v", pod.Namespace, spec.VolumeSource.RBD.SecretRef)
return nil, err
}
for name, data := range secretName.Data {
secret = string(data)
glog.V(1).Infof("ceph secret info: %s/%s", name, secret)
}
}
// Inject real implementations here, test through the internal function.
return plugin.newBuilderInternal(spec, pod.UID, &RBDUtil{}, mounter, secret)
}
func (plugin *RBDPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface, secret string) (volume.Builder, error) {
pool := spec.VolumeSource.RBD.RBDPool
if pool == "" {
pool = "rbd"
}
id := spec.VolumeSource.RBD.RadosUser
if id == "" {
id = "admin"
}
keyring := spec.VolumeSource.RBD.Keyring
if keyring == "" {
keyring = "/etc/ceph/keyring"
}
return &rbd{
podUID: podUID,
volName: spec.Name,
mon: spec.VolumeSource.RBD.CephMonitors,
image: spec.VolumeSource.RBD.RBDImage,
pool: pool,
id: id,
keyring: keyring,
secret: secret,
fsType: spec.VolumeSource.RBD.FSType,
readOnly: spec.VolumeSource.RBD.ReadOnly,
manager: manager,
mounter: mounter,
plugin: plugin,
}, nil
}
func (plugin *RBDPlugin) NewCleaner(volName string, podUID types.UID, mounter mount.Interface) (volume.Cleaner, error) {
// Inject real implementations here, test through the internal function.
return plugin.newCleanerInternal(volName, podUID, &RBDUtil{}, mounter)
}
func (plugin *RBDPlugin) newCleanerInternal(volName string, podUID types.UID, manager diskManager, mounter mount.Interface) (volume.Cleaner, error) {
return &rbd{
podUID: podUID,
volName: volName,
manager: manager,
mounter: mounter,
plugin: plugin,
}, nil
}
type rbd struct {
volName string
podUID types.UID
mon []string
pool string
id string
image string
keyring string
secret string
fsType string
readOnly bool
plugin *RBDPlugin
mounter mount.Interface
// Utility interface that provides API calls to the provider to attach/detach disks.
manager diskManager
}
func (rbd *rbd) GetPath() string {
name := RBDPluginName
// safe to use PodVolumeDir now: volume teardown occurs before pod is cleaned up
return rbd.plugin.host.GetPodVolumeDir(rbd.podUID, util.EscapeQualifiedNameForDisk(name), rbd.volName)
}
func (rbd *rbd) SetUp() error {
return rbd.SetUpAt(rbd.GetPath())
}
func (rbd *rbd) SetUpAt(dir string) error {
// diskSetUp checks mountpoints and prevent repeated calls
err := diskSetUp(rbd.manager, *rbd, dir, rbd.mounter)
if err != nil {
glog.Errorf("rbd: failed to setup")
return err
}
globalPDPath := rbd.manager.MakeGlobalPDName(*rbd)
// make mountpoint rw/ro work as expected
//FIXME revisit pkg/util/mount and ensure rw/ro is implemented as expected
mode := "rw"
if rbd.readOnly {
mode = "ro"
}
rbd.plugin.execCommand("mount", []string{"-o", "remount," + mode, globalPDPath, dir})
return nil
}
// Unmounts the bind mount, and detaches the disk only if the disk
// resource was the last reference to that disk on the kubelet.
func (rbd *rbd) TearDown() error {
return rbd.TearDownAt(rbd.GetPath())
}
func (rbd *rbd) TearDownAt(dir string) error {
return diskTearDown(rbd.manager, *rbd, dir, rbd.mounter)
}
func (plugin *RBDPlugin) execCommand(command string, args []string) ([]byte, error) {
cmd := plugin.exe.Command(command, args...)
return cmd.CombinedOutput()
}

pkg/volume/rbd/rbd_test.go Normal file

@ -0,0 +1,130 @@
/*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package rbd
import (
"os"
"testing"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/types"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util/mount"
"github.com/GoogleCloudPlatform/kubernetes/pkg/volume"
)
func TestCanSupport(t *testing.T) {
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/rbd")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
if plug.Name() != "kubernetes.io/rbd" {
t.Errorf("Wrong name: %s", plug.Name())
}
}
type fakeDiskManager struct{}
func (fake *fakeDiskManager) MakeGlobalPDName(disk rbd) string {
return "/tmp/fake_rbd_path"
}
func (fake *fakeDiskManager) AttachDisk(disk rbd) error {
globalPath := disk.manager.MakeGlobalPDName(disk)
err := os.MkdirAll(globalPath, 0750)
if err != nil {
return err
}
return nil
}
func (fake *fakeDiskManager) DetachDisk(disk rbd, mntPath string) error {
globalPath := disk.manager.MakeGlobalPDName(disk)
err := os.RemoveAll(globalPath)
if err != nil {
return err
}
return nil
}
func TestPlugin(t *testing.T) {
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/rbd")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
spec := &api.Volume{
Name: "vol1",
VolumeSource: api.VolumeSource{
RBD: &api.RBDVolumeSource{
CephMonitors: []string{"a", "b"},
RBDImage: "bar",
FSType: "ext4",
},
},
}
builder, err := plug.(*RBDPlugin).newBuilderInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &fakeDiskManager{}, &mount.FakeMounter{}, "secrets")
if err != nil {
t.Errorf("Failed to make a new Builder: %v", err)
}
if builder == nil {
t.Errorf("Got a nil Builder: %v")
}
path := builder.GetPath()
if path != "/tmp/fake/pods/poduid/volumes/kubernetes.io~rbd/vol1" {
t.Errorf("Got unexpected path: %s", path)
}
if err := builder.SetUp(); err != nil {
t.Errorf("Expected success, got: %v", err)
}
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) {
t.Errorf("SetUp() failed, volume path not created: %s", path)
} else {
t.Errorf("SetUp() failed: %v", err)
}
}
cleaner, err := plug.(*RBDPlugin).newCleanerInternal("vol1", types.UID("poduid"), &fakeDiskManager{}, &mount.FakeMounter{})
if err != nil {
t.Errorf("Failed to make a new Cleaner: %v", err)
}
if cleaner == nil {
t.Errorf("Got a nil Cleaner: %v")
}
if err := cleaner.TearDown(); err != nil {
t.Errorf("Expected success, got: %v", err)
}
if _, err := os.Stat(path); err == nil {
t.Errorf("TearDown() failed, volume path still exists: %s", path)
} else if !os.IsNotExist(err) {
t.Errorf("SetUp() failed: %v", err)
}
}

pkg/volume/rbd/rbd_util.go Normal file

@ -0,0 +1,140 @@
/*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
//
// utility functions to setup rbd volume
// mainly implement diskManager interface
//
package rbd
import (
"errors"
"fmt"
"math/rand"
"os"
"path"
"strings"
"time"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util/mount"
"github.com/GoogleCloudPlatform/kubernetes/pkg/volume"
"github.com/golang/glog"
)
// stat a path, if not exists, retry maxRetries times
func waitForPathToExist(devicePath string, maxRetries int) bool {
for i := 0; i < maxRetries; i++ {
_, err := os.Stat(devicePath)
if err == nil {
return true
}
if err != nil && !os.IsNotExist(err) {
return false
}
time.Sleep(time.Second)
}
return false
}
// make a directory like /var/lib/kubelet/plugins/kubernetes.io/rbd/rbd/pool-image-image
func makePDNameInternal(host volume.VolumeHost, pool string, image string) string {
return path.Join(host.GetPluginDir(RBDPluginName), "rbd", pool+"-image-"+image)
}
type RBDUtil struct{}
func (util *RBDUtil) MakeGlobalPDName(rbd rbd) string {
return makePDNameInternal(rbd.plugin.host, rbd.pool, rbd.image)
}
func (util *RBDUtil) AttachDisk(rbd rbd) error {
var err error
devicePath := strings.Join([]string{"/dev/rbd", rbd.pool, rbd.image}, "/")
exist := waitForPathToExist(devicePath, 1)
if !exist {
// modprobe
_, err = rbd.plugin.execCommand("modprobe", []string{"rbd"})
if err != nil {
return fmt.Errorf("rbd: failed to modprobe rbd error:%v", err)
}
// rbd map
l := len(rbd.mon)
// avoid mount storm, pick a host randomly
start := rand.Int() % l
// iterate all hosts until mount succeeds.
for i := start; i < start+l; i++ {
mon := rbd.mon[i%l]
glog.V(1).Infof("rbd: map mon %s", mon)
if rbd.secret != "" {
_, err = rbd.plugin.execCommand("rbd",
[]string{"map", rbd.image, "--pool", rbd.pool, "--id", rbd.id, "-m", mon, "--key=" + rbd.secret})
} else {
_, err = rbd.plugin.execCommand("rbd",
[]string{"map", rbd.image, "--pool", rbd.pool, "--id", rbd.id, "-m", mon, "-k", rbd.keyring})
}
if err == nil {
break
}
}
}
if err != nil {
return err
}
exist = waitForPathToExist(devicePath, 10)
if !exist {
return errors.New("Could not map image: Timeout after 10s")
}
// mount it
globalPDPath := rbd.manager.MakeGlobalPDName(rbd)
mountpoint, err := rbd.mounter.IsMountPoint(globalPDPath)
if err != nil {
return fmt.Errorf("rbd: %s failed to check mountpoint", globalPDPath)
}
if mountpoint {
return nil
}
if err := os.MkdirAll(globalPDPath, 0750); err != nil {
return fmt.Errorf("rbd: failed to mkdir %s, error", globalPDPath)
}
if err = rbd.mounter.Mount(devicePath, globalPDPath, rbd.fsType, nil); err != nil {
err = fmt.Errorf("rbd: failed to mount rbd volume %s [%s] to %s, error %v", devicePath, rbd.fsType, globalPDPath, err)
}
return err
}
func (util *RBDUtil) DetachDisk(rbd rbd, mntPath string) error {
device, cnt, err := mount.GetDeviceNameFromMount(rbd.mounter, mntPath)
if err != nil {
return fmt.Errorf("rbd detach disk: failed to get device from mnt: %s\nError: %v", mntPath, err)
}
if err = rbd.mounter.Unmount(mntPath); err != nil {
return fmt.Errorf("rbd detach disk: failed to umount: %s\nError: %v", mntPath, err)
}
// if device is no longer used, see if can unmap
if cnt <= 1 {
// rbd unmap
_, err = rbd.plugin.execCommand("rbd", []string{"unmap", device})
if err != nil {
return fmt.Errorf("rbd: failed to unmap device %s:Error: %v", device, err)
}
glog.Infof("rbd: successfully unmap device %s", device)
}
return nil
}