implement Ceph FS volume plugin and add to e2e volume test

Signed-off-by: Huamin Chen <hchen@redhat.com>
pull/6/head
Huamin Chen 2015-04-09 14:05:24 -04:00
parent 64717962cf
commit fe559f2726
28 changed files with 1184 additions and 1 deletion

@ -12078,6 +12078,10 @@
"$ref": "v1.CinderVolumeSource",
"description": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md"
},
"cephfs": {
"$ref": "v1.CephFSVolumeSource",
"description": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime"
},
"accessModes": {
"type": "array",
"items": {
@ -12319,6 +12323,38 @@
}
}
},
"v1.CephFSVolumeSource": {
"id": "v1.CephFSVolumeSource",
"description": "CephFSVolumeSource represents a Ceph Filesystem Mount that lasts the lifetime of a pod",
"required": [
"monitors"
],
"properties": {
"monitors": {
"type": "array",
"items": {
"type": "string"
},
"description": "Required: Monitors is a collection of Ceph monitors More info: http://releases.k8s.io/HEAD/examples/cephfs/README.md#how-to-use-it"
},
"user": {
"type": "string",
"description": "Optional: User is the rados user name, default is admin More info: http://releases.k8s.io/HEAD/examples/cephfs/README.md#how-to-use-it"
},
"secretFile": {
"type": "string",
"description": "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: http://releases.k8s.io/HEAD/examples/cephfs/README.md#how-to-use-it"
},
"secretRef": {
"$ref": "v1.LocalObjectReference",
"description": "Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: http://releases.k8s.io/HEAD/examples/cephfs/README.md#how-to-use-it"
},
"readOnly": {
"type": "boolean",
"description": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: http://releases.k8s.io/HEAD/examples/cephfs/README.md#how-to-use-it"
}
}
},
"v1.PersistentVolumeStatus": {
"id": "v1.PersistentVolumeStatus",
"description": "PersistentVolumeStatus is the current status of a persistent volume.",
@ -12517,6 +12553,10 @@
"cinder": {
"$ref": "v1.CinderVolumeSource",
"description": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md"
},
"cephfs": {
"$ref": "v1.CephFSVolumeSource",
"description": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime"
}
}
},

@ -26,6 +26,7 @@ import (
// Volume plugins
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/aws_ebs"
"k8s.io/kubernetes/pkg/volume/cephfs"
"k8s.io/kubernetes/pkg/volume/cinder"
"k8s.io/kubernetes/pkg/volume/empty_dir"
"k8s.io/kubernetes/pkg/volume/gce_pd"
@ -60,6 +61,8 @@ func ProbeVolumePlugins() []volume.VolumePlugin {
allPlugins = append(allPlugins, persistent_claim.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, rbd.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, cinder.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, cephfs.ProbeVolumePlugins()...)
return allPlugins
}

@ -0,0 +1,12 @@
FROM centos:6
MAINTAINER Huamin Chen, hchen@redhat.com
ADD install.sh /usr/local/bin/
RUN /usr/local/bin/install.sh
ADD init.sh /usr/local/bin/
ADD index.html /tmp/
RUN chmod 644 /tmp/index.html
EXPOSE 6789/tcp
ENTRYPOINT ["/usr/local/bin/init.sh"]

@ -0,0 +1,13 @@
all: push

TAG = 0.1

container:
	docker build -t gcr.io/google_containers/volume-ceph .  # Build new image and automatically tag it as latest
	docker tag gcr.io/google_containers/volume-ceph gcr.io/google_containers/volume-ceph:$(TAG)  # Add the version tag to the latest image

push: container
	gcloud preview docker push gcr.io/google_containers/volume-ceph  # Push the image tagged as latest to the repository
	gcloud preview docker push gcr.io/google_containers/volume-ceph:$(TAG)  # Push the version-tagged image (creates or updates the version tag)

clean:
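A typical invocation (assuming `gcloud` is installed and authorized to push to the `google_containers` registry):

```console
$ make container TAG=0.1   # build and tag the image
$ make push TAG=0.1        # build, tag, and push both tags
```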

@ -0,0 +1,8 @@
# Ceph server container for testing
This container exports a CephFS file system with an index.html inside.
It is used by test/e2e/* to test CephFSVolumeSource. Not for production use!
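To try the image locally, it can be built and run directly (a sketch: the image name follows the Makefile above, and `--privileged` is an assumption, since the Ceph daemons need extra privileges):

```console
$ docker build -t gcr.io/google_containers/volume-ceph .
$ docker run -d --privileged gcr.io/google_containers/volume-ceph
```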
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/contrib/for-tests/volumes-tester/ceph/README.md?pixel)]()

@ -0,0 +1 @@
Hello Ceph!

@ -0,0 +1,93 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#set -e
set -x
# clean up
rm -f /etc/ceph/*
pkill -9 ceph-mon
pkill -9 ceph-osd
pkill -9 ceph-mds
mkdir -p /var/lib/ceph
mkdir -p /var/lib/ceph/osd
mkdir -p /var/lib/ceph/osd/ceph-0
# create hostname for ceph monitor
MASTER=$(hostname -s)
ip=$(ip -4 -o a | grep eth0 | awk '{print $4}' | cut -d'/' -f1)
echo "$ip $MASTER" >> /etc/hosts
#create ceph cluster
ceph-deploy --overwrite-conf new ${MASTER}
ceph-deploy --overwrite-conf mon create-initial ${MASTER}
ceph-deploy --overwrite-conf mon create ${MASTER}
ceph-deploy gatherkeys ${MASTER}
# set osd params for minimal configuration
echo "osd crush chooseleaf type = 0" >> /etc/ceph/ceph.conf
echo "osd journal size = 100" >> /etc/ceph/ceph.conf
echo "osd pool default size = 1" >> /etc/ceph/ceph.conf
echo "osd pool default pgp num = 8" >> /etc/ceph/ceph.conf
echo "osd pool default pg num = 8" >> /etc/ceph/ceph.conf
/sbin/service ceph -c /etc/ceph/ceph.conf stop mon.${MASTER}
/sbin/service ceph -c /etc/ceph/ceph.conf start mon.${MASTER}
# create ceph osd
ceph osd create
ceph-osd -i 0 --mkfs --mkkey
ceph auth add osd.0 osd 'allow *' mon 'allow rwx' -i /var/lib/ceph/osd/ceph-0/keyring
ceph osd crush add 0 1 root=default host=${MASTER}
ceph-osd -i 0 -k /var/lib/ceph/osd/ceph-0/keyring
#see if we are ready to go
ceph osd tree
# create ceph fs
ceph osd pool create cephfs_data 4
ceph osd pool create cephfs_metadata 4
ceph fs new cephfs cephfs_metadata cephfs_data
ceph-deploy --overwrite-conf mds create ${MASTER}
# uncomment the following for rbd test
# ceph osd pool create kube 4
# rbd create foo --size 10 --pool kube
ps -ef | grep ceph
ceph osd dump
# add a new client with a pre-defined keyring
# this keyring must match the ceph secret in e2e test
cat > /etc/ceph/ceph.client.kube.keyring <<EOF
[client.kube]
key = AQAMgXhVwBCeDhAA9nlPaFyfUSatGD4drFWDvQ==
caps mds = "allow rwx"
caps mon = "allow rwx"
caps osd = "allow rwx"
EOF
ceph auth import -i /etc/ceph/ceph.client.kube.keyring
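# (optional) verify the imported key: ceph auth get client.kube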
# mount it through ceph-fuse and copy file to ceph fs
ceph-fuse -m ${MASTER}:6789 /mnt
cp /tmp/index.html /mnt
chmod 644 /mnt/index.html
# watch
ceph -w
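Once the server container is up, basic health can be checked from a shell inside it, e.g. (a sketch):

```console
$ ceph -s    # overall cluster and mds status
$ ls /mnt    # should list the copied index.html
```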

@ -0,0 +1,27 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
yum update -y -v
yum install openssh openssh-server openssh-clients hostname -y -q
ssh-keygen -f ~/.ssh/id_rsa -t rsa -N ''
cat ~/.ssh/id_rsa.pub | awk '{print $1, $2, "Generated"}' >> ~/.ssh/authorized_keys2
cat ~/.ssh/id_rsa.pub | awk '{print $1, $2, "Generated"}' >> ~/.ssh/authorized_keys
rpm -Uvh http://ceph.com/rpm/rhel6/noarch/ceph-release-1-0.el6.noarch.rpm
yum install -y -q python-itsdangerous python-werkzeug python-jinja2 python-flask ceph-deploy epel-release
# ceph pkg depends on epel-release
yum install -y -q ceph ceph-fuse

examples/cephfs/README.md (new file, 68 lines)
@ -0,0 +1,68 @@
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
<!-- BEGIN STRIP_FOR_RELEASE -->
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
width="25" height="25">
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
width="25" height="25">
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
width="25" height="25">
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
width="25" height="25">
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
width="25" height="25">
<h2>PLEASE NOTE: This document applies to the HEAD of the source tree</h2>
If you are using a released version of Kubernetes, you should
refer to the docs that go with that version.
<strong>
The latest 1.0.x release of this document can be found
[here](http://releases.k8s.io/release-1.0/examples/cephfs/README.md).
Documentation for other releases can be found at
[releases.k8s.io](http://releases.k8s.io).
</strong>
--
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
# How to Use It?
Install Ceph on the Kubernetes host. For example, on Fedora 21, run `yum -y install ceph`.
If you don't have a Ceph cluster, you can set up a [containerized Ceph cluster](https://github.com/rootfs/ceph_docker).
Then get the keyring from the Ceph cluster and copy it to */etc/ceph/keyring*.
Once you have installed Ceph and a new Kubernetes cluster, you can create a pod based on the examples [cephfs.json](cephfs.json) and [cephfs-with-secret.json](cephfs-with-secret.json). In the pod JSON, you need to provide the following information:
- *monitors*: Array of Ceph monitors.
- *user*: The RADOS user name. If not provided, default *admin* is used.
- *secretFile*: The path to the keyring file. If not provided, default */etc/ceph/user.secret* is used.
- *secretRef*: Reference to the Ceph authentication secret. If provided, *secretRef* overrides *secretFile*.
- *readOnly*: Whether the filesystem is mounted read-only.
Here are the commands:
```console
# create a secret if you want to use a Ceph secret instead of a secret file
# cluster/kubectl.sh create -f examples/cephfs/secret/ceph-secret.yaml
# cluster/kubectl.sh create -f examples/cephfs/cephfs.json
# cluster/kubectl.sh get pods
```
If you ssh to that machine, you can run `docker ps` to see the actual pod and `docker inspect` to see the volumes used by the container.
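For example (run on the node; names and output will vary):

```console
$ docker ps | grep cephfs
$ mount | grep -w ceph
```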
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/cephfs/README.md?pixel)]()
<!-- END MUNGE: GENERATED_ANALYTICS -->

@ -0,0 +1,39 @@
{
"apiVersion": "v1",
"id": "cephfs2",
"kind": "Pod",
"metadata": {
"name": "cephfs2"
},
"spec": {
"containers": [
{
"name": "cephfs-rw",
"image": "kubernetes/pause",
"volumeMounts": [
{
"mountPath": "/mnt/cephfs",
"name": "cephfs"
}
]
}
],
"volumes": [
{
"name": "cephfs",
"cephfs": {
"monitors": [
"10.16.154.78:6789",
"10.16.154.82:6789",
"10.16.154.83:6789"
],
"user": "admin",
"secretRef": {
"name": "ceph-secret"
},
"readOnly": true
}
}
]
}
}

@ -0,0 +1,37 @@
{
"apiVersion": "v1",
"id": "cephfs",
"kind": "Pod",
"metadata": {
"name": "cephfs"
},
"spec": {
"containers": [
{
"name": "cephfs-rw",
"image": "kubernetes/pause",
"volumeMounts": [
{
"mountPath": "/mnt/cephfs",
"name": "cephfs"
}
]
}
],
"volumes": [
{
"name": "cephfs",
"cephfs": {
"monitors": [
"10.16.154.78:6789",
"10.16.154.82:6789",
"10.16.154.83:6789"
],
"user": "admin",
"scretFile": "/etc/ceph/admin.secret",
"readOnly": true
}
}
]
}
}

@ -0,0 +1,6 @@
apiVersion: v1
kind: Secret
metadata:
name: ceph-secret
data:
key: QVFCMTZWMVZvRjVtRXhBQTVrQ1FzN2JCajhWVUxSdzI2Qzg0SEE9PQ==
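The `key` value is the base64 encoding of the raw Ceph secret. One way to generate it, assuming the `client.admin` key is the one to be stored:

```console
$ ceph auth get-key client.admin | base64
```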

@ -332,6 +332,10 @@ func TestExampleObjectSchemas(t *testing.T) {
"zookeeper-service": &api.Service{},
"zookeeper": &api.Pod{},
},
"../examples/cephfs/": {
"cephfs": &api.Pod{},
"cephfs-with-secret": &api.Pod{},
},
}
capabilities.SetForTests(capabilities.Capabilities{

@ -71,6 +71,29 @@ func deepCopy_api_Capabilities(in Capabilities, out *Capabilities, c *conversion
return nil
}
func deepCopy_api_CephFSVolumeSource(in CephFSVolumeSource, out *CephFSVolumeSource, c *conversion.Cloner) error {
if in.Monitors != nil {
out.Monitors = make([]string, len(in.Monitors))
for i := range in.Monitors {
out.Monitors[i] = in.Monitors[i]
}
} else {
out.Monitors = nil
}
out.User = in.User
out.SecretFile = in.SecretFile
if in.SecretRef != nil {
out.SecretRef = new(LocalObjectReference)
if err := deepCopy_api_LocalObjectReference(*in.SecretRef, out.SecretRef, c); err != nil {
return err
}
} else {
out.SecretRef = nil
}
out.ReadOnly = in.ReadOnly
return nil
}
func deepCopy_api_CinderVolumeSource(in CinderVolumeSource, out *CinderVolumeSource, c *conversion.Cloner) error {
out.VolumeID = in.VolumeID
out.FSType = in.FSType
@ -1193,6 +1216,14 @@ func deepCopy_api_PersistentVolumeSource(in PersistentVolumeSource, out *Persist
} else {
out.Cinder = nil
}
if in.CephFS != nil {
out.CephFS = new(CephFSVolumeSource)
if err := deepCopy_api_CephFSVolumeSource(*in.CephFS, out.CephFS, c); err != nil {
return err
}
} else {
out.CephFS = nil
}
return nil
}
@ -2133,6 +2164,14 @@ func deepCopy_api_VolumeSource(in VolumeSource, out *VolumeSource, c *conversion
} else {
out.Cinder = nil
}
if in.CephFS != nil {
out.CephFS = new(CephFSVolumeSource)
if err := deepCopy_api_CephFSVolumeSource(*in.CephFS, out.CephFS, c); err != nil {
return err
}
} else {
out.CephFS = nil
}
return nil
}
@ -2173,6 +2212,7 @@ func init() {
deepCopy_api_AWSElasticBlockStoreVolumeSource,
deepCopy_api_Binding,
deepCopy_api_Capabilities,
deepCopy_api_CephFSVolumeSource,
deepCopy_api_CinderVolumeSource,
deepCopy_api_ComponentCondition,
deepCopy_api_ComponentStatus,

@ -226,6 +226,8 @@ type VolumeSource struct {
RBD *RBDVolumeSource `json:"rbd,omitempty"`
// Cinder represents a cinder volume attached and mounted on kubelets host machine
Cinder *CinderVolumeSource `json:"cinder,omitempty"`
// CephFS represents a CephFS mount on the host that shares a pod's lifetime
CephFS *CephFSVolumeSource `json:"cephfs,omitempty"`
}
// Similar to VolumeSource but meant for the administrator who creates PVs.
@ -252,6 +254,8 @@ type PersistentVolumeSource struct {
ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty"`
// Cinder represents a cinder volume attached and mounted on kubelets host machine
Cinder *CinderVolumeSource `json:"cinder,omitempty"`
// CephFS represents a Ceph FS mount on the host that shares a pod's lifetime
CephFS *CephFSVolumeSource `json:"cephfs,omitempty"`
}
type PersistentVolumeClaimVolumeSource struct {
@ -577,6 +581,21 @@ type CinderVolumeSource struct {
ReadOnly bool `json:"readOnly,omitempty"`
}
// CephFSVolumeSource represents a Ceph Filesystem Mount that lasts the lifetime of a pod
type CephFSVolumeSource struct {
// Required: Monitors is a collection of Ceph monitors
Monitors []string `json:"monitors"`
// Optional: User is the rados user name, default is admin
User string `json:"user,omitempty"`
// Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret
SecretFile string `json:"secretFile,omitempty"`
// Optional: SecretRef is reference to the authentication secret for User, default is empty.
SecretRef *LocalObjectReference `json:"secretRef,omitempty"`
// Optional: Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
ReadOnly bool `json:"readOnly,omitempty"`
}
// ContainerPort represents a network port in a single container
type ContainerPort struct {
// Optional: If specified, this must be an IANA_SVC_NAME Each named port

@ -76,6 +76,32 @@ func convert_api_Capabilities_To_v1_Capabilities(in *api.Capabilities, out *Capa
return nil
}
func convert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource(in *api.CephFSVolumeSource, out *CephFSVolumeSource, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*api.CephFSVolumeSource))(in)
}
if in.Monitors != nil {
out.Monitors = make([]string, len(in.Monitors))
for i := range in.Monitors {
out.Monitors[i] = in.Monitors[i]
}
} else {
out.Monitors = nil
}
out.User = in.User
out.SecretFile = in.SecretFile
if in.SecretRef != nil {
out.SecretRef = new(LocalObjectReference)
if err := convert_api_LocalObjectReference_To_v1_LocalObjectReference(in.SecretRef, out.SecretRef, s); err != nil {
return err
}
} else {
out.SecretRef = nil
}
out.ReadOnly = in.ReadOnly
return nil
}
func convert_api_CinderVolumeSource_To_v1_CinderVolumeSource(in *api.CinderVolumeSource, out *CinderVolumeSource, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*api.CinderVolumeSource))(in)
@ -1373,6 +1399,14 @@ func convert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource(in *api.Per
} else {
out.Cinder = nil
}
if in.CephFS != nil {
out.CephFS = new(CephFSVolumeSource)
if err := convert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource(in.CephFS, out.CephFS, s); err != nil {
return err
}
} else {
out.CephFS = nil
}
return nil
}
@ -2363,6 +2397,14 @@ func convert_api_VolumeSource_To_v1_VolumeSource(in *api.VolumeSource, out *Volu
} else {
out.Cinder = nil
}
if in.CephFS != nil {
out.CephFS = new(CephFSVolumeSource)
if err := convert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource(in.CephFS, out.CephFS, s); err != nil {
return err
}
} else {
out.CephFS = nil
}
return nil
}
@ -2416,6 +2458,32 @@ func convert_v1_Capabilities_To_api_Capabilities(in *Capabilities, out *api.Capa
return nil
}
func convert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(in *CephFSVolumeSource, out *api.CephFSVolumeSource, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*CephFSVolumeSource))(in)
}
if in.Monitors != nil {
out.Monitors = make([]string, len(in.Monitors))
for i := range in.Monitors {
out.Monitors[i] = in.Monitors[i]
}
} else {
out.Monitors = nil
}
out.User = in.User
out.SecretFile = in.SecretFile
if in.SecretRef != nil {
out.SecretRef = new(api.LocalObjectReference)
if err := convert_v1_LocalObjectReference_To_api_LocalObjectReference(in.SecretRef, out.SecretRef, s); err != nil {
return err
}
} else {
out.SecretRef = nil
}
out.ReadOnly = in.ReadOnly
return nil
}
func convert_v1_CinderVolumeSource_To_api_CinderVolumeSource(in *CinderVolumeSource, out *api.CinderVolumeSource, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*CinderVolumeSource))(in)
@ -3713,6 +3781,14 @@ func convert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource(in *Persist
} else {
out.Cinder = nil
}
if in.CephFS != nil {
out.CephFS = new(api.CephFSVolumeSource)
if err := convert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(in.CephFS, out.CephFS, s); err != nil {
return err
}
} else {
out.CephFS = nil
}
return nil
}
@ -4703,6 +4779,14 @@ func convert_v1_VolumeSource_To_api_VolumeSource(in *VolumeSource, out *api.Volu
} else {
out.Cinder = nil
}
if in.CephFS != nil {
out.CephFS = new(api.CephFSVolumeSource)
if err := convert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(in.CephFS, out.CephFS, s); err != nil {
return err
}
} else {
out.CephFS = nil
}
return nil
}
@ -4711,6 +4795,7 @@ func init() {
convert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource,
convert_api_Binding_To_v1_Binding,
convert_api_Capabilities_To_v1_Capabilities,
convert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource,
convert_api_CinderVolumeSource_To_v1_CinderVolumeSource,
convert_api_ComponentCondition_To_v1_ComponentCondition,
convert_api_ComponentStatusList_To_v1_ComponentStatusList,
@ -4826,6 +4911,7 @@ func init() {
convert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource,
convert_v1_Binding_To_api_Binding,
convert_v1_Capabilities_To_api_Capabilities,
convert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource,
convert_v1_CinderVolumeSource_To_api_CinderVolumeSource,
convert_v1_ComponentCondition_To_api_ComponentCondition,
convert_v1_ComponentStatusList_To_api_ComponentStatusList,

@ -86,6 +86,29 @@ func deepCopy_v1_Capabilities(in Capabilities, out *Capabilities, c *conversion.
return nil
}
func deepCopy_v1_CephFSVolumeSource(in CephFSVolumeSource, out *CephFSVolumeSource, c *conversion.Cloner) error {
if in.Monitors != nil {
out.Monitors = make([]string, len(in.Monitors))
for i := range in.Monitors {
out.Monitors[i] = in.Monitors[i]
}
} else {
out.Monitors = nil
}
out.User = in.User
out.SecretFile = in.SecretFile
if in.SecretRef != nil {
out.SecretRef = new(LocalObjectReference)
if err := deepCopy_v1_LocalObjectReference(*in.SecretRef, out.SecretRef, c); err != nil {
return err
}
} else {
out.SecretRef = nil
}
out.ReadOnly = in.ReadOnly
return nil
}
func deepCopy_v1_CinderVolumeSource(in CinderVolumeSource, out *CinderVolumeSource, c *conversion.Cloner) error {
out.VolumeID = in.VolumeID
out.FSType = in.FSType
@ -1192,6 +1215,14 @@ func deepCopy_v1_PersistentVolumeSource(in PersistentVolumeSource, out *Persiste
} else {
out.Cinder = nil
}
if in.CephFS != nil {
out.CephFS = new(CephFSVolumeSource)
if err := deepCopy_v1_CephFSVolumeSource(*in.CephFS, out.CephFS, c); err != nil {
return err
}
} else {
out.CephFS = nil
}
return nil
}
@ -2138,6 +2169,14 @@ func deepCopy_v1_VolumeSource(in VolumeSource, out *VolumeSource, c *conversion.
} else {
out.Cinder = nil
}
if in.CephFS != nil {
out.CephFS = new(CephFSVolumeSource)
if err := deepCopy_v1_CephFSVolumeSource(*in.CephFS, out.CephFS, c); err != nil {
return err
}
} else {
out.CephFS = nil
}
return nil
}
@ -2175,6 +2214,7 @@ func init() {
deepCopy_v1_AWSElasticBlockStoreVolumeSource,
deepCopy_v1_Binding,
deepCopy_v1_Capabilities,
deepCopy_v1_CephFSVolumeSource,
deepCopy_v1_CinderVolumeSource,
deepCopy_v1_ComponentCondition,
deepCopy_v1_ComponentStatus,

@ -280,6 +280,8 @@ type VolumeSource struct {
// Cinder represents a cinder volume attached and mounted on kubelets host machine
// More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
Cinder *CinderVolumeSource `json:"cinder,omitempty"`
// CephFS represents a Ceph FS mount on the host that shares a pod's lifetime
CephFS *CephFSVolumeSource `json:"cephfs,omitempty"`
}
// PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace.
@ -328,6 +330,8 @@ type PersistentVolumeSource struct {
// Cinder represents a cinder volume attached and mounted on kubelets host machine
// More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
Cinder *CinderVolumeSource `json:"cinder,omitempty"`
// CephFS represents a Ceph FS mount on the host that shares a pod's lifetime
CephFS *CephFSVolumeSource `json:"cephfs,omitempty"`
}
// PersistentVolume (PV) is a storage resource provisioned by an administrator.
@ -594,6 +598,26 @@ type CinderVolumeSource struct {
ReadOnly bool `json:"readOnly,omitempty"`
}
// CephFSVolumeSource represents a Ceph Filesystem Mount that lasts the lifetime of a pod
type CephFSVolumeSource struct {
// Required: Monitors is a collection of Ceph monitors
// More info: http://releases.k8s.io/HEAD/examples/cephfs/README.md#how-to-use-it
Monitors []string `json:"monitors"`
// Optional: User is the rados user name, default is admin
// More info: http://releases.k8s.io/HEAD/examples/cephfs/README.md#how-to-use-it
User string `json:"user,omitempty"`
// Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret
// More info: http://releases.k8s.io/HEAD/examples/cephfs/README.md#how-to-use-it
SecretFile string `json:"secretFile,omitempty"`
// Optional: SecretRef is reference to the authentication secret for User, default is empty.
// More info: http://releases.k8s.io/HEAD/examples/cephfs/README.md#how-to-use-it
SecretRef *LocalObjectReference `json:"secretRef,omitempty"`
// Optional: Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// More info: http://releases.k8s.io/HEAD/examples/cephfs/README.md#how-to-use-it
ReadOnly bool `json:"readOnly,omitempty"`
}
const (
StorageMediumDefault StorageMedium = "" // use whatever the default is for the node
StorageMediumMemory StorageMedium = "Memory" // use memory (tmpfs)

@ -69,6 +69,19 @@ func (Capabilities) SwaggerDoc() map[string]string {
return map_Capabilities
}
var map_CephFSVolumeSource = map[string]string{
"": "CephFSVolumeSource represents a Ceph Filesystem Mount that lasts the lifetime of a pod",
"monitors": "Required: Monitors is a collection of Ceph monitors More info: http://releases.k8s.io/HEAD/examples/cephfs/README.md#how-to-use-it",
"user": "Optional: User is the rados user name, default is admin More info: http://releases.k8s.io/HEAD/examples/cephfs/README.md#how-to-use-it",
"secretFile": "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: http://releases.k8s.io/HEAD/examples/cephfs/README.md#how-to-use-it",
"secretRef": "Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: http://releases.k8s.io/HEAD/examples/cephfs/README.md#how-to-use-it",
"readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: http://releases.k8s.io/HEAD/examples/cephfs/README.md#how-to-use-it",
}
func (CephFSVolumeSource) SwaggerDoc() map[string]string {
return map_CephFSVolumeSource
}
var map_CinderVolumeSource = map[string]string{
"": "CinderVolumeSource represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet.",
"volumeID": "volume id used to identify the volume in cinder More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md",
@ -800,6 +813,7 @@ var map_PersistentVolumeSource = map[string]string{
"rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: http://releases.k8s.io/HEAD/examples/rbd/README.md",
"iscsi": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin.",
"cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md",
"cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime",
}
func (PersistentVolumeSource) SwaggerDoc() map[string]string {
@ -1375,6 +1389,7 @@ var map_VolumeSource = map[string]string{
"persistentVolumeClaim": "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistentvolumeclaims",
"rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: http://releases.k8s.io/HEAD/examples/rbd/README.md",
"cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md",
"cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime",
}
func (VolumeSource) SwaggerDoc() map[string]string {

@ -379,6 +379,10 @@ func validateSource(source *api.VolumeSource) errs.ValidationErrorList {
numVolumes++
allErrs = append(allErrs, validateCinderVolumeSource(source.Cinder).Prefix("cinder")...)
}
if source.CephFS != nil {
numVolumes++
allErrs = append(allErrs, validateCephFS(source.CephFS).Prefix("cephfs")...)
}
if numVolumes != 1 {
allErrs = append(allErrs, errs.NewFieldInvalid("", source, "exactly 1 volume type is required"))
}
@ -513,6 +517,14 @@ func validateCinderVolumeSource(cd *api.CinderVolumeSource) errs.ValidationError
return allErrs
}
func validateCephFS(cephfs *api.CephFSVolumeSource) errs.ValidationErrorList {
allErrs := errs.ValidationErrorList{}
if len(cephfs.Monitors) == 0 {
allErrs = append(allErrs, errs.NewFieldRequired("monitors"))
}
return allErrs
}
func ValidatePersistentVolumeName(name string, prefix bool) (bool, string) {
return NameIsDNSSubdomain(name, prefix)
}
@ -568,6 +580,10 @@ func ValidatePersistentVolume(pv *api.PersistentVolume) errs.ValidationErrorList
numVolumes++
allErrs = append(allErrs, validateRBD(pv.Spec.RBD).Prefix("rbd")...)
}
if pv.Spec.CephFS != nil {
numVolumes++
allErrs = append(allErrs, validateCephFS(pv.Spec.CephFS).Prefix("cephfs")...)
}
if pv.Spec.ISCSI != nil {
numVolumes++
allErrs = append(allErrs, validateISCSIVolumeSource(pv.Spec.ISCSI).Prefix("iscsi")...)

@ -457,12 +457,13 @@ func TestValidateVolumes(t *testing.T) {
{Name: "glusterfs", VolumeSource: api.VolumeSource{Glusterfs: &api.GlusterfsVolumeSource{EndpointsName: "host1", Path: "path", ReadOnly: false}}},
{Name: "rbd", VolumeSource: api.VolumeSource{RBD: &api.RBDVolumeSource{CephMonitors: []string{"foo"}, RBDImage: "bar", FSType: "ext4"}}},
{Name: "cinder", VolumeSource: api.VolumeSource{Cinder: &api.CinderVolumeSource{"29ea5088-4f60-4757-962e-dba678767887", "ext4", false}}},
{Name: "cephfs", VolumeSource: api.VolumeSource{CephFS: &api.CephFSVolumeSource{Monitors: []string{"foo"}}}},
}
names, errs := validateVolumes(successCase)
if len(errs) != 0 {
t.Errorf("expected success: %v", errs)
}
if len(names) != len(successCase) || !names.HasAll("abc", "123", "abc-123", "empty", "gcepd", "gitrepo", "secret", "iscsidisk", "cinder") {
if len(names) != len(successCase) || !names.HasAll("abc", "123", "abc-123", "empty", "gcepd", "gitrepo", "secret", "iscsidisk", "cinder", "cephfs") {
t.Errorf("wrong names result: %v", names)
}
emptyVS := api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}
@ -472,6 +473,7 @@ func TestValidateVolumes(t *testing.T) {
emptyPath := api.VolumeSource{Glusterfs: &api.GlusterfsVolumeSource{EndpointsName: "host", Path: "", ReadOnly: false}}
emptyMon := api.VolumeSource{RBD: &api.RBDVolumeSource{CephMonitors: []string{}, RBDImage: "bar", FSType: "ext4"}}
emptyImage := api.VolumeSource{RBD: &api.RBDVolumeSource{CephMonitors: []string{"foo"}, RBDImage: "", FSType: "ext4"}}
emptyCephFSMon := api.VolumeSource{CephFS: &api.CephFSVolumeSource{Monitors: []string{}}}
errorCases := map[string]struct {
V []api.Volume
T errors.ValidationErrorType
@ -487,6 +489,7 @@ func TestValidateVolumes(t *testing.T) {
"empty path": {[]api.Volume{{Name: "badpath", VolumeSource: emptyPath}}, errors.ValidationErrorTypeRequired, "[0].source.glusterfs.path"},
"empty mon": {[]api.Volume{{Name: "badmon", VolumeSource: emptyMon}}, errors.ValidationErrorTypeRequired, "[0].source.rbd.monitors"},
"empty image": {[]api.Volume{{Name: "badimage", VolumeSource: emptyImage}}, errors.ValidationErrorTypeRequired, "[0].source.rbd.image"},
"empty cephfs mon": {[]api.Volume{{Name: "badmon", VolumeSource: emptyCephFSMon}}, errors.ValidationErrorTypeRequired, "[0].source.cephfs.monitors"},
}
for k, v := range errorCases {
_, errs := validateVolumes(v.V)

@ -56,6 +56,29 @@ func deepCopy_api_Capabilities(in api.Capabilities, out *api.Capabilities, c *co
return nil
}
func deepCopy_api_CephFSVolumeSource(in api.CephFSVolumeSource, out *api.CephFSVolumeSource, c *conversion.Cloner) error {
if in.Monitors != nil {
out.Monitors = make([]string, len(in.Monitors))
for i := range in.Monitors {
out.Monitors[i] = in.Monitors[i]
}
} else {
out.Monitors = nil
}
out.User = in.User
out.SecretFile = in.SecretFile
if in.SecretRef != nil {
out.SecretRef = new(api.LocalObjectReference)
if err := deepCopy_api_LocalObjectReference(*in.SecretRef, out.SecretRef, c); err != nil {
return err
}
} else {
out.SecretRef = nil
}
out.ReadOnly = in.ReadOnly
return nil
}
func deepCopy_api_CinderVolumeSource(in api.CinderVolumeSource, out *api.CinderVolumeSource, c *conversion.Cloner) error {
out.VolumeID = in.VolumeID
out.FSType = in.FSType
@ -677,6 +700,14 @@ func deepCopy_api_VolumeSource(in api.VolumeSource, out *api.VolumeSource, c *co
} else {
out.Cinder = nil
}
if in.CephFS != nil {
out.CephFS = new(api.CephFSVolumeSource)
if err := deepCopy_api_CephFSVolumeSource(*in.CephFS, out.CephFS, c); err != nil {
return err
}
} else {
out.CephFS = nil
}
return nil
}
@ -1060,6 +1091,7 @@ func init() {
err := api.Scheme.AddGeneratedDeepCopyFuncs(
deepCopy_api_AWSElasticBlockStoreVolumeSource,
deepCopy_api_Capabilities,
deepCopy_api_CephFSVolumeSource,
deepCopy_api_CinderVolumeSource,
deepCopy_api_Container,
deepCopy_api_ContainerPort,

@ -62,6 +62,32 @@ func convert_api_Capabilities_To_v1_Capabilities(in *api.Capabilities, out *v1.C
return nil
}
func convert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource(in *api.CephFSVolumeSource, out *v1.CephFSVolumeSource, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*api.CephFSVolumeSource))(in)
}
if in.Monitors != nil {
out.Monitors = make([]string, len(in.Monitors))
for i := range in.Monitors {
out.Monitors[i] = in.Monitors[i]
}
} else {
out.Monitors = nil
}
out.User = in.User
out.SecretFile = in.SecretFile
if in.SecretRef != nil {
out.SecretRef = new(v1.LocalObjectReference)
if err := convert_api_LocalObjectReference_To_v1_LocalObjectReference(in.SecretRef, out.SecretRef, s); err != nil {
return err
}
} else {
out.SecretRef = nil
}
out.ReadOnly = in.ReadOnly
return nil
}
func convert_api_CinderVolumeSource_To_v1_CinderVolumeSource(in *api.CinderVolumeSource, out *v1.CinderVolumeSource, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*api.CinderVolumeSource))(in)
@ -722,6 +748,14 @@ func convert_api_VolumeSource_To_v1_VolumeSource(in *api.VolumeSource, out *v1.V
} else {
out.Cinder = nil
}
if in.CephFS != nil {
out.CephFS = new(v1.CephFSVolumeSource)
if err := convert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource(in.CephFS, out.CephFS, s); err != nil {
return err
}
} else {
out.CephFS = nil
}
return nil
}
@ -759,6 +793,32 @@ func convert_v1_Capabilities_To_api_Capabilities(in *v1.Capabilities, out *api.C
return nil
}
func convert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(in *v1.CephFSVolumeSource, out *api.CephFSVolumeSource, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*v1.CephFSVolumeSource))(in)
}
if in.Monitors != nil {
out.Monitors = make([]string, len(in.Monitors))
for i := range in.Monitors {
out.Monitors[i] = in.Monitors[i]
}
} else {
out.Monitors = nil
}
out.User = in.User
out.SecretFile = in.SecretFile
if in.SecretRef != nil {
out.SecretRef = new(api.LocalObjectReference)
if err := convert_v1_LocalObjectReference_To_api_LocalObjectReference(in.SecretRef, out.SecretRef, s); err != nil {
return err
}
} else {
out.SecretRef = nil
}
out.ReadOnly = in.ReadOnly
return nil
}
func convert_v1_CinderVolumeSource_To_api_CinderVolumeSource(in *v1.CinderVolumeSource, out *api.CinderVolumeSource, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*v1.CinderVolumeSource))(in)
@ -1419,6 +1479,14 @@ func convert_v1_VolumeSource_To_api_VolumeSource(in *v1.VolumeSource, out *api.V
} else {
out.Cinder = nil
}
if in.CephFS != nil {
out.CephFS = new(api.CephFSVolumeSource)
if err := convert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(in.CephFS, out.CephFS, s); err != nil {
return err
}
} else {
out.CephFS = nil
}
return nil
}
@ -2168,6 +2236,7 @@ func init() {
err := api.Scheme.AddGeneratedConversionFuncs(
convert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource,
convert_api_Capabilities_To_v1_Capabilities,
convert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource,
convert_api_CinderVolumeSource_To_v1_CinderVolumeSource,
convert_api_ContainerPort_To_v1_ContainerPort,
convert_api_Container_To_v1_Container,
@ -2226,6 +2295,7 @@ func init() {
convert_v1_APIVersion_To_expapi_APIVersion,
convert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource,
convert_v1_Capabilities_To_api_Capabilities,
convert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource,
convert_v1_CinderVolumeSource_To_api_CinderVolumeSource,
convert_v1_ContainerPort_To_api_ContainerPort,
convert_v1_Container_To_api_Container,

@ -73,6 +73,29 @@ func deepCopy_v1_Capabilities(in v1.Capabilities, out *v1.Capabilities, c *conve
return nil
}
func deepCopy_v1_CephFSVolumeSource(in v1.CephFSVolumeSource, out *v1.CephFSVolumeSource, c *conversion.Cloner) error {
if in.Monitors != nil {
out.Monitors = make([]string, len(in.Monitors))
for i := range in.Monitors {
out.Monitors[i] = in.Monitors[i]
}
} else {
out.Monitors = nil
}
out.User = in.User
out.SecretFile = in.SecretFile
if in.SecretRef != nil {
out.SecretRef = new(v1.LocalObjectReference)
if err := deepCopy_v1_LocalObjectReference(*in.SecretRef, out.SecretRef, c); err != nil {
return err
}
} else {
out.SecretRef = nil
}
out.ReadOnly = in.ReadOnly
return nil
}
func deepCopy_v1_CinderVolumeSource(in v1.CinderVolumeSource, out *v1.CinderVolumeSource, c *conversion.Cloner) error {
out.VolumeID = in.VolumeID
out.FSType = in.FSType
@ -695,6 +718,14 @@ func deepCopy_v1_VolumeSource(in v1.VolumeSource, out *v1.VolumeSource, c *conve
} else {
out.Cinder = nil
}
if in.CephFS != nil {
out.CephFS = new(v1.CephFSVolumeSource)
if err := deepCopy_v1_CephFSVolumeSource(*in.CephFS, out.CephFS, c); err != nil {
return err
}
} else {
out.CephFS = nil
}
return nil
}
@ -1068,6 +1099,7 @@ func init() {
deepCopy_resource_Quantity,
deepCopy_v1_AWSElasticBlockStoreVolumeSource,
deepCopy_v1_Capabilities,
deepCopy_v1_CephFSVolumeSource,
deepCopy_v1_CinderVolumeSource,
deepCopy_v1_Container,
deepCopy_v1_ContainerPort,

pkg/volume/cephfs/cephfs.go (new file, 265 lines)
@ -0,0 +1,265 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cephfs
import (
"fmt"
"os"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
)
// This is the primary entrypoint for volume plugins.
func ProbeVolumePlugins() []volume.VolumePlugin {
return []volume.VolumePlugin{&cephfsPlugin{nil}}
}
type cephfsPlugin struct {
host volume.VolumeHost
}
var _ volume.VolumePlugin = &cephfsPlugin{}
const (
cephfsPluginName = "kubernetes.io/cephfs"
)
func (plugin *cephfsPlugin) Init(host volume.VolumeHost) {
plugin.host = host
}
func (plugin *cephfsPlugin) Name() string {
return cephfsPluginName
}
func (plugin *cephfsPlugin) CanSupport(spec *volume.Spec) bool {
return spec.VolumeSource.CephFS != nil || spec.PersistentVolumeSource.CephFS != nil
}
func (plugin *cephfsPlugin) GetAccessModes() []api.PersistentVolumeAccessMode {
return []api.PersistentVolumeAccessMode{
api.ReadWriteOnce,
api.ReadOnlyMany,
api.ReadWriteMany,
}
}
func (plugin *cephfsPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions, mounter mount.Interface) (volume.Builder, error) {
cephvs := plugin.getVolumeSource(spec)
secret := ""
if cephvs.SecretRef != nil {
kubeClient := plugin.host.GetKubeClient()
if kubeClient == nil {
return nil, fmt.Errorf("Cannot get kube client")
}
secrets, err := kubeClient.Secrets(pod.Namespace).Get(cephvs.SecretRef.Name)
if err != nil {
err = fmt.Errorf("Couldn't get secret %v/%v err: %v", pod.Namespace, cephvs.SecretRef.Name, err)
return nil, err
}
// use the value of the (single expected) entry as the ceph secret
for name, data := range secrets.Data {
secret = string(data)
glog.V(1).Infof("found ceph secret info: %s", name)
}
}
return plugin.newBuilderInternal(spec, pod.UID, mounter, secret)
}
func (plugin *cephfsPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID, mounter mount.Interface, secret string) (volume.Builder, error) {
cephvs := plugin.getVolumeSource(spec)
id := cephvs.User
if id == "" {
id = "admin"
}
secret_file := cephvs.SecretFile
if secret_file == "" {
secret_file = "/etc/ceph/" + id + ".secret"
}
return &cephfsBuilder{
cephfs: &cephfs{
podUID: podUID,
volName: spec.Name,
mon: cephvs.Monitors,
secret: secret,
id: id,
secret_file: secret_file,
readonly: cephvs.ReadOnly,
mounter: mounter,
plugin: plugin},
}, nil
}
func (plugin *cephfsPlugin) NewCleaner(volName string, podUID types.UID, mounter mount.Interface) (volume.Cleaner, error) {
return plugin.newCleanerInternal(volName, podUID, mounter)
}
func (plugin *cephfsPlugin) newCleanerInternal(volName string, podUID types.UID, mounter mount.Interface) (volume.Cleaner, error) {
return &cephfsCleaner{
cephfs: &cephfs{
podUID: podUID,
volName: volName,
mounter: mounter,
plugin: plugin},
}, nil
}
func (plugin *cephfsPlugin) getVolumeSource(spec *volume.Spec) *api.CephFSVolumeSource {
if spec.VolumeSource.CephFS != nil {
return spec.VolumeSource.CephFS
} else {
return spec.PersistentVolumeSource.CephFS
}
}
// CephFS volumes represent a bare host file or directory mount of a CephFS export.
type cephfs struct {
volName string
podUID types.UID
mon []string
id string
secret string
secret_file string
readonly bool
mounter mount.Interface
plugin *cephfsPlugin
}
type cephfsBuilder struct {
*cephfs
}
var _ volume.Builder = &cephfsBuilder{}
// SetUp attaches the disk and bind mounts to the volume path.
func (cephfsVolume *cephfsBuilder) SetUp() error {
return cephfsVolume.SetUpAt(cephfsVolume.GetPath())
}
// SetUpAt attaches the disk and bind mounts to the volume path.
func (cephfsVolume *cephfsBuilder) SetUpAt(dir string) error {
notMnt, err := cephfsVolume.mounter.IsLikelyNotMountPoint(dir)
glog.V(4).Infof("CephFS mount set up: %s %v %v", dir, !notMnt, err)
if err != nil && !os.IsNotExist(err) {
return err
}
if !notMnt {
return nil
}
if err := os.MkdirAll(dir, 0750); err != nil {
return err
}
err = cephfsVolume.execMount(dir)
if err == nil {
return nil
}
// cleanup upon failure
cephfsVolume.cleanup(dir)
// return error
return err
}
func (cephfsVolume *cephfsBuilder) IsReadOnly() bool {
return cephfsVolume.readonly
}
type cephfsCleaner struct {
*cephfs
}
var _ volume.Cleaner = &cephfsCleaner{}
// TearDown unmounts the bind mount
func (cephfsVolume *cephfsCleaner) TearDown() error {
return cephfsVolume.TearDownAt(cephfsVolume.GetPath())
}
// TearDownAt unmounts the bind mount
func (cephfsVolume *cephfsCleaner) TearDownAt(dir string) error {
return cephfsVolume.cleanup(dir)
}
// GetPath returns the path to the pod's volume directory for this volume
func (cephfsVolume *cephfs) GetPath() string {
name := cephfsPluginName
return cephfsVolume.plugin.host.GetPodVolumeDir(cephfsVolume.podUID, util.EscapeQualifiedNameForDisk(name), cephfsVolume.volName)
}
func (cephfsVolume *cephfs) cleanup(dir string) error {
noMnt, err := cephfsVolume.mounter.IsLikelyNotMountPoint(dir)
if err != nil && !os.IsNotExist(err) {
return fmt.Errorf("CephFS: Error checking IsLikelyNotMountPoint: %v", err)
}
if noMnt {
return os.RemoveAll(dir)
}
if err := cephfsVolume.mounter.Unmount(dir); err != nil {
return fmt.Errorf("CephFS: Unmounting failed: %v", err)
}
noMnt, mntErr := cephfsVolume.mounter.IsLikelyNotMountPoint(dir)
if mntErr != nil {
return fmt.Errorf("CephFS: IsMountpoint check failed: %v", mntErr)
}
if noMnt {
if err := os.RemoveAll(dir); err != nil {
return fmt.Errorf("CephFS: removeAll %s/%v", dir, err)
}
}
return nil
}
func (cephfsVolume *cephfs) execMount(mountpoint string) error {
// cephfs mount option
ceph_opt := ""
// override secretfile if secret is provided
if cephfsVolume.secret != "" {
ceph_opt = "name=" + cephfsVolume.id + ",secret=" + cephfsVolume.secret
} else {
ceph_opt = "name=" + cephfsVolume.id + ",secretfile=" + cephfsVolume.secret_file
}
// build option array
opt := []string{}
if cephfsVolume.readonly {
opt = append(opt, "ro")
}
opt = append(opt, ceph_opt)
// build src like mon1:6789,mon2:6789,mon3:6789:/
// pass all monitors and let ceph randomize and fail over
src := strings.Join(cephfsVolume.mon, ",") + ":/"
if err := cephfsVolume.mounter.Mount(src, mountpoint, "ceph", opt); err != nil {
return fmt.Errorf("CephFS: mount failed: %v", err)
}
return nil
}
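For reference, the resulting call is equivalent to a manual kernel CephFS mount such as the following (addresses and paths are illustrative):

```console
$ mount -t ceph 10.16.154.78:6789,10.16.154.82:6789:/ \
    /var/lib/kubelet/pods/<pod-uid>/volumes/kubernetes.io~cephfs/cephfs \
    -o ro,name=admin,secretfile=/etc/ceph/admin.secret
```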

@ -0,0 +1,103 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cephfs
import (
"os"
"testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
)
func TestCanSupport(t *testing.T) {
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("fake", nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/cephfs")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
if plug.Name() != "kubernetes.io/cephfs" {
t.Errorf("Wrong name: %s", plug.Name())
}
if plug.CanSupport(&volume.Spec{Name: "foo", VolumeSource: api.VolumeSource{}}) {
t.Errorf("Expected false")
}
if !plug.CanSupport(&volume.Spec{Name: "foo", VolumeSource: api.VolumeSource{CephFS: &api.CephFSVolumeSource{}}}) {
t.Errorf("Expected true")
}
}
func TestPlugin(t *testing.T) {
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/cephfs")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
spec := &api.Volume{
Name: "vol1",
VolumeSource: api.VolumeSource{
CephFS: &api.CephFSVolumeSource{
Monitors: []string{"a", "b"},
User: "user",
SecretRef: nil,
SecretFile: "/etc/ceph/user.secret",
},
},
}
builder, err := plug.(*cephfsPlugin).newBuilderInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &mount.FakeMounter{}, "secrets")
if err != nil {
t.Errorf("Failed to make a new Builder: %v", err)
}
if builder == nil {
t.Fatalf("Got a nil Builder")
}
volumePath := builder.GetPath()
path := builder.GetPath()
if path != "/tmp/fake/pods/poduid/volumes/kubernetes.io~cephfs/vol1" {
t.Errorf("Got unexpected path: %s", path)
}
if err := builder.SetUp(); err != nil {
t.Errorf("Expected success, got: %v", err)
}
if _, err := os.Stat(volumePath); err != nil {
if os.IsNotExist(err) {
t.Errorf("SetUp() failed, volume path not created: %s", volumePath)
} else {
t.Errorf("SetUp() failed: %v", err)
}
}
cleaner, err := plug.(*cephfsPlugin).newCleanerInternal("vol1", types.UID("poduid"), &mount.FakeMounter{})
if err != nil {
t.Errorf("Failed to make a new Cleaner: %v", err)
}
if cleaner == nil {
t.Fatalf("Got a nil Cleaner")
}
if err := cleaner.TearDown(); err != nil {
t.Errorf("Expected success, got: %v", err)
}
if _, err := os.Stat(volumePath); err == nil {
t.Errorf("TearDown() failed, volume path still exists: %s", volumePath)
} else if !os.IsNotExist(err) {
t.Errorf("TearDown() failed: %v", err)
}
}
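The plugin's unit tests can be run in isolation with the standard Go tooling:

```console
$ go test k8s.io/kubernetes/pkg/volume/cephfs
```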

pkg/volume/cephfs/doc.go (new file, 19 lines)
@ -0,0 +1,19 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package cephfs contains the internal representation of Ceph file system
// (CephFS) volumes.
package cephfs

@ -478,5 +478,73 @@ var _ = Describe("Volumes", func() {
})
})
////////////////////////////////////////////////////////////////////////
// Ceph
////////////////////////////////////////////////////////////////////////
// Marked with [Skipped] to skip the test by default (see driver.go),
// the test needs privileged containers, which are disabled by default.
// Run the test with "go run hack/e2e.go ... --ginkgo.focus=Volume"
Describe("[Skipped] CephFS", func() {
It("should be mountable", func() {
config := VolumeTestConfig{
namespace: namespace.Name,
prefix: "cephfs",
serverImage: "gcr.io/google_containers/volume-ceph",
serverPorts: []int{6789},
}
defer func() {
if clean {
volumeTestCleanup(c, config)
}
}()
pod := startVolumeServer(c, config)
serverIP := pod.Status.PodIP
Logf("Ceph server IP address: %v", serverIP)
By("sleeping a bit to give ceph server time to initialize")
time.Sleep(20 * time.Second)
// create ceph secret
secret := &api.Secret{
TypeMeta: api.TypeMeta{
Kind: "Secret",
APIVersion: "v1beta3",
},
ObjectMeta: api.ObjectMeta{
Name: config.prefix + "-secret",
},
// Must match the Ceph keyring created in
// contrib/for-tests/volumes-tester/ceph/init.sh
Data: map[string][]byte{
"key": []byte("AQAMgXhVwBCeDhAA9nlPaFyfUSatGD4drFWDvQ=="),
},
}
defer func() {
if clean {
if err := c.Secrets(namespace.Name).Delete(secret.Name); err != nil {
Failf("unable to delete secret %v: %v", secret.Name, err)
}
}
}()
var err error
if secret, err = c.Secrets(namespace.Name).Create(secret); err != nil {
Failf("unable to create test secret %s: %v", secret.Name, err)
}
volume := api.VolumeSource{
CephFS: &api.CephFSVolumeSource{
Monitors: []string{serverIP + ":6789"},
User: "kube",
SecretRef: &api.LocalObjectReference{Name: config.prefix + "-secret"},
ReadOnly: true,
},
}
// Must match content of contrib/for-tests/volumes-tester/ceph/index.html
testVolumeClient(c, config, volume, "Hello Ceph!")
})
})
})