From fe559f27264f424116008307fb49250ad0446afe Mon Sep 17 00:00:00 2001 From: Huamin Chen Date: Thu, 9 Apr 2015 14:05:24 -0400 Subject: [PATCH] implement Ceph FS volume plugin and add to e2e volume test Signed-off-by: Huamin Chen --- api/swagger-spec/v1.json | 40 +++ cmd/kubelet/app/plugins.go | 3 + .../for-tests/volumes-tester/ceph/Dockerfile | 12 + .../for-tests/volumes-tester/ceph/Makefile | 13 + .../for-tests/volumes-tester/ceph/README.md | 8 + .../for-tests/volumes-tester/ceph/index.html | 1 + contrib/for-tests/volumes-tester/ceph/init.sh | 93 ++++++ .../for-tests/volumes-tester/ceph/install.sh | 27 ++ examples/cephfs/README.md | 68 +++++ examples/cephfs/cephfs-with-secret.json | 39 +++ examples/cephfs/cephfs.json | 37 +++ examples/cephfs/secret/ceph-secret.yaml | 6 + examples/examples_test.go | 4 + pkg/api/deep_copy_generated.go | 40 +++ pkg/api/types.go | 19 ++ pkg/api/v1/conversion_generated.go | 86 ++++++ pkg/api/v1/deep_copy_generated.go | 40 +++ pkg/api/v1/types.go | 24 ++ pkg/api/v1/types_swagger_doc_generated.go | 15 + pkg/api/validation/validation.go | 16 ++ pkg/api/validation/validation_test.go | 5 +- pkg/expapi/deep_copy_generated.go | 32 +++ pkg/expapi/v1/conversion_generated.go | 70 +++++ pkg/expapi/v1/deep_copy_generated.go | 32 +++ pkg/volume/cephfs/cephfs.go | 265 ++++++++++++++++++ pkg/volume/cephfs/cephfs_test.go | 103 +++++++ pkg/volume/cephfs/doc.go | 19 ++ test/e2e/volumes.go | 68 +++++ 28 files changed, 1184 insertions(+), 1 deletion(-) create mode 100644 contrib/for-tests/volumes-tester/ceph/Dockerfile create mode 100644 contrib/for-tests/volumes-tester/ceph/Makefile create mode 100644 contrib/for-tests/volumes-tester/ceph/README.md create mode 100644 contrib/for-tests/volumes-tester/ceph/index.html create mode 100755 contrib/for-tests/volumes-tester/ceph/init.sh create mode 100755 contrib/for-tests/volumes-tester/ceph/install.sh create mode 100644 examples/cephfs/README.md create mode 100644 examples/cephfs/cephfs-with-secret.json create mode 100644 examples/cephfs/cephfs.json create mode 100644 examples/cephfs/secret/ceph-secret.yaml create mode 100644 pkg/volume/cephfs/cephfs.go create mode 100644 pkg/volume/cephfs/cephfs_test.go create mode 100644 pkg/volume/cephfs/doc.go diff --git a/api/swagger-spec/v1.json b/api/swagger-spec/v1.json index 0588601d09..45245059db 100644 --- a/api/swagger-spec/v1.json +++ b/api/swagger-spec/v1.json @@ -12078,6 +12078,10 @@ "$ref": "v1.CinderVolumeSource", "description": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md" }, + "cephfs": { + "$ref": "v1.CephFSVolumeSource", + "description": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime" + }, "accessModes": { "type": "array", "items": { @@ -12319,6 +12323,38 @@ } } }, + "v1.CephFSVolumeSource": { + "id": "v1.CephFSVolumeSource", + "description": "CephFSVolumeSource represents a Ceph Filesystem Mount that lasts the lifetime of a pod", + "required": [ + "monitors" + ], + "properties": { + "monitors": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Required: Monitors is a collection of Ceph monitors More info: http://releases.k8s.io/HEAD/examples/cephfs/README.md#how-to-use-it" + }, + "user": { + "type": "string", + "description": "Optional: User is the rados user name, default is admin More info: http://releases.k8s.io/HEAD/examples/cephfs/README.md#how-to-use-it" + }, + "secretFile": { + "type": "string", + 
"description": "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: http://releases.k8s.io/HEAD/examples/cephfs/README.md#how-to-use-it" + }, + "secretRef": { + "$ref": "v1.LocalObjectReference", + "description": "Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: http://releases.k8s.io/HEAD/examples/cephfs/README.md#how-to-use-it" + }, + "readOnly": { + "type": "boolean", + "description": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: http://releases.k8s.io/HEAD/examples/cephfs/README.md#how-to-use-it" + } + } + }, "v1.PersistentVolumeStatus": { "id": "v1.PersistentVolumeStatus", "description": "PersistentVolumeStatus is the current status of a persistent volume.", @@ -12517,6 +12553,10 @@ "cinder": { "$ref": "v1.CinderVolumeSource", "description": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md" + }, + "cephfs": { + "$ref": "v1.CephFSVolumeSource", + "description": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime" } } }, diff --git a/cmd/kubelet/app/plugins.go b/cmd/kubelet/app/plugins.go index bdaf8c842a..bcffa4448d 100644 --- a/cmd/kubelet/app/plugins.go +++ b/cmd/kubelet/app/plugins.go @@ -26,6 +26,7 @@ import ( // Volume plugins "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/aws_ebs" + "k8s.io/kubernetes/pkg/volume/cephfs" "k8s.io/kubernetes/pkg/volume/cinder" "k8s.io/kubernetes/pkg/volume/empty_dir" "k8s.io/kubernetes/pkg/volume/gce_pd" @@ -60,6 +61,8 @@ func ProbeVolumePlugins() []volume.VolumePlugin { allPlugins = append(allPlugins, persistent_claim.ProbeVolumePlugins()...) allPlugins = append(allPlugins, rbd.ProbeVolumePlugins()...) allPlugins = append(allPlugins, cinder.ProbeVolumePlugins()...) + allPlugins = append(allPlugins, cephfs.ProbeVolumePlugins()...) + return allPlugins } diff --git a/contrib/for-tests/volumes-tester/ceph/Dockerfile b/contrib/for-tests/volumes-tester/ceph/Dockerfile new file mode 100644 index 0000000000..5430e2c426 --- /dev/null +++ b/contrib/for-tests/volumes-tester/ceph/Dockerfile @@ -0,0 +1,12 @@ +FROM centos:6 +MAINTAINER Huamin Chen, hchen@redhat.com + +ADD install.sh /usr/local/bin/ +RUN /usr/local/bin/install.sh +ADD init.sh /usr/local/bin/ +ADD index.html /tmp/ +RUN chmod 644 /tmp/index.html + +EXPOSE 6789/tcp + +ENTRYPOINT ["/usr/local/bin/init.sh"] diff --git a/contrib/for-tests/volumes-tester/ceph/Makefile b/contrib/for-tests/volumes-tester/ceph/Makefile new file mode 100644 index 0000000000..2e46bfd768 --- /dev/null +++ b/contrib/for-tests/volumes-tester/ceph/Makefile @@ -0,0 +1,13 @@ +all: push + +TAG = 0.1 + +container: + docker build -t gcr.io/google_containers/volume-ceph . 
# Build new image and automatically tag it as latest + docker tag gcr.io/google_containers/volume-ceph gcr.io/google_containers/volume-ceph:$(TAG) # Add the version tag to the latest image + +push: container + gcloud preview docker push gcr.io/google_containers/volume-ceph # Push image tagged as latest to repository + gcloud preview docker push gcr.io/google_containers/volume-ceph:$(TAG) # Push version tagged image to repository (since this image is already pushed it will simply create or update version tag) + +clean: diff --git a/contrib/for-tests/volumes-tester/ceph/README.md b/contrib/for-tests/volumes-tester/ceph/README.md new file mode 100644 index 0000000000..71e575165e --- /dev/null +++ b/contrib/for-tests/volumes-tester/ceph/README.md @@ -0,0 +1,8 @@ +# Ceph server container for testing + +This container exports ceph fs with an index.html inside. + +Used by test/e2e/* to test CephFSVolumeSource. Not for production use! + + +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/contrib/for-tests/volumes-tester/ceph/README.md?pixel)]() diff --git a/contrib/for-tests/volumes-tester/ceph/index.html b/contrib/for-tests/volumes-tester/ceph/index.html new file mode 100644 index 0000000000..337469d71b --- /dev/null +++ b/contrib/for-tests/volumes-tester/ceph/index.html @@ -0,0 +1 @@ +Hello Ceph! diff --git a/contrib/for-tests/volumes-tester/ceph/init.sh b/contrib/for-tests/volumes-tester/ceph/init.sh new file mode 100755 index 0000000000..9cf3132939 --- /dev/null +++ b/contrib/for-tests/volumes-tester/ceph/init.sh @@ -0,0 +1,93 @@ +#!/bin/bash + +# Copyright 2015 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +#set -e +set -x + +# clean up +rm -f /etc/ceph/* + +pkill -9 ceph-mon +pkill -9 ceph-osd +pkill -9 ceph-mds + +mkdir -p /var/lib/ceph +mkdir -p /var/lib/ceph/osd +mkdir -p /var/lib/ceph/osd/ceph-0 + +# create hostname for ceph monitor +MASTER=`hostname -s` + +ip=$(ip -4 -o a | grep eth0 | awk '{print $4}' | cut -d'/' -f1) +echo "$ip $MASTER" >> /etc/hosts + +#create ceph cluster +ceph-deploy --overwrite-conf new ${MASTER} +ceph-deploy --overwrite-conf mon create-initial ${MASTER} +ceph-deploy --overwrite-conf mon create ${MASTER} +ceph-deploy gatherkeys ${MASTER} + +# set osd params for minimal configuration +echo "osd crush chooseleaf type = 0" >> /etc/ceph/ceph.conf +echo "osd journal size = 100" >> /etc/ceph/ceph.conf +echo "osd pool default size = 1" >> /etc/ceph/ceph.conf +echo "osd pool default pgp num = 8" >> /etc/ceph/ceph.conf +echo "osd pool default pg num = 8" >> /etc/ceph/ceph.conf + +/sbin/service ceph -c /etc/ceph/ceph.conf stop mon.${MASTER} +/sbin/service ceph -c /etc/ceph/ceph.conf start mon.${MASTER} + +# create ceph osd +ceph osd create +ceph-osd -i 0 --mkfs --mkkey +ceph auth add osd.0 osd 'allow *' mon 'allow rwx' -i /var/lib/ceph/osd/ceph-0/keyring +ceph osd crush add 0 1 root=default host=${MASTER} +ceph-osd -i 0 -k /var/lib/ceph/osd/ceph-0/keyring + +#see if we are ready to go +ceph osd tree + +# create ceph fs +ceph osd pool create cephfs_data 4 +ceph osd pool create cephfs_metadata 4 +ceph fs new cephfs cephfs_metadata cephfs_data +ceph-deploy --overwrite-conf mds create ${MASTER} + +# uncomment the following for rbd test +# ceph osd pool create kube 4 +# rbd create foo --size 10 --pool kube + +ps -ef |grep ceph +ceph osd dump + +# add new client with a pre defined keyring +# this keyring must match the ceph secret in e2e test +cat > /etc/ceph/ceph.client.kube.keyring <> ~/.ssh/authorized_keys2 +cat ~/.ssh/id_rsa.pub |awk '{print $1, $2, "Generated"}' >> ~/.ssh/authorized_keys + +rpm -Uvh http://ceph.com/rpm/rhel6/noarch/ceph-release-1-0.el6.noarch.rpm +yum install -y -q python-itsdangerous python-werkzeug python-jinja2 python-flask ceph-deploy epel-release +# ceph pkg depends on epel-release +yum install -y -q ceph ceph-fuse diff --git a/examples/cephfs/README.md b/examples/cephfs/README.md new file mode 100644 index 0000000000..b4903f1ee0 --- /dev/null +++ b/examples/cephfs/README.md @@ -0,0 +1,68 @@ + + + + +WARNING +WARNING +WARNING +WARNING +WARNING + +

PLEASE NOTE: This document applies to the HEAD of the source tree

+
+If you are using a released version of Kubernetes, you should
+refer to the docs that go with that version.
+
+The latest 1.0.x release of this document can be found
+[here](http://releases.k8s.io/release-1.0/examples/cephfs/README.md).
+
+Documentation for other releases can be found at
+[releases.k8s.io](http://releases.k8s.io).
+
+--
+
+
+
+
+
+# How to Use it?
+
+Install Ceph on the Kubernetes host. For example, on Fedora 21:
+
+    # yum -y install ceph
+
+If you don't have a Ceph cluster, you can set up a [containerized Ceph cluster](https://github.com/rootfs/ceph_docker).
+
+Then get the keyring from the Ceph cluster and copy it to */etc/ceph/keyring*.
+
+Once you have installed Ceph and a Kubernetes build that includes this plugin, you can create a pod based on the examples [cephfs.json](cephfs.json) and [cephfs-with-secret.json](cephfs-with-secret.json). In the pod JSON, you need to provide the following information:
+
+- *monitors*: Array of Ceph monitors.
+- *user*: The RADOS user name. If not provided, the default *admin* is used.
+- *secretFile*: The path to the keyring file. If not provided, the default */etc/ceph/user.secret* is used.
+- *secretRef*: Reference to the Ceph authentication secret. If provided, *secretRef* overrides *secretFile*.
+- *readOnly*: Whether the filesystem is mounted read-only.
+
+Here are the commands:
+
+```console
+  # create a secret if you want to use a Ceph secret instead of a secret file
+  # cluster/kubectl.sh create -f examples/cephfs/secret/ceph-secret.yaml
+
+  # cluster/kubectl.sh create -f examples/cephfs/cephfs.json
+  # cluster/kubectl.sh get pods
+```
+
+If you ssh to the host running the pod, you can run `docker ps` to see the actual pod and `docker inspect` to see the volumes used by the container.
+
+
+[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/cephfs/README.md?pixel)]()
diff --git a/examples/cephfs/cephfs-with-secret.json b/examples/cephfs/cephfs-with-secret.json
new file mode 100644
index 0000000000..bdaefff301
--- /dev/null
+++ b/examples/cephfs/cephfs-with-secret.json
@@ -0,0 +1,39 @@
+{
+    "apiVersion": "v1",
+    "id": "cephfs2",
+    "kind": "Pod",
+    "metadata": {
+        "name": "cephfs2"
+    },
+    "spec": {
+        "containers": [
+            {
+                "name": "cephfs-rw",
+                "image": "kubernetes/pause",
+                "volumeMounts": [
+                    {
+                        "mountPath": "/mnt/cephfs",
+                        "name": "cephfs"
+                    }
+                ]
+            }
+        ],
+        "volumes": [
+            {
+                "name": "cephfs",
+                "cephfs": {
+                    "monitors": [
+                        "10.16.154.78:6789",
+                        "10.16.154.82:6789",
+                        "10.16.154.83:6789"
+                    ],
+                    "user": "admin",
+                    "secretRef": {
+                        "name": "ceph-secret"
+                    },
+                    "readOnly": true
+                }
+            }
+        ]
+    }
+}
diff --git a/examples/cephfs/cephfs.json b/examples/cephfs/cephfs.json
new file mode 100644
index 0000000000..38a73a7661
--- /dev/null
+++ b/examples/cephfs/cephfs.json
@@ -0,0 +1,37 @@
+{
+    "apiVersion": "v1",
+    "id": "cephfs",
+    "kind": "Pod",
+    "metadata": {
+        "name": "cephfs"
+    },
+    "spec": {
+        "containers": [
+            {
+                "name": "cephfs-rw",
+                "image": "kubernetes/pause",
+                "volumeMounts": [
+                    {
+                        "mountPath": "/mnt/cephfs",
+                        "name": "cephfs"
+                    }
+                ]
+            }
+        ],
+        "volumes": [
+            {
+                "name": "cephfs",
+                "cephfs": {
+                    "monitors": [
+                        "10.16.154.78:6789",
+                        "10.16.154.82:6789",
+                        "10.16.154.83:6789"
+                    ],
+                    "user": "admin",
+                    "secretFile": "/etc/ceph/admin.secret",
+                    "readOnly": true
+                }
+            }
+        ]
+    }
+}
diff --git a/examples/cephfs/secret/ceph-secret.yaml b/examples/cephfs/secret/ceph-secret.yaml
new file mode 100644
index 0000000000..e29a5535ab
--- /dev/null
+++ b/examples/cephfs/secret/ceph-secret.yaml
@@ -0,0 +1,6 @@
+apiVersion: v1
+kind: Secret
+metadata: + name: ceph-secret +data: + key: QVFCMTZWMVZvRjVtRXhBQTVrQ1FzN2JCajhWVUxSdzI2Qzg0SEE9PQ== diff --git a/examples/examples_test.go b/examples/examples_test.go index f7d957c9d5..6a5e1b7ae7 100644 --- a/examples/examples_test.go +++ b/examples/examples_test.go @@ -332,6 +332,10 @@ func TestExampleObjectSchemas(t *testing.T) { "zookeeper-service": &api.Service{}, "zookeeper": &api.Pod{}, }, + "../examples/cephfs/": { + "cephfs": &api.Pod{}, + "cephfs-with-secret": &api.Pod{}, + }, } capabilities.SetForTests(capabilities.Capabilities{ diff --git a/pkg/api/deep_copy_generated.go b/pkg/api/deep_copy_generated.go index c6eb5df9fa..2494d972f8 100644 --- a/pkg/api/deep_copy_generated.go +++ b/pkg/api/deep_copy_generated.go @@ -71,6 +71,29 @@ func deepCopy_api_Capabilities(in Capabilities, out *Capabilities, c *conversion return nil } +func deepCopy_api_CephFSVolumeSource(in CephFSVolumeSource, out *CephFSVolumeSource, c *conversion.Cloner) error { + if in.Monitors != nil { + out.Monitors = make([]string, len(in.Monitors)) + for i := range in.Monitors { + out.Monitors[i] = in.Monitors[i] + } + } else { + out.Monitors = nil + } + out.User = in.User + out.SecretFile = in.SecretFile + if in.SecretRef != nil { + out.SecretRef = new(LocalObjectReference) + if err := deepCopy_api_LocalObjectReference(*in.SecretRef, out.SecretRef, c); err != nil { + return err + } + } else { + out.SecretRef = nil + } + out.ReadOnly = in.ReadOnly + return nil +} + func deepCopy_api_CinderVolumeSource(in CinderVolumeSource, out *CinderVolumeSource, c *conversion.Cloner) error { out.VolumeID = in.VolumeID out.FSType = in.FSType @@ -1193,6 +1216,14 @@ func deepCopy_api_PersistentVolumeSource(in PersistentVolumeSource, out *Persist } else { out.Cinder = nil } + if in.CephFS != nil { + out.CephFS = new(CephFSVolumeSource) + if err := deepCopy_api_CephFSVolumeSource(*in.CephFS, out.CephFS, c); err != nil { + return err + } + } else { + out.CephFS = nil + } return nil } @@ -2133,6 +2164,14 @@ func deepCopy_api_VolumeSource(in VolumeSource, out *VolumeSource, c *conversion } else { out.Cinder = nil } + if in.CephFS != nil { + out.CephFS = new(CephFSVolumeSource) + if err := deepCopy_api_CephFSVolumeSource(*in.CephFS, out.CephFS, c); err != nil { + return err + } + } else { + out.CephFS = nil + } return nil } @@ -2173,6 +2212,7 @@ func init() { deepCopy_api_AWSElasticBlockStoreVolumeSource, deepCopy_api_Binding, deepCopy_api_Capabilities, + deepCopy_api_CephFSVolumeSource, deepCopy_api_CinderVolumeSource, deepCopy_api_ComponentCondition, deepCopy_api_ComponentStatus, diff --git a/pkg/api/types.go b/pkg/api/types.go index 097d4a15f6..a5ff12e1d2 100644 --- a/pkg/api/types.go +++ b/pkg/api/types.go @@ -226,6 +226,8 @@ type VolumeSource struct { RBD *RBDVolumeSource `json:"rbd,omitempty"` // Cinder represents a cinder volume attached and mounted on kubelets host machine Cinder *CinderVolumeSource `json:"cinder,omitempty"` + // CephFS represents a Cephfs mount on the host that shares a pod's lifetime + CephFS *CephFSVolumeSource `json:"cephfs,omitempty"` } // Similar to VolumeSource but meant for the administrator who creates PVs. 
@@ -252,6 +254,8 @@ type PersistentVolumeSource struct { ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty"` // Cinder represents a cinder volume attached and mounted on kubelets host machine Cinder *CinderVolumeSource `json:"cinder,omitempty"` + // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime + CephFS *CephFSVolumeSource `json:"cephfs,omitempty"` } type PersistentVolumeClaimVolumeSource struct { @@ -577,6 +581,21 @@ type CinderVolumeSource struct { ReadOnly bool `json:"readOnly,omitempty"` } +// CephFSVolumeSource represents a Ceph Filesystem Mount that lasts the lifetime of a pod +type CephFSVolumeSource struct { + // Required: Monitors is a collection of Ceph monitors + Monitors []string `json:"monitors"` + // Optional: User is the rados user name, default is admin + User string `json:"user,omitempty"` + // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + SecretFile string `json:"secretFile,omitempty"` + // Optional: SecretRef is reference to the authentication secret for User, default is empty. + SecretRef *LocalObjectReference `json:"secretRef,omitempty"` + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + ReadOnly bool `json:"readOnly,omitempty"` +} + // ContainerPort represents a network port in a single container type ContainerPort struct { // Optional: If specified, this must be an IANA_SVC_NAME Each named port diff --git a/pkg/api/v1/conversion_generated.go b/pkg/api/v1/conversion_generated.go index ec278caf6c..9974573c0f 100644 --- a/pkg/api/v1/conversion_generated.go +++ b/pkg/api/v1/conversion_generated.go @@ -76,6 +76,32 @@ func convert_api_Capabilities_To_v1_Capabilities(in *api.Capabilities, out *Capa return nil } +func convert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource(in *api.CephFSVolumeSource, out *CephFSVolumeSource, s conversion.Scope) error { + if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { + defaulting.(func(*api.CephFSVolumeSource))(in) + } + if in.Monitors != nil { + out.Monitors = make([]string, len(in.Monitors)) + for i := range in.Monitors { + out.Monitors[i] = in.Monitors[i] + } + } else { + out.Monitors = nil + } + out.User = in.User + out.SecretFile = in.SecretFile + if in.SecretRef != nil { + out.SecretRef = new(LocalObjectReference) + if err := convert_api_LocalObjectReference_To_v1_LocalObjectReference(in.SecretRef, out.SecretRef, s); err != nil { + return err + } + } else { + out.SecretRef = nil + } + out.ReadOnly = in.ReadOnly + return nil +} + func convert_api_CinderVolumeSource_To_v1_CinderVolumeSource(in *api.CinderVolumeSource, out *CinderVolumeSource, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.CinderVolumeSource))(in) @@ -1373,6 +1399,14 @@ func convert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource(in *api.Per } else { out.Cinder = nil } + if in.CephFS != nil { + out.CephFS = new(CephFSVolumeSource) + if err := convert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource(in.CephFS, out.CephFS, s); err != nil { + return err + } + } else { + out.CephFS = nil + } return nil } @@ -2363,6 +2397,14 @@ func convert_api_VolumeSource_To_v1_VolumeSource(in *api.VolumeSource, out *Volu } else { out.Cinder = nil } + if in.CephFS != nil { + out.CephFS = new(CephFSVolumeSource) + if err := convert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource(in.CephFS, out.CephFS, s); err != nil { + return err + } + } else { 
+ out.CephFS = nil + } return nil } @@ -2416,6 +2458,32 @@ func convert_v1_Capabilities_To_api_Capabilities(in *Capabilities, out *api.Capa return nil } +func convert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(in *CephFSVolumeSource, out *api.CephFSVolumeSource, s conversion.Scope) error { + if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { + defaulting.(func(*CephFSVolumeSource))(in) + } + if in.Monitors != nil { + out.Monitors = make([]string, len(in.Monitors)) + for i := range in.Monitors { + out.Monitors[i] = in.Monitors[i] + } + } else { + out.Monitors = nil + } + out.User = in.User + out.SecretFile = in.SecretFile + if in.SecretRef != nil { + out.SecretRef = new(api.LocalObjectReference) + if err := convert_v1_LocalObjectReference_To_api_LocalObjectReference(in.SecretRef, out.SecretRef, s); err != nil { + return err + } + } else { + out.SecretRef = nil + } + out.ReadOnly = in.ReadOnly + return nil +} + func convert_v1_CinderVolumeSource_To_api_CinderVolumeSource(in *CinderVolumeSource, out *api.CinderVolumeSource, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*CinderVolumeSource))(in) @@ -3713,6 +3781,14 @@ func convert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource(in *Persist } else { out.Cinder = nil } + if in.CephFS != nil { + out.CephFS = new(api.CephFSVolumeSource) + if err := convert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(in.CephFS, out.CephFS, s); err != nil { + return err + } + } else { + out.CephFS = nil + } return nil } @@ -4703,6 +4779,14 @@ func convert_v1_VolumeSource_To_api_VolumeSource(in *VolumeSource, out *api.Volu } else { out.Cinder = nil } + if in.CephFS != nil { + out.CephFS = new(api.CephFSVolumeSource) + if err := convert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(in.CephFS, out.CephFS, s); err != nil { + return err + } + } else { + out.CephFS = nil + } return nil } @@ -4711,6 +4795,7 @@ func init() { convert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource, convert_api_Binding_To_v1_Binding, convert_api_Capabilities_To_v1_Capabilities, + convert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource, convert_api_CinderVolumeSource_To_v1_CinderVolumeSource, convert_api_ComponentCondition_To_v1_ComponentCondition, convert_api_ComponentStatusList_To_v1_ComponentStatusList, @@ -4826,6 +4911,7 @@ func init() { convert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource, convert_v1_Binding_To_api_Binding, convert_v1_Capabilities_To_api_Capabilities, + convert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource, convert_v1_CinderVolumeSource_To_api_CinderVolumeSource, convert_v1_ComponentCondition_To_api_ComponentCondition, convert_v1_ComponentStatusList_To_api_ComponentStatusList, diff --git a/pkg/api/v1/deep_copy_generated.go b/pkg/api/v1/deep_copy_generated.go index 24a9bf6d75..5b06c08a3d 100644 --- a/pkg/api/v1/deep_copy_generated.go +++ b/pkg/api/v1/deep_copy_generated.go @@ -86,6 +86,29 @@ func deepCopy_v1_Capabilities(in Capabilities, out *Capabilities, c *conversion. 
return nil } +func deepCopy_v1_CephFSVolumeSource(in CephFSVolumeSource, out *CephFSVolumeSource, c *conversion.Cloner) error { + if in.Monitors != nil { + out.Monitors = make([]string, len(in.Monitors)) + for i := range in.Monitors { + out.Monitors[i] = in.Monitors[i] + } + } else { + out.Monitors = nil + } + out.User = in.User + out.SecretFile = in.SecretFile + if in.SecretRef != nil { + out.SecretRef = new(LocalObjectReference) + if err := deepCopy_v1_LocalObjectReference(*in.SecretRef, out.SecretRef, c); err != nil { + return err + } + } else { + out.SecretRef = nil + } + out.ReadOnly = in.ReadOnly + return nil +} + func deepCopy_v1_CinderVolumeSource(in CinderVolumeSource, out *CinderVolumeSource, c *conversion.Cloner) error { out.VolumeID = in.VolumeID out.FSType = in.FSType @@ -1192,6 +1215,14 @@ func deepCopy_v1_PersistentVolumeSource(in PersistentVolumeSource, out *Persiste } else { out.Cinder = nil } + if in.CephFS != nil { + out.CephFS = new(CephFSVolumeSource) + if err := deepCopy_v1_CephFSVolumeSource(*in.CephFS, out.CephFS, c); err != nil { + return err + } + } else { + out.CephFS = nil + } return nil } @@ -2138,6 +2169,14 @@ func deepCopy_v1_VolumeSource(in VolumeSource, out *VolumeSource, c *conversion. } else { out.Cinder = nil } + if in.CephFS != nil { + out.CephFS = new(CephFSVolumeSource) + if err := deepCopy_v1_CephFSVolumeSource(*in.CephFS, out.CephFS, c); err != nil { + return err + } + } else { + out.CephFS = nil + } return nil } @@ -2175,6 +2214,7 @@ func init() { deepCopy_v1_AWSElasticBlockStoreVolumeSource, deepCopy_v1_Binding, deepCopy_v1_Capabilities, + deepCopy_v1_CephFSVolumeSource, deepCopy_v1_CinderVolumeSource, deepCopy_v1_ComponentCondition, deepCopy_v1_ComponentStatus, diff --git a/pkg/api/v1/types.go b/pkg/api/v1/types.go index 9719bf5269..8082c1244f 100644 --- a/pkg/api/v1/types.go +++ b/pkg/api/v1/types.go @@ -280,6 +280,8 @@ type VolumeSource struct { // Cinder represents a cinder volume attached and mounted on kubelets host machine // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md Cinder *CinderVolumeSource `json:"cinder,omitempty"` + // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime + CephFS *CephFSVolumeSource `json:"cephfs,omitempty"` } // PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. @@ -328,6 +330,8 @@ type PersistentVolumeSource struct { // Cinder represents a cinder volume attached and mounted on kubelets host machine // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md Cinder *CinderVolumeSource `json:"cinder,omitempty"` + // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime + CephFS *CephFSVolumeSource `json:"cephfs,omitempty"` } // PersistentVolume (PV) is a storage resource provisioned by an administrator. 
@@ -594,6 +598,26 @@ type CinderVolumeSource struct { ReadOnly bool `json:"readOnly,omitempty"` } +// CephFSVolumeSource represents a Ceph Filesystem Mount that lasts the lifetime of a pod +type CephFSVolumeSource struct { + // Required: Monitors is a collection of Ceph monitors + // More info: http://releases.k8s.io/HEAD/examples/cephfs/README.md#how-to-use-it + Monitors []string `json:"monitors"` + // Optional: User is the rados user name, default is admin + // More info: http://releases.k8s.io/HEAD/examples/cephfs/README.md#how-to-use-it + User string `json:"user,omitempty"` + // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + // More info: http://releases.k8s.io/HEAD/examples/cephfs/README.md#how-to-use-it + SecretFile string `json:"secretFile,omitempty"` + // Optional: SecretRef is reference to the authentication secret for User, default is empty. + // More info: http://releases.k8s.io/HEAD/examples/cephfs/README.md#how-to-use-it + SecretRef *LocalObjectReference `json:"secretRef,omitempty"` + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // More info: http://releases.k8s.io/HEAD/examples/cephfs/README.md#how-to-use-it + ReadOnly bool `json:"readOnly,omitempty"` +} + const ( StorageMediumDefault StorageMedium = "" // use whatever the default is for the node StorageMediumMemory StorageMedium = "Memory" // use memory (tmpfs) diff --git a/pkg/api/v1/types_swagger_doc_generated.go b/pkg/api/v1/types_swagger_doc_generated.go index c16300eacc..fc780dc32e 100644 --- a/pkg/api/v1/types_swagger_doc_generated.go +++ b/pkg/api/v1/types_swagger_doc_generated.go @@ -69,6 +69,19 @@ func (Capabilities) SwaggerDoc() map[string]string { return map_Capabilities } +var map_CephFSVolumeSource = map[string]string{ + "": "CephFSVolumeSource represents a Ceph Filesystem Mount that lasts the lifetime of a pod", + "monitors": "Required: Monitors is a collection of Ceph monitors More info: http://releases.k8s.io/HEAD/examples/cephfs/README.md#how-to-use-it", + "user": "Optional: User is the rados user name, default is admin More info: http://releases.k8s.io/HEAD/examples/cephfs/README.md#how-to-use-it", + "secretFile": "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: http://releases.k8s.io/HEAD/examples/cephfs/README.md#how-to-use-it", + "secretRef": "Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: http://releases.k8s.io/HEAD/examples/cephfs/README.md#how-to-use-it", + "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: http://releases.k8s.io/HEAD/examples/cephfs/README.md#how-to-use-it", +} + +func (CephFSVolumeSource) SwaggerDoc() map[string]string { + return map_CephFSVolumeSource +} + var map_CinderVolumeSource = map[string]string{ "": "CinderVolumeSource represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet.", "volumeID": "volume id used to identify the volume in cinder More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", @@ -800,6 +813,7 @@ var map_PersistentVolumeSource = map[string]string{ "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. 
More info: http://releases.k8s.io/HEAD/examples/rbd/README.md", "iscsi": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin.", "cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", + "cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime", } func (PersistentVolumeSource) SwaggerDoc() map[string]string { @@ -1375,6 +1389,7 @@ var map_VolumeSource = map[string]string{ "persistentVolumeClaim": "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistentvolumeclaims", "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: http://releases.k8s.io/HEAD/examples/rbd/README.md", "cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", + "cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime", } func (VolumeSource) SwaggerDoc() map[string]string { diff --git a/pkg/api/validation/validation.go b/pkg/api/validation/validation.go index 27151e92a8..eb72371b39 100644 --- a/pkg/api/validation/validation.go +++ b/pkg/api/validation/validation.go @@ -379,6 +379,10 @@ func validateSource(source *api.VolumeSource) errs.ValidationErrorList { numVolumes++ allErrs = append(allErrs, validateCinderVolumeSource(source.Cinder).Prefix("cinder")...) } + if source.CephFS != nil { + numVolumes++ + allErrs = append(allErrs, validateCephFS(source.CephFS).Prefix("cephfs")...) + } if numVolumes != 1 { allErrs = append(allErrs, errs.NewFieldInvalid("", source, "exactly 1 volume type is required")) } @@ -513,6 +517,14 @@ func validateCinderVolumeSource(cd *api.CinderVolumeSource) errs.ValidationError return allErrs } +func validateCephFS(cephfs *api.CephFSVolumeSource) errs.ValidationErrorList { + allErrs := errs.ValidationErrorList{} + if len(cephfs.Monitors) == 0 { + allErrs = append(allErrs, errs.NewFieldRequired("monitors")) + } + return allErrs +} + func ValidatePersistentVolumeName(name string, prefix bool) (bool, string) { return NameIsDNSSubdomain(name, prefix) } @@ -568,6 +580,10 @@ func ValidatePersistentVolume(pv *api.PersistentVolume) errs.ValidationErrorList numVolumes++ allErrs = append(allErrs, validateRBD(pv.Spec.RBD).Prefix("rbd")...) } + if pv.Spec.CephFS != nil { + numVolumes++ + allErrs = append(allErrs, validateCephFS(pv.Spec.CephFS).Prefix("cephfs")...) + } if pv.Spec.ISCSI != nil { numVolumes++ allErrs = append(allErrs, validateISCSIVolumeSource(pv.Spec.ISCSI).Prefix("iscsi")...) 
diff --git a/pkg/api/validation/validation_test.go b/pkg/api/validation/validation_test.go index d99028e9ff..47f724029a 100644 --- a/pkg/api/validation/validation_test.go +++ b/pkg/api/validation/validation_test.go @@ -457,12 +457,13 @@ func TestValidateVolumes(t *testing.T) { {Name: "glusterfs", VolumeSource: api.VolumeSource{Glusterfs: &api.GlusterfsVolumeSource{EndpointsName: "host1", Path: "path", ReadOnly: false}}}, {Name: "rbd", VolumeSource: api.VolumeSource{RBD: &api.RBDVolumeSource{CephMonitors: []string{"foo"}, RBDImage: "bar", FSType: "ext4"}}}, {Name: "cinder", VolumeSource: api.VolumeSource{Cinder: &api.CinderVolumeSource{"29ea5088-4f60-4757-962e-dba678767887", "ext4", false}}}, + {Name: "cephfs", VolumeSource: api.VolumeSource{CephFS: &api.CephFSVolumeSource{Monitors: []string{"foo"}}}}, } names, errs := validateVolumes(successCase) if len(errs) != 0 { t.Errorf("expected success: %v", errs) } - if len(names) != len(successCase) || !names.HasAll("abc", "123", "abc-123", "empty", "gcepd", "gitrepo", "secret", "iscsidisk", "cinder") { + if len(names) != len(successCase) || !names.HasAll("abc", "123", "abc-123", "empty", "gcepd", "gitrepo", "secret", "iscsidisk", "cinder", "cephfs") { t.Errorf("wrong names result: %v", names) } emptyVS := api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}} @@ -472,6 +473,7 @@ func TestValidateVolumes(t *testing.T) { emptyPath := api.VolumeSource{Glusterfs: &api.GlusterfsVolumeSource{EndpointsName: "host", Path: "", ReadOnly: false}} emptyMon := api.VolumeSource{RBD: &api.RBDVolumeSource{CephMonitors: []string{}, RBDImage: "bar", FSType: "ext4"}} emptyImage := api.VolumeSource{RBD: &api.RBDVolumeSource{CephMonitors: []string{"foo"}, RBDImage: "", FSType: "ext4"}} + emptyCephFSMon := api.VolumeSource{CephFS: &api.CephFSVolumeSource{Monitors: []string{}}} errorCases := map[string]struct { V []api.Volume T errors.ValidationErrorType @@ -487,6 +489,7 @@ func TestValidateVolumes(t *testing.T) { "empty path": {[]api.Volume{{Name: "badpath", VolumeSource: emptyPath}}, errors.ValidationErrorTypeRequired, "[0].source.glusterfs.path"}, "empty mon": {[]api.Volume{{Name: "badmon", VolumeSource: emptyMon}}, errors.ValidationErrorTypeRequired, "[0].source.rbd.monitors"}, "empty image": {[]api.Volume{{Name: "badimage", VolumeSource: emptyImage}}, errors.ValidationErrorTypeRequired, "[0].source.rbd.image"}, + "empty cephfs mon": {[]api.Volume{{Name: "badmon", VolumeSource: emptyCephFSMon}}, errors.ValidationErrorTypeRequired, "[0].source.cephfs.monitors"}, } for k, v := range errorCases { _, errs := validateVolumes(v.V) diff --git a/pkg/expapi/deep_copy_generated.go b/pkg/expapi/deep_copy_generated.go index f61479a258..774f19e9b6 100644 --- a/pkg/expapi/deep_copy_generated.go +++ b/pkg/expapi/deep_copy_generated.go @@ -56,6 +56,29 @@ func deepCopy_api_Capabilities(in api.Capabilities, out *api.Capabilities, c *co return nil } +func deepCopy_api_CephFSVolumeSource(in api.CephFSVolumeSource, out *api.CephFSVolumeSource, c *conversion.Cloner) error { + if in.Monitors != nil { + out.Monitors = make([]string, len(in.Monitors)) + for i := range in.Monitors { + out.Monitors[i] = in.Monitors[i] + } + } else { + out.Monitors = nil + } + out.User = in.User + out.SecretFile = in.SecretFile + if in.SecretRef != nil { + out.SecretRef = new(api.LocalObjectReference) + if err := deepCopy_api_LocalObjectReference(*in.SecretRef, out.SecretRef, c); err != nil { + return err + } + } else { + out.SecretRef = nil + } + out.ReadOnly = in.ReadOnly + return nil +} + func 
deepCopy_api_CinderVolumeSource(in api.CinderVolumeSource, out *api.CinderVolumeSource, c *conversion.Cloner) error { out.VolumeID = in.VolumeID out.FSType = in.FSType @@ -677,6 +700,14 @@ func deepCopy_api_VolumeSource(in api.VolumeSource, out *api.VolumeSource, c *co } else { out.Cinder = nil } + if in.CephFS != nil { + out.CephFS = new(api.CephFSVolumeSource) + if err := deepCopy_api_CephFSVolumeSource(*in.CephFS, out.CephFS, c); err != nil { + return err + } + } else { + out.CephFS = nil + } return nil } @@ -1060,6 +1091,7 @@ func init() { err := api.Scheme.AddGeneratedDeepCopyFuncs( deepCopy_api_AWSElasticBlockStoreVolumeSource, deepCopy_api_Capabilities, + deepCopy_api_CephFSVolumeSource, deepCopy_api_CinderVolumeSource, deepCopy_api_Container, deepCopy_api_ContainerPort, diff --git a/pkg/expapi/v1/conversion_generated.go b/pkg/expapi/v1/conversion_generated.go index a6409c8b8e..24be593816 100644 --- a/pkg/expapi/v1/conversion_generated.go +++ b/pkg/expapi/v1/conversion_generated.go @@ -62,6 +62,32 @@ func convert_api_Capabilities_To_v1_Capabilities(in *api.Capabilities, out *v1.C return nil } +func convert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource(in *api.CephFSVolumeSource, out *v1.CephFSVolumeSource, s conversion.Scope) error { + if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { + defaulting.(func(*api.CephFSVolumeSource))(in) + } + if in.Monitors != nil { + out.Monitors = make([]string, len(in.Monitors)) + for i := range in.Monitors { + out.Monitors[i] = in.Monitors[i] + } + } else { + out.Monitors = nil + } + out.User = in.User + out.SecretFile = in.SecretFile + if in.SecretRef != nil { + out.SecretRef = new(v1.LocalObjectReference) + if err := convert_api_LocalObjectReference_To_v1_LocalObjectReference(in.SecretRef, out.SecretRef, s); err != nil { + return err + } + } else { + out.SecretRef = nil + } + out.ReadOnly = in.ReadOnly + return nil +} + func convert_api_CinderVolumeSource_To_v1_CinderVolumeSource(in *api.CinderVolumeSource, out *v1.CinderVolumeSource, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.CinderVolumeSource))(in) @@ -722,6 +748,14 @@ func convert_api_VolumeSource_To_v1_VolumeSource(in *api.VolumeSource, out *v1.V } else { out.Cinder = nil } + if in.CephFS != nil { + out.CephFS = new(v1.CephFSVolumeSource) + if err := convert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource(in.CephFS, out.CephFS, s); err != nil { + return err + } + } else { + out.CephFS = nil + } return nil } @@ -759,6 +793,32 @@ func convert_v1_Capabilities_To_api_Capabilities(in *v1.Capabilities, out *api.C return nil } +func convert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(in *v1.CephFSVolumeSource, out *api.CephFSVolumeSource, s conversion.Scope) error { + if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { + defaulting.(func(*v1.CephFSVolumeSource))(in) + } + if in.Monitors != nil { + out.Monitors = make([]string, len(in.Monitors)) + for i := range in.Monitors { + out.Monitors[i] = in.Monitors[i] + } + } else { + out.Monitors = nil + } + out.User = in.User + out.SecretFile = in.SecretFile + if in.SecretRef != nil { + out.SecretRef = new(api.LocalObjectReference) + if err := convert_v1_LocalObjectReference_To_api_LocalObjectReference(in.SecretRef, out.SecretRef, s); err != nil { + return err + } + } else { + out.SecretRef = nil + } + out.ReadOnly = in.ReadOnly + return nil +} + func convert_v1_CinderVolumeSource_To_api_CinderVolumeSource(in 
*v1.CinderVolumeSource, out *api.CinderVolumeSource, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*v1.CinderVolumeSource))(in) @@ -1419,6 +1479,14 @@ func convert_v1_VolumeSource_To_api_VolumeSource(in *v1.VolumeSource, out *api.V } else { out.Cinder = nil } + if in.CephFS != nil { + out.CephFS = new(api.CephFSVolumeSource) + if err := convert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(in.CephFS, out.CephFS, s); err != nil { + return err + } + } else { + out.CephFS = nil + } return nil } @@ -2168,6 +2236,7 @@ func init() { err := api.Scheme.AddGeneratedConversionFuncs( convert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource, convert_api_Capabilities_To_v1_Capabilities, + convert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource, convert_api_CinderVolumeSource_To_v1_CinderVolumeSource, convert_api_ContainerPort_To_v1_ContainerPort, convert_api_Container_To_v1_Container, @@ -2226,6 +2295,7 @@ func init() { convert_v1_APIVersion_To_expapi_APIVersion, convert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource, convert_v1_Capabilities_To_api_Capabilities, + convert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource, convert_v1_CinderVolumeSource_To_api_CinderVolumeSource, convert_v1_ContainerPort_To_api_ContainerPort, convert_v1_Container_To_api_Container, diff --git a/pkg/expapi/v1/deep_copy_generated.go b/pkg/expapi/v1/deep_copy_generated.go index a73e1b458d..78839c379c 100644 --- a/pkg/expapi/v1/deep_copy_generated.go +++ b/pkg/expapi/v1/deep_copy_generated.go @@ -73,6 +73,29 @@ func deepCopy_v1_Capabilities(in v1.Capabilities, out *v1.Capabilities, c *conve return nil } +func deepCopy_v1_CephFSVolumeSource(in v1.CephFSVolumeSource, out *v1.CephFSVolumeSource, c *conversion.Cloner) error { + if in.Monitors != nil { + out.Monitors = make([]string, len(in.Monitors)) + for i := range in.Monitors { + out.Monitors[i] = in.Monitors[i] + } + } else { + out.Monitors = nil + } + out.User = in.User + out.SecretFile = in.SecretFile + if in.SecretRef != nil { + out.SecretRef = new(v1.LocalObjectReference) + if err := deepCopy_v1_LocalObjectReference(*in.SecretRef, out.SecretRef, c); err != nil { + return err + } + } else { + out.SecretRef = nil + } + out.ReadOnly = in.ReadOnly + return nil +} + func deepCopy_v1_CinderVolumeSource(in v1.CinderVolumeSource, out *v1.CinderVolumeSource, c *conversion.Cloner) error { out.VolumeID = in.VolumeID out.FSType = in.FSType @@ -695,6 +718,14 @@ func deepCopy_v1_VolumeSource(in v1.VolumeSource, out *v1.VolumeSource, c *conve } else { out.Cinder = nil } + if in.CephFS != nil { + out.CephFS = new(v1.CephFSVolumeSource) + if err := deepCopy_v1_CephFSVolumeSource(*in.CephFS, out.CephFS, c); err != nil { + return err + } + } else { + out.CephFS = nil + } return nil } @@ -1068,6 +1099,7 @@ func init() { deepCopy_resource_Quantity, deepCopy_v1_AWSElasticBlockStoreVolumeSource, deepCopy_v1_Capabilities, + deepCopy_v1_CephFSVolumeSource, deepCopy_v1_CinderVolumeSource, deepCopy_v1_Container, deepCopy_v1_ContainerPort, diff --git a/pkg/volume/cephfs/cephfs.go b/pkg/volume/cephfs/cephfs.go new file mode 100644 index 0000000000..fadd2a63cf --- /dev/null +++ b/pkg/volume/cephfs/cephfs.go @@ -0,0 +1,265 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cephfs + +import ( + "fmt" + "os" + + "github.com/golang/glog" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/types" + "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/mount" + "k8s.io/kubernetes/pkg/volume" +) + +// This is the primary entrypoint for volume plugins. +func ProbeVolumePlugins() []volume.VolumePlugin { + return []volume.VolumePlugin{&cephfsPlugin{nil}} +} + +type cephfsPlugin struct { + host volume.VolumeHost +} + +var _ volume.VolumePlugin = &cephfsPlugin{} + +const ( + cephfsPluginName = "kubernetes.io/cephfs" +) + +func (plugin *cephfsPlugin) Init(host volume.VolumeHost) { + plugin.host = host +} + +func (plugin *cephfsPlugin) Name() string { + return cephfsPluginName +} + +func (plugin *cephfsPlugin) CanSupport(spec *volume.Spec) bool { + return spec.VolumeSource.CephFS != nil || spec.PersistentVolumeSource.CephFS != nil +} + +func (plugin *cephfsPlugin) GetAccessModes() []api.PersistentVolumeAccessMode { + return []api.PersistentVolumeAccessMode{ + api.ReadWriteOnce, + api.ReadOnlyMany, + api.ReadWriteMany, + } +} + +func (plugin *cephfsPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions, mounter mount.Interface) (volume.Builder, error) { + cephvs := plugin.getVolumeSource(spec) + secret := "" + if cephvs.SecretRef != nil { + kubeClient := plugin.host.GetKubeClient() + if kubeClient == nil { + return nil, fmt.Errorf("Cannot get kube client") + } + + secretName, err := kubeClient.Secrets(pod.Namespace).Get(cephvs.SecretRef.Name) + if err != nil { + err = fmt.Errorf("Couldn't get secret %v/%v err: %v", pod.Namespace, cephvs.SecretRef, err) + return nil, err + } + for name, data := range secretName.Data { + secret = string(data) + glog.V(1).Infof("found ceph secret info: %s", name) + } + } + return plugin.newBuilderInternal(spec, pod.UID, mounter, secret) +} + +func (plugin *cephfsPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID, mounter mount.Interface, secret string) (volume.Builder, error) { + cephvs := plugin.getVolumeSource(spec) + id := cephvs.User + if id == "" { + id = "admin" + } + secret_file := cephvs.SecretFile + if secret_file == "" { + secret_file = "/etc/ceph/" + id + ".secret" + } + + return &cephfsBuilder{ + cephfs: &cephfs{ + podUID: podUID, + volName: spec.Name, + mon: cephvs.Monitors, + secret: secret, + id: id, + secret_file: secret_file, + readonly: cephvs.ReadOnly, + mounter: mounter, + plugin: plugin}, + }, nil +} + +func (plugin *cephfsPlugin) NewCleaner(volName string, podUID types.UID, mounter mount.Interface) (volume.Cleaner, error) { + return plugin.newCleanerInternal(volName, podUID, mounter) +} + +func (plugin *cephfsPlugin) newCleanerInternal(volName string, podUID types.UID, mounter mount.Interface) (volume.Cleaner, error) { + return &cephfsCleaner{ + cephfs: &cephfs{ + podUID: podUID, + volName: volName, + mounter: mounter, + plugin: plugin}, + }, nil +} + +func (plugin *cephfsPlugin) getVolumeSource(spec *volume.Spec) *api.CephFSVolumeSource { + if spec.VolumeSource.CephFS != nil { + return spec.VolumeSource.CephFS + } else { + return spec.PersistentVolumeSource.CephFS + } +} 
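+
+// For illustration only: with monitors ["10.16.154.78:6789", "10.16.154.82:6789"],
+// user "kube", and secret file "/etc/ceph/kube.secret" (hypothetical values), the
+// execMount helper below performs the equivalent of:
+//
+//	mount -t ceph 10.16.154.78:6789,10.16.154.82:6789:/ <pod volume dir> \
+//		-o name=kube,secretfile=/etc/ceph/kube.secret
+//
+// When a key is supplied through SecretRef, "secret=<key>" replaces "secretfile=...",
+// and "ro" is prepended to the options for read-only volumes.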
+
+// CephFS volumes represent a bare host file or directory mount of a CephFS export.
+type cephfs struct {
+	volName     string
+	podUID      types.UID
+	mon         []string
+	id          string
+	secret      string
+	secret_file string
+	readonly    bool
+	mounter     mount.Interface
+	plugin      *cephfsPlugin
+}
+
+type cephfsBuilder struct {
+	*cephfs
+}
+
+var _ volume.Builder = &cephfsBuilder{}
+
+// SetUp attaches the disk and bind mounts to the volume path.
+func (cephfsVolume *cephfsBuilder) SetUp() error {
+	return cephfsVolume.SetUpAt(cephfsVolume.GetPath())
+}
+
+// SetUpAt attaches the disk and bind mounts to the volume path.
+func (cephfsVolume *cephfsBuilder) SetUpAt(dir string) error {
+	notMnt, err := cephfsVolume.mounter.IsLikelyNotMountPoint(dir)
+	glog.V(4).Infof("CephFS mount set up: %s %v %v", dir, !notMnt, err)
+	if err != nil && !os.IsNotExist(err) {
+		return err
+	}
+	if !notMnt {
+		return nil
+	}
+	if err := os.MkdirAll(dir, 0750); err != nil {
+		return err
+	}
+
+	err = cephfsVolume.execMount(dir)
+	if err == nil {
+		return nil
+	}
+
+	// cleanup upon failure
+	cephfsVolume.cleanup(dir)
+	// return error
+	return err
+}
+
+func (cephfsVolume *cephfsBuilder) IsReadOnly() bool {
+	return cephfsVolume.readonly
+}
+
+type cephfsCleaner struct {
+	*cephfs
+}
+
+var _ volume.Cleaner = &cephfsCleaner{}
+
+// TearDown unmounts the bind mount
+func (cephfsVolume *cephfsCleaner) TearDown() error {
+	return cephfsVolume.TearDownAt(cephfsVolume.GetPath())
+}
+
+// TearDownAt unmounts the bind mount
+func (cephfsVolume *cephfsCleaner) TearDownAt(dir string) error {
+	return cephfsVolume.cleanup(dir)
+}
+
+// GetPath creates the global mount path
+func (cephfsVolume *cephfs) GetPath() string {
+	name := cephfsPluginName
+	return cephfsVolume.plugin.host.GetPodVolumeDir(cephfsVolume.podUID, util.EscapeQualifiedNameForDisk(name), cephfsVolume.volName)
+}
+
+func (cephfsVolume *cephfs) cleanup(dir string) error {
+	noMnt, err := cephfsVolume.mounter.IsLikelyNotMountPoint(dir)
+	if err != nil && !os.IsNotExist(err) {
+		return fmt.Errorf("CephFS: Error checking IsLikelyNotMountPoint: %v", err)
+	}
+	if noMnt {
+		return os.RemoveAll(dir)
+	}
+
+	if err := cephfsVolume.mounter.Unmount(dir); err != nil {
+		return fmt.Errorf("CephFS: Unmounting failed: %v", err)
+	}
+	noMnt, mntErr := cephfsVolume.mounter.IsLikelyNotMountPoint(dir)
+	if mntErr != nil {
+		return fmt.Errorf("CephFS: IsMountpoint check failed: %v", mntErr)
+	}
+	if noMnt {
+		if err := os.RemoveAll(dir); err != nil {
+			return fmt.Errorf("CephFS: RemoveAll %s failed: %v", dir, err)
+		}
+	}
+
+	return nil
+}
+
+func (cephfsVolume *cephfs) execMount(mountpoint string) error {
+	// cephfs mount option
+	ceph_opt := ""
+	// override secretfile if secret is provided
+	if cephfsVolume.secret != "" {
+		ceph_opt = "name=" + cephfsVolume.id + ",secret=" + cephfsVolume.secret
+	} else {
+		ceph_opt = "name=" + cephfsVolume.id + ",secretfile=" + cephfsVolume.secret_file
+	}
+	// build option array
+	opt := []string{}
+	if cephfsVolume.readonly {
+		opt = append(opt, "ro")
+	}
+	opt = append(opt, ceph_opt)
+
+	// build src like mon1:6789,mon2:6789,mon3:6789:/
+	hosts := cephfsVolume.mon
+	l := len(hosts)
+	// pass all monitors and let ceph randomize and fail over
+	i := 0
+	src := ""
+	for i = 0; i < l-1; i++ {
+		src += hosts[i] + ","
+	}
+	src += hosts[i] + ":/"
+
+	if err := cephfsVolume.mounter.Mount(src, mountpoint, "ceph", opt); err != nil {
+		return fmt.Errorf("CephFS: mount failed: %v", err)
+	}
+
+	return nil
+}
diff --git a/pkg/volume/cephfs/cephfs_test.go b/pkg/volume/cephfs/cephfs_test.go
new file mode 100644
index 0000000000..a9cd175152
--- /dev/null
+++ b/pkg/volume/cephfs/cephfs_test.go
@@ -0,0 +1,103 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cephfs
+
+import (
+	"os"
+	"testing"
+
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/types"
+	"k8s.io/kubernetes/pkg/util/mount"
+	"k8s.io/kubernetes/pkg/volume"
+)
+
+func TestCanSupport(t *testing.T) {
+	plugMgr := volume.VolumePluginMgr{}
+	plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("fake", nil, nil))
+	plug, err := plugMgr.FindPluginByName("kubernetes.io/cephfs")
+	if err != nil {
+		t.Errorf("Can't find the plugin by name: %v", err)
+	}
+	if plug.Name() != "kubernetes.io/cephfs" {
+		t.Errorf("Wrong name: %s", plug.Name())
+	}
+	if plug.CanSupport(&volume.Spec{Name: "foo", VolumeSource: api.VolumeSource{}}) {
+		t.Errorf("Expected false")
+	}
+	if !plug.CanSupport(&volume.Spec{Name: "foo", VolumeSource: api.VolumeSource{CephFS: &api.CephFSVolumeSource{}}}) {
+		t.Errorf("Expected true")
+	}
+}
+
+func TestPlugin(t *testing.T) {
+	plugMgr := volume.VolumePluginMgr{}
+	plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", nil, nil))
+	plug, err := plugMgr.FindPluginByName("kubernetes.io/cephfs")
+	if err != nil {
+		t.Errorf("Can't find the plugin by name: %v", err)
+	}
+	spec := &api.Volume{
+		Name: "vol1",
+		VolumeSource: api.VolumeSource{
+			CephFS: &api.CephFSVolumeSource{
+				Monitors:   []string{"a", "b"},
+				User:       "user",
+				SecretRef:  nil,
+				SecretFile: "/etc/ceph/user.secret",
+			},
+		},
+	}
+
+	builder, err := plug.(*cephfsPlugin).newBuilderInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &mount.FakeMounter{}, "secrets")
+	if err != nil {
+		t.Errorf("Failed to make a new Builder: %v", err)
+	}
+	if builder == nil {
+		t.Fatal("Got a nil Builder")
+	}
+	volumePath := builder.GetPath()
+	if volumePath != "/tmp/fake/pods/poduid/volumes/kubernetes.io~cephfs/vol1" {
+		t.Errorf("Got unexpected path: %s", volumePath)
+	}
+	if err := builder.SetUp(); err != nil {
+		t.Errorf("Expected success, got: %v", err)
+	}
+	if _, err := os.Stat(volumePath); err != nil {
+		if os.IsNotExist(err) {
+			t.Errorf("SetUp() failed, volume path not created: %s", volumePath)
+		} else {
+			t.Errorf("SetUp() failed: %v", err)
+		}
+	}
+	cleaner, err := plug.(*cephfsPlugin).newCleanerInternal("vol1", types.UID("poduid"), &mount.FakeMounter{})
+	if err != nil {
+		t.Errorf("Failed to make a new Cleaner: %v", err)
+	}
+	if cleaner == nil {
+		t.Fatal("Got a nil Cleaner")
+	}
+	if err := cleaner.TearDown(); err != nil {
+		t.Errorf("Expected success, got: %v", err)
+	}
+	if _, err := os.Stat(volumePath); err == nil {
+		t.Errorf("TearDown() failed, volume path still exists: %s", volumePath)
+	} else if !os.IsNotExist(err) {
+		t.Errorf("TearDown() failed: %v", err)
+	}
+}
diff --git a/pkg/volume/cephfs/doc.go b/pkg/volume/cephfs/doc.go
new file mode 100644
index 0000000000..7ce1ec7467
--- /dev/null
+++ b/pkg/volume/cephfs/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package cephfs contains the internal representation of Ceph file system
+// (CephFS) volumes.
+package cephfs
diff --git a/test/e2e/volumes.go b/test/e2e/volumes.go
index ac37823f3b..a95d23d791 100644
--- a/test/e2e/volumes.go
+++ b/test/e2e/volumes.go
@@ -478,5 +478,73 @@ var _ = Describe("Volumes", func() {
 		})
 	})
+
+	////////////////////////////////////////////////////////////////////////
+	// Ceph
+	////////////////////////////////////////////////////////////////////////
+
+	// Marked with [Skipped] to skip the test by default (see driver.go);
+	// the test needs privileged containers, which are disabled by default.
+	// Run the test with "go run hack/e2e.go ... --ginkgo.focus=Volume"
+	Describe("[Skipped] CephFS", func() {
+		It("should be mountable", func() {
+			config := VolumeTestConfig{
+				namespace:   namespace.Name,
+				prefix:      "cephfs",
+				serverImage: "gcr.io/google_containers/volume-ceph",
+				serverPorts: []int{6789},
+			}
+
+			defer func() {
+				if clean {
+					volumeTestCleanup(c, config)
+				}
+			}()
+			pod := startVolumeServer(c, config)
+			serverIP := pod.Status.PodIP
+			Logf("Ceph server IP address: %v", serverIP)
+			By("sleeping a bit to give ceph server time to initialize")
+			time.Sleep(20 * time.Second)
+
+			// create ceph secret
+			secret := &api.Secret{
+				TypeMeta: api.TypeMeta{
+					Kind:       "Secret",
+					APIVersion: "v1beta3",
+				},
+				ObjectMeta: api.ObjectMeta{
+					Name: config.prefix + "-secret",
+				},
+				// Must use the ceph keyring created in
+				// contrib/for-tests/volumes-tester/ceph/init.sh, encoded in base64.
+				Data: map[string][]byte{
+					"key": []byte("AQAMgXhVwBCeDhAA9nlPaFyfUSatGD4drFWDvQ=="),
+				},
+			}
+
+			defer func() {
+				if clean {
+					if err := c.Secrets(namespace.Name).Delete(secret.Name); err != nil {
+						Failf("unable to delete secret %v: %v", secret.Name, err)
+					}
+				}
+			}()
+
+			var err error
+			if secret, err = c.Secrets(namespace.Name).Create(secret); err != nil {
+				Failf("unable to create test secret %s: %v", secret.Name, err)
+			}
+
+			volume := api.VolumeSource{
+				CephFS: &api.CephFSVolumeSource{
+					Monitors:  []string{serverIP + ":6789"},
+					User:      "kube",
+					SecretRef: &api.LocalObjectReference{Name: config.prefix + "-secret"},
+					ReadOnly:  true,
+				},
+			}
+			// Must match the content of contrib/for-tests/volumes-tester/ceph/index.html
+			testVolumeClient(c, config, volume, "Hello Ceph!")
+		})
+	})
 })
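
The pod examples above mount CephFS directly. Because the patch also adds *cephfs* to PersistentVolumeSource and advertises the ReadWriteOnce, ReadOnlyMany, and ReadWriteMany access modes, an administrator can expose the same share as a PersistentVolume. The following is a minimal sketch, not part of the patch itself; the name, capacity, and monitor address are placeholders:

```json
{
    "apiVersion": "v1",
    "kind": "PersistentVolume",
    "metadata": {
        "name": "cephfs-pv"
    },
    "spec": {
        "capacity": {
            "storage": "2Gi"
        },
        "accessModes": [
            "ReadWriteMany"
        ],
        "cephfs": {
            "monitors": [
                "10.16.154.78:6789"
            ],
            "user": "admin",
            "secretFile": "/etc/ceph/admin.secret",
            "readOnly": false
        }
    }
}
```

As with the pod examples, such a volume would be created with `cluster/kubectl.sh create -f` and consumed through an ordinary PersistentVolumeClaim.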