Revert "Volume Plugin for Cinder; Openstack Block Storage"

pull/6/head
Prashanth B 2015-08-28 10:56:24 -07:00
parent 8d0d54ffed
commit 05b205bab6
23 changed files with 8 additions and 1248 deletions

View File

@ -12066,10 +12066,6 @@
"$ref": "v1.ISCSIVolumeSource",
"description": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin."
},
"cinder": {
"$ref": "v1.CinderVolumeSource",
"description": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md"
},
"accessModes": {
"type": "array",
"items": {
@ -12290,27 +12286,6 @@
}
}
},
"v1.CinderVolumeSource": {
"id": "v1.CinderVolumeSource",
"description": "CinderVolumeSource represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet.",
"required": [
"volumeID"
],
"properties": {
"volumeID": {
"type": "string",
"description": "volume id used to identify the volume in cinder More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md"
},
"fsType": {
"type": "string",
"description": "Required: Filesystem type to mount. Must be a filesystem type supported by the host operating system. Only ext3 and ext4 are allowed More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md"
},
"readOnly": {
"type": "boolean",
"description": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md"
}
}
},
"v1.PersistentVolumeStatus": {
"id": "v1.PersistentVolumeStatus",
"description": "PersistentVolumeStatus is the current status of a persistent volume.",
@ -12505,10 +12480,6 @@
"rbd": {
"$ref": "v1.RBDVolumeSource",
"description": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: http://releases.k8s.io/HEAD/examples/rbd/README.md"
},
"cinder": {
"$ref": "v1.CinderVolumeSource",
"description": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md"
}
}
},

View File

@ -26,7 +26,6 @@ import (
// Volume plugins
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/aws_ebs"
"k8s.io/kubernetes/pkg/volume/cinder"
"k8s.io/kubernetes/pkg/volume/empty_dir"
"k8s.io/kubernetes/pkg/volume/gce_pd"
"k8s.io/kubernetes/pkg/volume/git_repo"
@ -59,7 +58,7 @@ func ProbeVolumePlugins() []volume.VolumePlugin {
allPlugins = append(allPlugins, glusterfs.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, persistent_claim.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, rbd.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, cinder.ProbeVolumePlugins()...)
return allPlugins
}

View File

@ -1,84 +0,0 @@
# MySQL installation with the Cinder volume plugin
Cinder is the OpenStack Block Storage service. This example shows how a Cinder volume can be attached and mounted to a pod in Kubernetes.
### Prerequisites
Start kubelet with OpenStack as the cloud provider and a valid cloud config (typically via `--cloud-provider=openstack --cloud-config=<file>`).
Sample cloud_config:
```
[Global]
auth-url=https://os-identity.vip.foo.bar.com:5443/v2.0
username=user
password=pass
region=region1
tenant-id=0c331a1df18571594d49fe68asa4e
```
Currently the Cinder volume plugin works only on Linux hosts and supports ext3 and ext4 as filesystem types.
Make sure the kubelet host machine has the following executables:
```
/bin/lsblk -- to find out the fstype of the volume
/sbin/mkfs.ext3 and /sbin/mkfs.ext4 -- to format the volume if required
/usr/bin/udevadm -- to probe the attached volume so that a symlink is created under /dev/disk/by-id/ with a virtio- prefix
```
Ensure Cinder is installed and configured properly in the region in which the kubelet runs.
### Example
Create a Cinder volume, e.g.:
`cinder create --display-name=test-repo 2`
Use the ID of the newly created Cinder volume in the pod [definition](mysql.yaml).
Create the pod from that definition:
`cluster/kubectl.sh create -f examples/mysql-cinder-pd/mysql.yaml`
This should now (see the sketch after this list):
1. Attach the specified volume to the kubelet's host machine
2. Format the volume if required (only if it is not already formatted with the specified fstype)
3. Mount it on the kubelet's host machine
4. Spin up a container with this volume mounted at the path specified in the pod definition
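A minimal Go sketch of that kubelet-side flow, using the volume plugin API removed by this commit (`plug`, `pod`, and `mounter` are assumed to be in scope; error handling elided):
```go
// Sketch only: steps 1-4 as seen through the plugin API.
spec := volume.NewSpecFromVolume(&api.Volume{
	Name: "mysql-persistent-storage",
	VolumeSource: api.VolumeSource{
		Cinder: &api.CinderVolumeSource{
			VolumeID: "bd82f7e2-wece-4c01-a505-4acf60b07f4a",
			FSType:   "ext4",
		},
	},
})
builder, _ := plug.NewBuilder(spec, pod, volume.VolumeOptions{}, mounter)
_ = builder.SetUp() // attach, format if needed, mount, then bind-mount into the pod's volume dir
```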

View File

@ -1,13 +0,0 @@
apiVersion: v1
kind: Service
metadata:
labels:
name: mysql
name: mysql
spec:
ports:
# the port that this service should serve on
- port: 3306
# label keys and values that must match in order to receive traffic for this service
selector:
name: mysql

View File

@ -1,30 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: mysql
labels:
name: mysql
spec:
containers:
- resources:
limits:
cpu: 0.5
image: mysql
name: mysql
env:
- name: MYSQL_ROOT_PASSWORD
# change this
value: yourpassword
ports:
- containerPort: 3306
name: mysql
volumeMounts:
# name must match the volume name below
- name: mysql-persistent-storage
# mount path within the container
mountPath: /var/lib/mysql
volumes:
- name: mysql-persistent-storage
cinder:
volumeID: bd82f7e2-wece-4c01-a505-4acf60b07f4a
fsType: ext4

View File

@ -71,13 +71,6 @@ func deepCopy_api_Capabilities(in Capabilities, out *Capabilities, c *conversion
return nil
}
func deepCopy_api_CinderVolumeSource(in CinderVolumeSource, out *CinderVolumeSource, c *conversion.Cloner) error {
out.VolumeID = in.VolumeID
out.FSType = in.FSType
out.ReadOnly = in.ReadOnly
return nil
}
func deepCopy_api_ComponentCondition(in ComponentCondition, out *ComponentCondition, c *conversion.Cloner) error {
out.Type = in.Type
out.Status = in.Status
@ -1161,14 +1154,6 @@ func deepCopy_api_PersistentVolumeSource(in PersistentVolumeSource, out *Persist
} else {
out.ISCSI = nil
}
if in.Cinder != nil {
out.Cinder = new(CinderVolumeSource)
if err := deepCopy_api_CinderVolumeSource(*in.Cinder, out.Cinder, c); err != nil {
return err
}
} else {
out.Cinder = nil
}
return nil
}
@ -2101,14 +2086,6 @@ func deepCopy_api_VolumeSource(in VolumeSource, out *VolumeSource, c *conversion
} else {
out.RBD = nil
}
if in.Cinder != nil {
out.Cinder = new(CinderVolumeSource)
if err := deepCopy_api_CinderVolumeSource(*in.Cinder, out.Cinder, c); err != nil {
return err
}
} else {
out.Cinder = nil
}
return nil
}
@ -2149,7 +2126,6 @@ func init() {
deepCopy_api_AWSElasticBlockStoreVolumeSource,
deepCopy_api_Binding,
deepCopy_api_Capabilities,
deepCopy_api_CinderVolumeSource,
deepCopy_api_ComponentCondition,
deepCopy_api_ComponentStatus,
deepCopy_api_ComponentStatusList,

View File

@ -224,8 +224,6 @@ type VolumeSource struct {
PersistentVolumeClaim *PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty"`
// RBD represents a Rados Block Device mount on the host that shares a pod's lifetime
RBD *RBDVolumeSource `json:"rbd,omitempty"`
// Cinder represents a Cinder volume attached and mounted on the kubelet's host machine
Cinder *CinderVolumeSource `json:"cinder,omitempty"`
}
// Similar to VolumeSource but meant for the administrator who creates PVs.
@ -250,8 +248,6 @@ type PersistentVolumeSource struct {
// ISCSIVolumeSource represents an ISCSI resource that is attached to a
// kubelet's host machine and then exposed to the pod.
ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty"`
// Cinder represents a Cinder volume attached and mounted on the kubelet's host machine
Cinder *CinderVolumeSource `json:"cinder,omitempty"`
}
type PersistentVolumeClaimVolumeSource struct {
@ -562,21 +558,6 @@ type RBDVolumeSource struct {
ReadOnly bool `json:"readOnly,omitempty"`
}
// CinderVolumeSource represents a Cinder volume resource in OpenStack.
// A Cinder volume must exist and be formatted before mounting to a container.
// The volume must also be in the same region as the kubelet.
type CinderVolumeSource struct {
// Unique ID of the volume, used to identify the Cinder volume.
VolumeID string `json:"volumeID"`
// Required: Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Only ext3 and ext4 are allowed
FSType string `json:"fsType,omitempty"`
// Optional: Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
ReadOnly bool `json:"readOnly,omitempty"`
}
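// A minimal sketch (volume ID borrowed from the validation tests in this
// commit) of how a pod-level volume referenced this source:
var exampleCinderVolume = Volume{
	Name: "cinder",
	VolumeSource: VolumeSource{
		Cinder: &CinderVolumeSource{
			VolumeID: "29ea5088-4f60-4757-962e-dba678767887",
			FSType:   "ext4",
			ReadOnly: false,
		},
	},
}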
// ContainerPort represents a network port in a single container
type ContainerPort struct {
// Optional: If specified, this must be an IANA_SVC_NAME Each named port

View File

@ -76,16 +76,6 @@ func convert_api_Capabilities_To_v1_Capabilities(in *api.Capabilities, out *Capa
return nil
}
func convert_api_CinderVolumeSource_To_v1_CinderVolumeSource(in *api.CinderVolumeSource, out *CinderVolumeSource, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*api.CinderVolumeSource))(in)
}
out.VolumeID = in.VolumeID
out.FSType = in.FSType
out.ReadOnly = in.ReadOnly
return nil
}
func convert_api_ComponentCondition_To_v1_ComponentCondition(in *api.ComponentCondition, out *ComponentCondition, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*api.ComponentCondition))(in)
@ -1341,14 +1331,6 @@ func convert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource(in *api.Per
} else {
out.ISCSI = nil
}
if in.Cinder != nil {
out.Cinder = new(CinderVolumeSource)
if err := convert_api_CinderVolumeSource_To_v1_CinderVolumeSource(in.Cinder, out.Cinder, s); err != nil {
return err
}
} else {
out.Cinder = nil
}
return nil
}
@ -2331,14 +2313,6 @@ func convert_api_VolumeSource_To_v1_VolumeSource(in *api.VolumeSource, out *Volu
} else {
out.RBD = nil
}
if in.Cinder != nil {
out.Cinder = new(CinderVolumeSource)
if err := convert_api_CinderVolumeSource_To_v1_CinderVolumeSource(in.Cinder, out.Cinder, s); err != nil {
return err
}
} else {
out.Cinder = nil
}
return nil
}
@ -2392,16 +2366,6 @@ func convert_v1_Capabilities_To_api_Capabilities(in *Capabilities, out *api.Capa
return nil
}
func convert_v1_CinderVolumeSource_To_api_CinderVolumeSource(in *CinderVolumeSource, out *api.CinderVolumeSource, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*CinderVolumeSource))(in)
}
out.VolumeID = in.VolumeID
out.FSType = in.FSType
out.ReadOnly = in.ReadOnly
return nil
}
func convert_v1_ComponentCondition_To_api_ComponentCondition(in *ComponentCondition, out *api.ComponentCondition, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*ComponentCondition))(in)
@ -3657,14 +3621,6 @@ func convert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource(in *Persist
} else {
out.ISCSI = nil
}
if in.Cinder != nil {
out.Cinder = new(api.CinderVolumeSource)
if err := convert_v1_CinderVolumeSource_To_api_CinderVolumeSource(in.Cinder, out.Cinder, s); err != nil {
return err
}
} else {
out.Cinder = nil
}
return nil
}
@ -4647,14 +4603,6 @@ func convert_v1_VolumeSource_To_api_VolumeSource(in *VolumeSource, out *api.Volu
} else {
out.RBD = nil
}
if in.Cinder != nil {
out.Cinder = new(api.CinderVolumeSource)
if err := convert_v1_CinderVolumeSource_To_api_CinderVolumeSource(in.Cinder, out.Cinder, s); err != nil {
return err
}
} else {
out.Cinder = nil
}
return nil
}
@ -4663,7 +4611,6 @@ func init() {
convert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource,
convert_api_Binding_To_v1_Binding,
convert_api_Capabilities_To_v1_Capabilities,
convert_api_CinderVolumeSource_To_v1_CinderVolumeSource,
convert_api_ComponentCondition_To_v1_ComponentCondition,
convert_api_ComponentStatusList_To_v1_ComponentStatusList,
convert_api_ComponentStatus_To_v1_ComponentStatus,
@ -4778,7 +4725,6 @@ func init() {
convert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource,
convert_v1_Binding_To_api_Binding,
convert_v1_Capabilities_To_api_Capabilities,
convert_v1_CinderVolumeSource_To_api_CinderVolumeSource,
convert_v1_ComponentCondition_To_api_ComponentCondition,
convert_v1_ComponentStatusList_To_api_ComponentStatusList,
convert_v1_ComponentStatus_To_api_ComponentStatus,

View File

@ -86,13 +86,6 @@ func deepCopy_v1_Capabilities(in Capabilities, out *Capabilities, c *conversion.
return nil
}
func deepCopy_v1_CinderVolumeSource(in CinderVolumeSource, out *CinderVolumeSource, c *conversion.Cloner) error {
out.VolumeID = in.VolumeID
out.FSType = in.FSType
out.ReadOnly = in.ReadOnly
return nil
}
func deepCopy_v1_ComponentCondition(in ComponentCondition, out *ComponentCondition, c *conversion.Cloner) error {
out.Type = in.Type
out.Status = in.Status
@ -1160,14 +1153,6 @@ func deepCopy_v1_PersistentVolumeSource(in PersistentVolumeSource, out *Persiste
} else {
out.ISCSI = nil
}
if in.Cinder != nil {
out.Cinder = new(CinderVolumeSource)
if err := deepCopy_v1_CinderVolumeSource(*in.Cinder, out.Cinder, c); err != nil {
return err
}
} else {
out.Cinder = nil
}
return nil
}
@ -2106,14 +2091,6 @@ func deepCopy_v1_VolumeSource(in VolumeSource, out *VolumeSource, c *conversion.
} else {
out.RBD = nil
}
if in.Cinder != nil {
out.Cinder = new(CinderVolumeSource)
if err := deepCopy_v1_CinderVolumeSource(*in.Cinder, out.Cinder, c); err != nil {
return err
}
} else {
out.Cinder = nil
}
return nil
}
@ -2151,7 +2128,6 @@ func init() {
deepCopy_v1_AWSElasticBlockStoreVolumeSource,
deepCopy_v1_Binding,
deepCopy_v1_Capabilities,
deepCopy_v1_CinderVolumeSource,
deepCopy_v1_ComponentCondition,
deepCopy_v1_ComponentStatus,
deepCopy_v1_ComponentStatusList,

View File

@ -277,9 +277,6 @@ type VolumeSource struct {
// RBD represents a Rados Block Device mount on the host that shares a pod's lifetime.
// More info: http://releases.k8s.io/HEAD/examples/rbd/README.md
RBD *RBDVolumeSource `json:"rbd,omitempty"`
// Cinder represents a Cinder volume attached and mounted on the kubelet's host machine
// More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
Cinder *CinderVolumeSource `json:"cinder,omitempty"`
}
// PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace.
@ -325,9 +322,6 @@ type PersistentVolumeSource struct {
// ISCSI represents an ISCSI Disk resource that is attached to a
// kubelet's host machine and then exposed to the pod. Provisioned by an admin.
ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty"`
// Cinder represents a Cinder volume attached and mounted on the kubelet's host machine
// More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
Cinder *CinderVolumeSource `json:"cinder,omitempty"`
}
// PersistentVolume (PV) is a storage resource provisioned by an administrator.
@ -576,24 +570,6 @@ type RBDVolumeSource struct {
ReadOnly bool `json:"readOnly,omitempty"`
}
// CinderVolumeSource represents a Cinder volume resource in OpenStack.
// A Cinder volume must exist before mounting to a container.
// The volume must also be in the same region as the kubelet.
type CinderVolumeSource struct {
// Volume ID used to identify the volume in Cinder.
// More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
VolumeID string `json:"volumeID"`
// Required: Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Only ext3 and ext4 are allowed
// More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
FSType string `json:"fsType,omitempty"`
// Optional: Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
ReadOnly bool `json:"readOnly,omitempty"`
}
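// A minimal sketch of a PersistentVolume built on this source (name and
// volume ID illustrative; capacity omitted for brevity; ReadWriteOnce is
// the access mode the plugin supported):
var exampleCinderPV = PersistentVolume{
	ObjectMeta: ObjectMeta{Name: "cinder-pv"},
	Spec: PersistentVolumeSpec{
		AccessModes: []PersistentVolumeAccessMode{ReadWriteOnce},
		PersistentVolumeSource: PersistentVolumeSource{
			Cinder: &CinderVolumeSource{
				VolumeID: "29ea5088-4f60-4757-962e-dba678767887",
				FSType:   "ext4",
			},
		},
	},
}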
const (
StorageMediumDefault StorageMedium = "" // use whatever the default is for the node
StorageMediumMemory StorageMedium = "Memory" // use memory (tmpfs)

View File

@ -69,17 +69,6 @@ func (Capabilities) SwaggerDoc() map[string]string {
return map_Capabilities
}
var map_CinderVolumeSource = map[string]string{
"": "CinderVolumeSource represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet.",
"volumeID": "volume id used to identify the volume in cinder More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md",
"fsType": "Required: Filesystem type to mount. Must be a filesystem type supported by the host operating system. Only ext3 and ext4 are allowed More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md",
"readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md",
}
func (CinderVolumeSource) SwaggerDoc() map[string]string {
return map_CinderVolumeSource
}
var map_ComponentCondition = map[string]string{
"": "Information about the condition of a component.",
"type": "Type of condition for a component. Valid value: \"Healthy\"",
@ -797,7 +786,6 @@ var map_PersistentVolumeSource = map[string]string{
"nfs": "NFS represents an NFS mount on the host. Provisioned by an admin. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#nfs",
"rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: http://releases.k8s.io/HEAD/examples/rbd/README.md",
"iscsi": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin.",
"cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md",
}
func (PersistentVolumeSource) SwaggerDoc() map[string]string {
@ -1372,7 +1360,6 @@ var map_VolumeSource = map[string]string{
"glusterfs": "Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: http://releases.k8s.io/HEAD/examples/glusterfs/README.md",
"persistentVolumeClaim": "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistentvolumeclaims",
"rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: http://releases.k8s.io/HEAD/examples/rbd/README.md",
"cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md",
}
func (VolumeSource) SwaggerDoc() map[string]string {

View File

@ -362,10 +362,6 @@ func validateSource(source *api.VolumeSource) errs.ValidationErrorList {
numVolumes++
allErrs = append(allErrs, validateRBD(source.RBD).Prefix("rbd")...)
}
if source.Cinder != nil {
numVolumes++
allErrs = append(allErrs, validateCinderVolumeSource(source.Cinder).Prefix("cinder")...)
}
if numVolumes != 1 {
allErrs = append(allErrs, errs.NewFieldInvalid("", source, "exactly 1 volume type is required"))
}
@ -489,17 +485,6 @@ func validateRBD(rbd *api.RBDVolumeSource) errs.ValidationErrorList {
return allErrs
}
func validateCinderVolumeSource(cd *api.CinderVolumeSource) errs.ValidationErrorList {
allErrs := errs.ValidationErrorList{}
if cd.VolumeID == "" {
allErrs = append(allErrs, errs.NewFieldRequired("volumeID"))
}
if cd.FSType == "" || (cd.FSType != "ext3" && cd.FSType != "ext4") {
allErrs = append(allErrs, errs.NewFieldRequired("fsType required and should be of type ext3 or ext4"))
}
return allErrs
}
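// A quick sketch of the rules above (values hypothetical): an empty volumeID
// and an unsupported filesystem each contribute an error.
func exampleCinderValidation() {
	src := &api.CinderVolumeSource{VolumeID: "", FSType: "xfs"}
	errList := validateCinderVolumeSource(src)
	_ = errList // two errors: volumeID is required; fsType must be ext3 or ext4
}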
func ValidatePersistentVolumeName(name string, prefix bool) (bool, string) {
return NameIsDNSSubdomain(name, prefix)
}
@ -559,10 +544,6 @@ func ValidatePersistentVolume(pv *api.PersistentVolume) errs.ValidationErrorList
numVolumes++
allErrs = append(allErrs, validateISCSIVolumeSource(pv.Spec.ISCSI).Prefix("iscsi")...)
}
if pv.Spec.Cinder != nil {
numVolumes++
allErrs = append(allErrs, validateCinderVolumeSource(pv.Spec.Cinder).Prefix("cinder")...)
}
if numVolumes != 1 {
allErrs = append(allErrs, errs.NewFieldInvalid("", pv.Spec.PersistentVolumeSource, "exactly 1 volume type is required"))
}

View File

@ -459,13 +459,12 @@ func TestValidateVolumes(t *testing.T) {
{Name: "secret", VolumeSource: api.VolumeSource{Secret: &api.SecretVolumeSource{SecretName: "my-secret"}}},
{Name: "glusterfs", VolumeSource: api.VolumeSource{Glusterfs: &api.GlusterfsVolumeSource{EndpointsName: "host1", Path: "path", ReadOnly: false}}},
{Name: "rbd", VolumeSource: api.VolumeSource{RBD: &api.RBDVolumeSource{CephMonitors: []string{"foo"}, RBDImage: "bar", FSType: "ext4"}}},
{Name: "cinder", VolumeSource: api.VolumeSource{Cinder: &api.CinderVolumeSource{"29ea5088-4f60-4757-962e-dba678767887", "ext4", false}}},
}
names, errs := validateVolumes(successCase)
if len(errs) != 0 {
t.Errorf("expected success: %v", errs)
}
if len(names) != len(successCase) || !names.HasAll("abc", "123", "abc-123", "empty", "gcepd", "gitrepo", "secret", "iscsidisk", "cinder") {
if len(names) != len(successCase) || !names.HasAll("abc", "123", "abc-123", "empty", "gcepd", "gitrepo", "secret", "iscsidisk") {
t.Errorf("wrong names result: %v", names)
}
emptyVS := api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}

View File

@ -22,16 +22,12 @@ import (
"io"
"net"
"net/http"
ossys "os"
"regexp"
"strings"
"time"
"code.google.com/p/gcfg"
"github.com/rackspace/gophercloud"
"github.com/rackspace/gophercloud/openstack"
"github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes"
"github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach"
"github.com/rackspace/gophercloud/openstack/compute/v2/flavors"
"github.com/rackspace/gophercloud/openstack/compute/v2/servers"
"github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/members"
@ -767,157 +763,3 @@ func (os *OpenStack) GetZone() (cloudprovider.Zone, error) {
func (os *OpenStack) Routes() (cloudprovider.Routes, bool) {
return nil, false
}
// AttachDisk attaches the given Cinder volume to the compute instance running the kubelet.
func (os *OpenStack) AttachDisk(diskName string) (string, error) {
disk, err := os.getVolume(diskName)
if err != nil {
return "", err
}
cClient, err := openstack.NewComputeV2(os.provider, gophercloud.EndpointOpts{
Region: os.region,
})
if err != nil || cClient == nil {
glog.Errorf("Unable to initialize nova client for region: %s", os.region)
return "", err
}
compute_id, err := os.getComputeIDbyHostname(cClient)
if err != nil || len(compute_id) == 0 {
glog.Errorf("Unable to get minion's id by minion's hostname")
return "", err
}
if len(disk.Attachments) > 0 && disk.Attachments[0]["server_id"] != nil {
if compute_id == disk.Attachments[0]["server_id"] {
glog.V(4).Infof("Disk: %q is already attached to compute: %q", diskName, compute_id)
return disk.ID, nil
} else {
errMsg := fmt.Sprintf("Disk %q is attached to a different compute: %q, should be detached before proceeding", diskName, disk.Attachments[0]["server_id"])
glog.Errorf(errMsg)
return "", errors.New(errMsg)
}
}
// add read only flag here if possible spothanis
_, err = volumeattach.Create(cClient, compute_id, &volumeattach.CreateOpts{
VolumeID: disk.ID,
}).Extract()
if err != nil {
glog.Errorf("Failed to attach %s volume to %s compute", diskName, compute_id)
return "", err
}
glog.V(2).Infof("Successfully attached %s volume to %s compute", diskName, compute_id)
return disk.ID, nil
}
// DetachDisk detaches the given Cinder volume from the compute instance running the kubelet.
func (os *OpenStack) DetachDisk(partialDiskId string) error {
disk, err := os.getVolume(partialDiskId)
if err != nil {
return err
}
cClient, err := openstack.NewComputeV2(os.provider, gophercloud.EndpointOpts{
Region: os.region,
})
if err != nil || cClient == nil {
glog.Errorf("Unable to initialize nova client for region: %s", os.region)
return err
}
compute_id, err := os.getComputeIDbyHostname(cClient)
if err != nil || len(compute_id) == 0 {
glog.Errorf("Unable to get compute id while detaching disk")
return err
}
if len(disk.Attachments) > 0 && disk.Attachments[0]["server_id"] != nil && compute_id == disk.Attachments[0]["server_id"] {
// This is a blocking call and affects the kubelet's performance directly.
// We should consider kicking it out into a separate routine, if it is bad.
err = volumeattach.Delete(cClient, compute_id, disk.ID).ExtractErr()
if err != nil {
glog.Errorf("Failed to delete volume %s from compute %s attached %v", disk.ID, compute_id, err)
return err
}
glog.V(2).Infof("Successfully detached volume: %s from compute: %s", disk.ID, compute_id)
} else {
errMsg := fmt.Sprintf("Disk: %s has no attachments or is not attached to compute: %s", disk.Name, compute_id)
glog.Errorf(errMsg)
return errors.New(errMsg)
}
return nil
}
// getVolume takes a partial/full disk ID or disk name and returns the matching volume.
func (os *OpenStack) getVolume(diskName string) (volumes.Volume, error) {
sClient, err := openstack.NewBlockStorageV1(os.provider, gophercloud.EndpointOpts{
Region: os.region,
})
var volume volumes.Volume
if err != nil || sClient == nil {
glog.Errorf("Unable to initialize cinder client for region: %s", os.region)
return volume, err
}
err = volumes.List(sClient, nil).EachPage(func(page pagination.Page) (bool, error) {
vols, err := volumes.ExtractVolumes(page)
if err != nil {
glog.Errorf("Failed to extract volumes: %v", err)
return false, err
} else {
for _, v := range vols {
glog.V(4).Infof("%s %s %v", v.ID, v.Name, v.Attachments)
if v.Name == diskName || strings.Contains(v.ID, diskName) {
volume = v
return true, nil
}
}
}
// if it reached here then no disk with the given name was found.
errmsg := fmt.Sprintf("Unable to find disk: %s in region %s", diskName, os.region)
return false, errors.New(errmsg)
})
if err != nil {
glog.Errorf("Error occured getting volume: %s", diskName)
return volume, err
}
return volume, err
}
func (os *OpenStack) getComputeIDbyHostname(cClient *gophercloud.ServiceClient) (string, error) {
hostname, err := ossys.Hostname()
if err != nil {
glog.Errorf("Failed to get Minion's hostname: %v", err)
return "", err
}
i, ok := os.Instances()
if !ok {
glog.Errorf("Unable to get instances")
return "", errors.New("Unable to get instances")
}
srvs, err := i.List(".")
if err != nil {
glog.Errorf("Failed to list servers: %v", err)
return "", err
}
if len(srvs) == 0 {
glog.Errorf("Found no servers in the region")
return "", errors.New("Found no servers in the region")
}
glog.V(4).Infof("found servers: %v", srvs)
for _, srvname := range srvs {
server, err := getServerByName(cClient, srvname)
if err != nil {
return "", err
} else {
if (server.Metadata["hostname"] != nil && server.Metadata["hostname"] == hostname) || (len(server.Name) > 0 && server.Name == hostname) {
glog.V(4).Infof("found server: %s with host :%s", server.Name, hostname)
return server.ID, nil
}
}
}
return "", fmt.Errorf("No server found matching hostname: %s", hostname)
}
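// A minimal sketch of how the attach/detach hooks above were consumed
// (function name hypothetical; error handling abbreviated):
func exampleAttachDetach(cloud *OpenStack) error {
	// AttachDisk accepts a volume name or (partial) volume ID and
	// returns the Cinder volume ID.
	id, err := cloud.AttachDisk("test-repo")
	if err != nil {
		return err
	}
	// Detach again; in the real plugin this happens at volume teardown.
	return cloud.DetachDisk(id)
}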

View File

@ -20,19 +20,19 @@ import (
"fmt"
"time"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/client/unversioned/cache"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/watch"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/mount"
)
// PersistentVolumeRecycler is a controller that watches for PersistentVolumes that are released from their claims.
@ -229,7 +229,3 @@ func (f *PersistentVolumeRecycler) NewWrapperBuilder(spec *volume.Spec, pod *api
func (f *PersistentVolumeRecycler) NewWrapperCleaner(spec *volume.Spec, podUID types.UID, mounter mount.Interface) (volume.Cleaner, error) {
return nil, fmt.Errorf("NewWrapperCleaner not supported by PVClaimBinder's VolumeHost implementation")
}
func (f *PersistentVolumeRecycler) GetCloudProvider() cloudprovider.Interface {
return nil
}

View File

@ -24,7 +24,6 @@ import (
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/cloudprovider"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util"
@ -80,10 +79,6 @@ func (vh *volumeHost) NewWrapperCleaner(spec *volume.Spec, podUID types.UID, mou
return c, nil
}
func (vh *volumeHost) GetCloudProvider() cloudprovider.Interface {
return vh.kubelet.cloud
}
func (kl *Kubelet) newVolumeBuilderFromPlugins(spec *volume.Spec, pod *api.Pod, opts volume.VolumeOptions, mounter mount.Interface) (volume.Builder, error) {
plugin, err := kl.volumePluginMgr.FindPluginBySpec(spec)
if err != nil {

View File

@ -1,281 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cinder
import (
"os"
"path"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/pkg/util/exec"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
)
// This is the primary entrypoint for volume plugins.
func ProbeVolumePlugins() []volume.VolumePlugin {
return []volume.VolumePlugin{&cinderPlugin{nil}}
}
type cinderPlugin struct {
host volume.VolumeHost
}
var _ volume.VolumePlugin = &cinderPlugin{}
const (
cinderVolumePluginName = "kubernetes.io/cinder"
)
func (plugin *cinderPlugin) Init(host volume.VolumeHost) {
plugin.host = host
}
func (plugin *cinderPlugin) Name() string {
return cinderVolumePluginName
}
func (plugin *cinderPlugin) CanSupport(spec *volume.Spec) bool {
return spec.PersistentVolumeSource.Cinder != nil || spec.VolumeSource.Cinder != nil
}
func (plugin *cinderPlugin) GetAccessModes() []api.PersistentVolumeAccessMode {
return []api.PersistentVolumeAccessMode{
api.ReadWriteOnce,
}
}
func (plugin *cinderPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions, mounter mount.Interface) (volume.Builder, error) {
return plugin.newBuilderInternal(spec, pod.UID, &CinderDiskUtil{}, mounter)
}
func (plugin *cinderPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID, manager cdManager, mounter mount.Interface) (volume.Builder, error) {
var cinder *api.CinderVolumeSource
if spec.VolumeSource.Cinder != nil {
cinder = spec.VolumeSource.Cinder
} else {
cinder = spec.PersistentVolumeSource.Cinder
}
pdName := cinder.VolumeID
fsType := cinder.FSType
readOnly := cinder.ReadOnly
return &cinderVolumeBuilder{
cinderVolume: &cinderVolume{
podUID: podUID,
volName: spec.Name,
pdName: pdName,
mounter: mounter,
manager: manager,
plugin: plugin,
},
fsType: fsType,
readOnly: readOnly,
blockDeviceMounter: &cinderSafeFormatAndMount{mounter, exec.New()}}, nil
}
func (plugin *cinderPlugin) NewCleaner(volName string, podUID types.UID, mounter mount.Interface) (volume.Cleaner, error) {
return plugin.newCleanerInternal(volName, podUID, &CinderDiskUtil{}, mounter)
}
func (plugin *cinderPlugin) newCleanerInternal(volName string, podUID types.UID, manager cdManager, mounter mount.Interface) (volume.Cleaner, error) {
return &cinderVolumeCleaner{
&cinderVolume{
podUID: podUID,
volName: volName,
manager: manager,
mounter: mounter,
plugin: plugin,
}}, nil
}
// Abstract interface to PD operations.
type cdManager interface {
// Attaches the disk to the kubelet's host machine.
AttachDisk(builder *cinderVolumeBuilder, globalPDPath string) error
// Detaches the disk from the kubelet's host machine.
DetachDisk(cleaner *cinderVolumeCleaner) error
}
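// A no-op cdManager sketch, mirroring fakePDManager in this package's tests:
// it only creates and removes the global mount directory.
type noopManager struct{}

func (noopManager) AttachDisk(b *cinderVolumeBuilder, globalPDPath string) error {
	return os.MkdirAll(globalPDPath, 0750)
}

func (noopManager) DetachDisk(c *cinderVolumeCleaner) error {
	return os.RemoveAll(makeGlobalPDName(c.plugin.host, c.pdName))
}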
var _ volume.Builder = &cinderVolumeBuilder{}
type cinderVolumeBuilder struct {
*cinderVolume
fsType string
readOnly bool
blockDeviceMounter mount.Interface
}
// cinderVolume volumes are disk resources provided by OpenStack Cinder
// that are attached to the kubelet's host machine and exposed to the pod.
type cinderVolume struct {
volName string
podUID types.UID
// Unique identifier of the volume, used to find the disk resource in the provider.
pdName string
// Filesystem type, optional.
fsType string
// Specifies the partition to mount
//partition string
// Specifies whether the disk will be attached as read-only.
readOnly bool
// Utility interface that provides API calls to the provider to attach/detach disks.
manager cdManager
// Mounter interface that provides system calls to mount the global path to the pod local path.
mounter mount.Interface
// diskMounter provides the interface that is used to mount the actual block device.
blockDeviceMounter mount.Interface
plugin *cinderPlugin
}
func detachDiskLogError(cd *cinderVolume) {
err := cd.manager.DetachDisk(&cinderVolumeCleaner{cd})
if err != nil {
glog.Warningf("Failed to detach disk: %v (%v)", cd, err)
}
}
func (b *cinderVolumeBuilder) SetUp() error {
return b.SetUpAt(b.GetPath())
}
// SetUpAt attaches the disk and bind mounts it to the volume path.
func (b *cinderVolumeBuilder) SetUpAt(dir string) error {
// TODO: handle failed mounts here.
notmnt, err := b.mounter.IsLikelyNotMountPoint(dir)
glog.V(4).Infof("PersistentDisk set up: %s %v %v", dir, !notmnt, err)
if err != nil && !os.IsNotExist(err) {
return err
}
if !notmnt {
return nil
}
globalPDPath := makeGlobalPDName(b.plugin.host, b.pdName)
if err := b.manager.AttachDisk(b, globalPDPath); err != nil {
return err
}
options := []string{"bind"}
if b.readOnly {
options = append(options, "ro")
}
if err := os.MkdirAll(dir, 0750); err != nil {
// TODO: we should really eject the attach/detach out into its own control loop.
detachDiskLogError(b.cinderVolume)
return err
}
// Perform a bind mount to the full path to allow duplicate mounts of the same PD.
err = b.mounter.Mount(globalPDPath, dir, "", options)
if err != nil {
notmnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
if mntErr != nil {
glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
return err
}
if !notmnt {
if mntErr = b.mounter.Unmount(dir); mntErr != nil {
glog.Errorf("Failed to unmount: %v", mntErr)
return err
}
notmnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
if mntErr != nil {
glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
return err
}
if !notmnt {
// This is very odd, we don't expect it. We'll try again next sync loop.
glog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", b.GetPath())
return err
}
}
os.Remove(dir)
// TODO: we should really eject the attach/detach out into its own control loop.
detachDiskLogError(b.cinderVolume)
return err
}
return nil
}
func (b *cinderVolumeBuilder) IsReadOnly() bool {
return b.readOnly
}
func makeGlobalPDName(host volume.VolumeHost, devName string) string {
return path.Join(host.GetPluginDir(cinderVolumePluginName), "mounts", devName)
}
func (cd *cinderVolume) GetPath() string {
name := cinderVolumePluginName
return cd.plugin.host.GetPodVolumeDir(cd.podUID, util.EscapeQualifiedNameForDisk(name), cd.volName)
}
type cinderVolumeCleaner struct {
*cinderVolume
}
var _ volume.Cleaner = &cinderVolumeCleaner{}
func (c *cinderVolumeCleaner) TearDown() error {
return c.TearDownAt(c.GetPath())
}
// Unmounts the bind mount, and detaches the disk only if the PD
// resource was the last reference to that disk on the kubelet.
func (c *cinderVolumeCleaner) TearDownAt(dir string) error {
notmnt, err := c.mounter.IsLikelyNotMountPoint(dir)
if err != nil {
return err
}
if notmnt {
return os.Remove(dir)
}
refs, err := mount.GetMountRefs(c.mounter, dir)
if err != nil {
return err
}
if err := c.mounter.Unmount(dir); err != nil {
return err
}
glog.Infof("successfully unmounted: %s\n", dir)
// If refCount is 1, then all bind mounts have been removed, and the
// remaining reference is the global mount. It is safe to detach.
if len(refs) == 1 {
c.pdName = path.Base(refs[0])
if err := c.manager.DetachDisk(c); err != nil {
return err
}
}
notmnt, mntErr := c.mounter.IsLikelyNotMountPoint(dir)
if mntErr != nil {
glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
return mntErr
}
if !notmnt {
if err := os.Remove(dir); err != nil {
return err
}
}
return nil
}

View File

@ -1,135 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cinder
import (
"os"
"testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
)
func TestCanSupport(t *testing.T) {
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/cinder")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
if plug.Name() != "kubernetes.io/cinder" {
t.Errorf("Wrong name: %s", plug.Name())
}
if !plug.CanSupport(&volume.Spec{
Name: "foo",
VolumeSource: api.VolumeSource{Cinder: &api.CinderVolumeSource{}}}) {
t.Errorf("Expected true")
}
if !plug.CanSupport(&volume.Spec{Name: "foo", PersistentVolumeSource: api.PersistentVolumeSource{Cinder: &api.CinderVolumeSource{}}}) {
t.Errorf("Expected true")
}
}
type fakePDManager struct{}
func (fake *fakePDManager) AttachDisk(b *cinderVolumeBuilder, globalPDPath string) error {
globalPath := makeGlobalPDName(b.plugin.host, b.pdName)
err := os.MkdirAll(globalPath, 0750)
if err != nil {
return err
}
return nil
}
func (fake *fakePDManager) DetachDisk(c *cinderVolumeCleaner) error {
globalPath := makeGlobalPDName(c.plugin.host, c.pdName)
err := os.RemoveAll(globalPath)
if err != nil {
return err
}
return nil
}
func TestPlugin(t *testing.T) {
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/cinder")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
spec := &api.Volume{
Name: "vol1",
VolumeSource: api.VolumeSource{
Cinder: &api.CinderVolumeSource{
VolumeID: "pd",
FSType: "ext4",
},
},
}
builder, err := plug.(*cinderPlugin).newBuilderInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &fakePDManager{}, &mount.FakeMounter{})
if err != nil {
t.Errorf("Failed to make a new Builder: %v", err)
}
if builder == nil {
t.Errorf("Got a nil Builder: %v")
}
path := builder.GetPath()
if path != "/tmp/fake/pods/poduid/volumes/kubernetes.io~cinder/vol1" {
t.Errorf("Got unexpected path: %s", path)
}
if err := builder.SetUp(); err != nil {
t.Errorf("Expected success, got: %v", err)
}
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) {
t.Errorf("SetUp() failed, volume path not created: %s", path)
} else {
t.Errorf("SetUp() failed: %v", err)
}
}
cleaner, err := plug.(*cinderPlugin).newCleanerInternal("vol1", types.UID("poduid"), &fakePDManager{}, &mount.FakeMounter{})
if err != nil {
t.Errorf("Failed to make a new Cleaner: %v", err)
}
if cleaner == nil {
t.Errorf("Got a nil Cleaner: %v")
}
if err := cleaner.TearDown(); err != nil {
t.Errorf("Expected success, got: %v", err)
}
if _, err := os.Stat(path); err == nil {
t.Errorf("TearDown() failed, volume path still exists: %s", path)
} else if !os.IsNotExist(err) {
t.Errorf("SetUp() failed: %v", err)
}
}

View File

@ -1,212 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cinder
import (
"errors"
"fmt"
"io/ioutil"
"os"
"path"
"strings"
"time"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/cloudprovider/providers/openstack"
"k8s.io/kubernetes/pkg/util/exec"
"k8s.io/kubernetes/pkg/util/mount"
)
type CinderDiskUtil struct{}
// AttachDisk attaches the disk specified by the given cinderVolumeBuilder to the
// current kubelet and mounts it at its global path.
func (util *CinderDiskUtil) AttachDisk(b *cinderVolumeBuilder, globalPDPath string) error {
options := []string{}
if b.readOnly {
options = append(options, "ro")
}
cloud := b.plugin.host.GetCloudProvider()
if cloud == nil {
glog.Errorf("Cloud provider not initialized properly")
return errors.New("Cloud provider not initialized properly")
}
diskid, err := cloud.(*openstack.OpenStack).AttachDisk(b.pdName)
if err != nil {
return err
}
var devicePath string
numTries := 0
for {
devicePath = makeDevicePath(diskid)
// probe the attached vol so that symlink in /dev/disk/by-id is created
probeAttachedVolume()
_, err := os.Stat(devicePath)
if err == nil {
break
}
if err != nil && !os.IsNotExist(err) {
return err
}
numTries++
if numTries == 10 {
return errors.New("Could not attach disk: Timeout after 60s")
}
time.Sleep(time.Second * 6)
}
notmnt, err := b.mounter.IsLikelyNotMountPoint(globalPDPath)
if err != nil {
if os.IsNotExist(err) {
if err := os.MkdirAll(globalPDPath, 0750); err != nil {
return err
}
notmnt = true
} else {
return err
}
}
if notmnt {
err = b.blockDeviceMounter.Mount(devicePath, globalPDPath, b.fsType, options)
if err != nil {
os.Remove(globalPDPath)
return err
}
glog.V(2).Infof("Safe mount successful: %q\n", devicePath)
}
return nil
}
func makeDevicePath(diskid string) string {
files, _ := ioutil.ReadDir("/dev/disk/by-id/")
for _, f := range files {
if strings.Contains(f.Name(), "virtio-") {
devid_prefix := f.Name()[len("virtio-"):len(f.Name())]
if strings.Contains(diskid, devid_prefix) {
glog.V(4).Infof("Found disk attached as %q; full devicepath: %s\n", f.Name(), path.Join("/dev/disk/by-id/", f.Name()))
return path.Join("/dev/disk/by-id/", f.Name())
}
}
}
glog.Warningf("Failed to find device for the diskid: %q\n", diskid)
return ""
}
// Unmounts the device and detaches the disk from the kubelet's host machine.
func (util *CinderDiskUtil) DetachDisk(cd *cinderVolumeCleaner) error {
globalPDPath := makeGlobalPDName(cd.plugin.host, cd.pdName)
if err := cd.mounter.Unmount(globalPDPath); err != nil {
return err
}
if err := os.Remove(globalPDPath); err != nil {
return err
}
glog.V(2).Infof("Successfully unmounted main device: %s\n", globalPDPath)
cloud := cd.plugin.host.GetCloudProvider()
if cloud == nil {
glog.Errorf("Cloud provider not initialized properly")
return errors.New("Cloud provider not initialized properly")
}
if err := cloud.(*openstack.OpenStack).DetachDisk(cd.pdName); err != nil {
return err
}
glog.V(2).Infof("Successfully detached cinder volume %s", cd.pdName)
return nil
}
type cinderSafeFormatAndMount struct {
mount.Interface
runner exec.Interface
}
/*
The functions below depend on the following executables; this will have to be ported to more generic implementations:
/bin/lsblk
/sbin/mkfs.ext3 or /sbin/mkfs.ext4
/usr/bin/udevadm
*/
func (diskmounter *cinderSafeFormatAndMount) Mount(device string, target string, fstype string, options []string) error {
fmtRequired, err := isFormatRequired(device, fstype, diskmounter)
if err != nil {
glog.Warningf("Failed to determine if formating is required: %v\n", err)
//return err
}
if fmtRequired {
glog.V(2).Infof("Formatting of the vol required")
if _, err := formatVolume(device, fstype, diskmounter); err != nil {
glog.Warningf("Failed to format volume: %v\n", err)
return err
}
}
return diskmounter.Interface.Mount(device, target, fstype, options)
}
func isFormatRequired(devicePath string, fstype string, exec *cinderSafeFormatAndMount) (bool, error) {
args := []string{"-f", devicePath}
glog.V(4).Infof("exec-ing: /bin/lsblk %v\n", args)
cmd := exec.runner.Command("/bin/lsblk", args...)
dataOut, err := cmd.CombinedOutput()
if err != nil {
glog.Warningf("error running /bin/lsblk\n%s", string(dataOut))
return false, err
}
if len(string(dataOut)) > 0 {
if strings.Contains(string(dataOut), fstype) {
return false, nil
} else {
return true, nil
}
} else {
glog.Warningf("Failed to get any response from /bin/lsblk")
return false, errors.New("Failed to get reponse from /bin/lsblk")
}
glog.Warningf("Unknown error occured executing /bin/lsblk")
return false, errors.New("Unknown error occured executing /bin/lsblk")
}
func formatVolume(devicePath string, fstype string, exec *cinderSafeFormatAndMount) (bool, error) {
if "ext4" != fstype && "ext3" != fstype {
glog.Warningf("Unsupported format type: %q\n", fstype)
return false, fmt.Errorf("unsupported format type: %q", fstype)
}
args := []string{devicePath}
cmd := exec.runner.Command(fmt.Sprintf("/sbin/mkfs.%s", fstype), args...)
dataOut, err := cmd.CombinedOutput()
if err != nil {
glog.Warningf("error running /sbin/mkfs for fstype: %q \n%s", fstype, string(dataOut))
return false, err
}
glog.V(2).Infof("Successfully formated device: %q with fstype %q; output:\n %q\n,", devicePath, fstype, string(dataOut))
return true, err
}
func probeAttachedVolume() error {
executor := exec.New()
args := []string{"trigger"}
cmd := executor.Command("/usr/bin/udevadm", args...)
_, err := cmd.CombinedOutput()
if err != nil {
glog.Errorf("error running udevadm trigger %v\n", err)
return err
}
glog.V(4).Infof("Successfully probed all attachments")
return nil
}

View File

@ -1,82 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cinder
import (
"testing"
"k8s.io/kubernetes/pkg/util/exec"
"k8s.io/kubernetes/pkg/util/mount"
)
func TestSafeFormatAndMount(t *testing.T) {
tests := []struct {
fstype string
expectedArgs []string
err error
}{
{
fstype: "ext4",
expectedArgs: []string{"/dev/foo", "/mnt/bar"},
},
{
fstype: "ext3",
expectedArgs: []string{"/dev/foo/blah", "/mnt/bar/blah"},
},
}
for _, test := range tests {
var cmdOut string
var argsOut []string
fake := exec.FakeExec{
CommandScript: []exec.FakeCommandAction{
func(cmd string, args ...string) exec.Cmd {
cmdOut = cmd
argsOut = args
fake := exec.FakeCmd{
CombinedOutputScript: []exec.FakeCombinedOutputAction{
func() ([]byte, error) { return []byte{}, test.err },
},
}
return exec.InitFakeCmd(&fake, cmd, args...)
},
},
}
mounter := cinderSafeFormatAndMount{
&mount.FakeMounter{},
&fake,
}
err := mounter.Mount("/dev/foo", "/mnt/bar", test.fstype, nil)
if test.err == nil && err != nil {
t.Errorf("unexpected error: %v", err)
}
if test.err != nil {
if err == nil {
t.Errorf("unexpected non-error")
}
continue
}
if cmdOut != "/bin/lsblk" {
t.Errorf("unexpected command: %s", cmdOut)
}
if len(argsOut) != len(test.expectedArgs) {
t.Errorf("unexpected args: %v, expected: %v", argsOut, test.expectedArgs)
}
}
}

View File

@ -1,18 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package cinder contains the internal representation of cinder volumes.
package cinder

View File

@ -24,7 +24,6 @@ import (
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/pkg/util/errors"
@ -122,9 +121,6 @@ type VolumeHost interface {
// the provided spec. See comments on NewWrapperBuilder for more
// context.
NewWrapperCleaner(spec *Spec, podUID types.UID, mounter mount.Interface) (Cleaner, error)
// GetCloudProvider returns the cloud provider from the kubelet.
GetCloudProvider() cloudprovider.Interface
}
// VolumePluginMgr tracks registered plugins.

View File

@ -22,7 +22,6 @@ import (
"k8s.io/kubernetes/pkg/api"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/pkg/util/mount"
@ -33,11 +32,10 @@ type fakeVolumeHost struct {
rootDir string
kubeClient client.Interface
pluginMgr VolumePluginMgr
cloud cloudprovider.Interface
}
func NewFakeVolumeHost(rootDir string, kubeClient client.Interface, plugins []VolumePlugin) *fakeVolumeHost {
host := &fakeVolumeHost{rootDir: rootDir, kubeClient: kubeClient, cloud: nil}
host := &fakeVolumeHost{rootDir: rootDir, kubeClient: kubeClient}
host.pluginMgr.InitPlugins(plugins, host)
return host
}
@ -58,10 +56,6 @@ func (f *fakeVolumeHost) GetKubeClient() client.Interface {
return f.kubeClient
}
func (f *fakeVolumeHost) GetCloudProvider() cloudprovider.Interface {
return f.cloud
}
func (f *fakeVolumeHost) NewWrapperBuilder(spec *Spec, pod *api.Pod, opts VolumeOptions, mounter mount.Interface) (Builder, error) {
plug, err := f.pluginMgr.FindPluginBySpec(spec)
if err != nil {