API updates for Cinder volumes to support user-specified Secrets in the future

pull/8/head
Davanum Srinivas 2018-05-14 17:09:05 -04:00
parent 0ecfd343b3
commit 5fa8a2412d
15 changed files with 145 additions and 35 deletions
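For orientation before the diffs: a minimal sketch, not part of the commit, of what the new API surface allows once these changes land. A pod-level Cinder volume names a Secret in the pod's own namespace, while a PersistentVolume carries an explicitly namespaced reference (volume IDs and secret names below are illustrative).

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Pod-level volume: LocalObjectReference, resolved in the pod's namespace.
	vol := v1.Volume{
		Name: "data",
		VolumeSource: v1.VolumeSource{
			Cinder: &v1.CinderVolumeSource{
				VolumeID:  "c9a2f2c2-0000-0000-0000-000000000000",
				FSType:    "ext4",
				SecretRef: &v1.LocalObjectReference{Name: "openstack-creds"},
			},
		},
	}

	// PersistentVolume: PVs are cluster-scoped, so the new
	// CinderPersistentVolumeSource carries a namespaced SecretReference.
	pv := v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{Name: "cinder-pv"},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeSource: v1.PersistentVolumeSource{
				Cinder: &v1.CinderPersistentVolumeSource{
					VolumeID:  "c9a2f2c2-0000-0000-0000-000000000000",
					FSType:    "ext4",
					SecretRef: &v1.SecretReference{Name: "openstack-creds", Namespace: "kube-system"},
				},
			},
		},
	}
	fmt.Println(vol.Name, pv.Name)
}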

@@ -61,6 +61,10 @@ func VisitPVSecretNames(pv *api.PersistentVolume, visitor Visitor) bool {
return false
}
}
case source.Cinder != nil:
if source.Cinder.SecretRef != nil && !visitor(source.Cinder.SecretRef.Namespace, source.Cinder.SecretRef.Name, true /* kubeletVisible */) {
return false
}
case source.FlexVolume != nil:
if source.FlexVolume.SecretRef != nil {
// previously persisted PV objects use claimRef namespace
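A rough caller-side sketch of the visitor hook extended above. The three-argument Visitor signature (namespace, name, kubeletVisible) is taken from the call in the diff; the helper name and import paths are assumptions.

import (
	"k8s.io/kubernetes/pkg/api/persistentvolume"
	api "k8s.io/kubernetes/pkg/apis/core"
)

// collectPVSecrets is a hypothetical helper gathering every namespace/name
// Secret pair a PV references; Cinder PVs now contribute their secretRef.
func collectPVSecrets(pv *api.PersistentVolume) []string {
	var refs []string
	persistentvolume.VisitPVSecretNames(pv, func(namespace, name string, kubeletVisible bool) bool {
		refs = append(refs, namespace+"/"+name)
		return true // returning false would stop the walk early
	})
	return refs
}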

@@ -58,6 +58,12 @@ func TestPVSecrets(t *testing.T) {
CephFS: &api.CephFSPersistentVolumeSource{
SecretRef: &api.SecretReference{
Name: "Spec.PersistentVolumeSource.CephFS.SecretRef"}}}}},
{Spec: api.PersistentVolumeSpec{
PersistentVolumeSource: api.PersistentVolumeSource{
Cinder: &api.CinderPersistentVolumeSource{
SecretRef: &api.SecretReference{
Name: "Spec.PersistentVolumeSource.Cinder.SecretRef",
Namespace: "cinder"}}}}},
{Spec: api.PersistentVolumeSpec{
ClaimRef: &api.ObjectReference{Namespace: "claimrefns", Name: "claimrefname"},
PersistentVolumeSource: api.PersistentVolumeSource{
@@ -159,6 +165,7 @@ func TestPVSecrets(t *testing.T) {
expectedSecretPaths := sets.NewString(
"Spec.PersistentVolumeSource.AzureFile.SecretName",
"Spec.PersistentVolumeSource.CephFS.SecretRef",
"Spec.PersistentVolumeSource.Cinder.SecretRef",
"Spec.PersistentVolumeSource.FlexVolume.SecretRef",
"Spec.PersistentVolumeSource.RBD.SecretRef",
"Spec.PersistentVolumeSource.ScaleIO.SecretRef",
@@ -195,6 +202,8 @@ func TestPVSecrets(t *testing.T) {
"claimrefns/Spec.PersistentVolumeSource.CephFS.SecretRef",
"cephfs/Spec.PersistentVolumeSource.CephFS.SecretRef",
"cinder/Spec.PersistentVolumeSource.Cinder.SecretRef",
"claimrefns/Spec.PersistentVolumeSource.FlexVolume.SecretRef",
"flexns/Spec.PersistentVolumeSource.FlexVolume.SecretRef",

@@ -58,6 +58,10 @@ func VisitPodSecretNames(pod *api.Pod, visitor Visitor) bool {
if source.CephFS.SecretRef != nil && !visitor(source.CephFS.SecretRef.Name) {
return false
}
case source.Cinder != nil:
if source.Cinder.SecretRef != nil && !visitor(source.Cinder.SecretRef.Name) {
return false
}
case source.FlexVolume != nil:
if source.FlexVolume.SecretRef != nil && !visitor(source.FlexVolume.SecretRef.Name) {
return false
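The pod-level hook passes only a Secret name, since the namespace is implicitly the pod's own. A hedged consumer sketch; the helper name and import paths are assumptions.

import (
	"k8s.io/apimachinery/pkg/util/sets"
	podutil "k8s.io/kubernetes/pkg/api/pod"
	api "k8s.io/kubernetes/pkg/apis/core"
)

// podSecretNames is a hypothetical helper listing all Secrets a pod
// references, which after this change includes Cinder volume secretRefs.
func podSecretNames(pod *api.Pod) sets.String {
	names := sets.NewString()
	podutil.VisitPodSecretNames(pod, func(name string) bool {
		names.Insert(name)
		return true
	})
	return names
}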

@@ -64,6 +64,10 @@ func TestPodSecrets(t *testing.T) {
CephFS: &api.CephFSVolumeSource{
SecretRef: &api.LocalObjectReference{
Name: "Spec.Volumes[*].VolumeSource.CephFS.SecretRef"}}}}, {
VolumeSource: api.VolumeSource{
Cinder: &api.CinderVolumeSource{
SecretRef: &api.LocalObjectReference{
Name: "Spec.Volumes[*].VolumeSource.Cinder.SecretRef"}}}}, {
VolumeSource: api.VolumeSource{
FlexVolume: &api.FlexVolumeSource{
SecretRef: &api.LocalObjectReference{
@@ -118,6 +122,7 @@ func TestPodSecrets(t *testing.T) {
"Spec.InitContainers[*].Env[*].ValueFrom.SecretKeyRef",
"Spec.Volumes[*].VolumeSource.AzureFile.SecretName",
"Spec.Volumes[*].VolumeSource.CephFS.SecretRef",
"Spec.Volumes[*].VolumeSource.Cinder.SecretRef",
"Spec.Volumes[*].VolumeSource.FlexVolume.SecretRef",
"Spec.Volumes[*].VolumeSource.Projected.Sources[*].Secret",
"Spec.Volumes[*].VolumeSource.RBD.SecretRef",

@@ -84,6 +84,10 @@ func VisitPodSecretNames(pod *v1.Pod, visitor Visitor) bool {
if source.CephFS.SecretRef != nil && !visitor(source.CephFS.SecretRef.Name) {
return false
}
case source.Cinder != nil:
if source.Cinder.SecretRef != nil && !visitor(source.Cinder.SecretRef.Name) {
return false
}
case source.FlexVolume != nil:
if source.FlexVolume.SecretRef != nil && !visitor(source.FlexVolume.SecretRef.Name) {
return false

@@ -233,6 +233,10 @@ func TestPodSecrets(t *testing.T) {
CephFS: &v1.CephFSVolumeSource{
SecretRef: &v1.LocalObjectReference{
Name: "Spec.Volumes[*].VolumeSource.CephFS.SecretRef"}}}}, {
VolumeSource: v1.VolumeSource{
Cinder: &v1.CinderVolumeSource{
SecretRef: &v1.LocalObjectReference{
Name: "Spec.Volumes[*].VolumeSource.Cinder.SecretRef"}}}}, {
VolumeSource: v1.VolumeSource{
FlexVolume: &v1.FlexVolumeSource{
SecretRef: &v1.LocalObjectReference{
@@ -287,6 +291,7 @@ func TestPodSecrets(t *testing.T) {
"Spec.InitContainers[*].Env[*].ValueFrom.SecretKeyRef",
"Spec.Volumes[*].VolumeSource.AzureFile.SecretName",
"Spec.Volumes[*].VolumeSource.CephFS.SecretRef",
"Spec.Volumes[*].VolumeSource.Cinder.SecretRef",
"Spec.Volumes[*].VolumeSource.FlexVolume.SecretRef",
"Spec.Volumes[*].VolumeSource.Projected.Sources[*].Secret",
"Spec.Volumes[*].VolumeSource.RBD.SecretRef",

@@ -193,7 +193,7 @@ type PersistentVolumeSource struct {
FlexVolume *FlexPersistentVolumeSource
// Cinder represents a cinder volume attached and mounted on kubelets host machine
// +optional
Cinder *CinderVolumeSource
Cinder *CinderPersistentVolumeSource
// CephFS represents a Ceph FS mount on the host that shares a pod's lifetime
// +optional
CephFS *CephFSPersistentVolumeSource
@@ -999,6 +999,32 @@ type CinderVolumeSource struct {
// the ReadOnly setting in VolumeMounts.
// +optional
ReadOnly bool
// Optional: points to a secret object containing parameters used to connect
// to OpenStack.
// +optional
SecretRef *LocalObjectReference
}
// Represents a cinder volume resource in OpenStack. A Cinder volume
// must exist before mounting to a container. The volume must also be
// in the same region as the kubelet. Cinder volumes support ownership
// management and SELinux relabeling.
type CinderPersistentVolumeSource struct {
// Unique id of the volume used to identify the cinder volume
VolumeID string
// Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// +optional
FSType string
// Optional: Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// +optional
ReadOnly bool
// Optional: points to a secret object containing parameters used to connect
// to OpenStack.
// +optional
SecretRef *SecretReference
}
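The asymmetry between the two types is deliberate: the pod-level CinderVolumeSource keeps a LocalObjectReference (name only, resolved in the pod's namespace), while a PersistentVolume is cluster-scoped and therefore needs a full SecretReference. A minimal sketch of the difference as seen from a caller importing the core package, with illustrative values:

// Pod-scoped: the secret is resolved in the namespace of the pod that
// mounts the volume, so only a name is stored.
podScoped := core.CinderVolumeSource{
	VolumeID:  "vol-1",
	SecretRef: &core.LocalObjectReference{Name: "openstack-creds"},
}

// Cluster-scoped: a PersistentVolume has no namespace of its own, so the
// reference must spell the namespace out.
pvScoped := core.CinderPersistentVolumeSource{
	VolumeID:  "vol-1",
	SecretRef: &core.SecretReference{Name: "openstack-creds", Namespace: "kube-system"},
}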
// Represents a Ceph Filesystem mount that lasts the lifetime of a pod

@@ -1196,6 +1196,27 @@ func validateCinderVolumeSource(cd *core.CinderVolumeSource, fldPath *field.Path
if len(cd.VolumeID) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("volumeID"), ""))
}
if cd.SecretRef != nil {
if len(cd.SecretRef.Name) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("secretRef", "name"), ""))
}
}
return allErrs
}
func validateCinderPersistentVolumeSource(cd *core.CinderPersistentVolumeSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if len(cd.VolumeID) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("volumeID"), ""))
}
if cd.SecretRef != nil {
if len(cd.SecretRef.Name) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("secretRef", "name"), ""))
}
if len(cd.SecretRef.Namespace) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("secretRef", "namespace"), ""))
}
}
return allErrs
}
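A hedged sketch of what the new validator enforces, written test-style with illustrative values; the field paths follow the code above.

// A PV-level source whose secretRef has a name but no namespace must be
// rejected; the pod-level variant has no namespace field to check.
cd := &core.CinderPersistentVolumeSource{
	VolumeID:  "vol-1",
	SecretRef: &core.SecretReference{Name: "openstack-creds"}, // namespace omitted
}
errs := validateCinderPersistentVolumeSource(cd, field.NewPath("spec", "cinder"))
// expect a single Required error at spec.cinder.secretRef.namespace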
@@ -1622,7 +1643,7 @@ func ValidatePersistentVolume(pv *core.PersistentVolume) field.ErrorList {
allErrs = append(allErrs, field.Forbidden(specPath.Child("cinder"), "may not specify more than 1 volume type"))
} else {
numVolumes++
allErrs = append(allErrs, validateCinderVolumeSource(pv.Spec.Cinder, specPath.Child("cinder"))...)
allErrs = append(allErrs, validateCinderPersistentVolumeSource(pv.Spec.Cinder, specPath.Child("cinder"))...)
}
}
if pv.Spec.FC != nil {

@@ -966,7 +966,17 @@ func printCinderVolumeSource(cinder *api.CinderVolumeSource, w PrefixWriter) {
" VolumeID:\t%v\n"+
" FSType:\t%v\n"+
" ReadOnly:\t%v\n",
cinder.VolumeID, cinder.FSType, cinder.ReadOnly)
" SecretRef:\t%v\n"+
cinder.VolumeID, cinder.FSType, cinder.ReadOnly, cinder.SecretRef)
}
func printCinderPersistentVolumeSource(cinder *api.CinderPersistentVolumeSource, w PrefixWriter) {
w.Write(LEVEL_2, "Type:\tCinder (a Persistent Disk resource in OpenStack)\n"+
" VolumeID:\t%v\n"+
" FSType:\t%v\n"+
" ReadOnly:\t%v\n",
" SecretRef:\t%v\n"+
cinder.VolumeID, cinder.SecretRef, cinder.FSType, cinder.ReadOnly, cinder.SecretRef)
}
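Based on the format strings above, the Cinder block of a kubectl describe on a PV would render roughly as below (illustrative values; the SecretRef line is the %v rendering of the pointer):

Type:       Cinder (a Persistent Disk resource in OpenStack)
VolumeID:   c9a2f2c2-0000-0000-0000-000000000000
FSType:     ext4
ReadOnly:   false
SecretRef:  &{openstack-creds kube-system}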
func printScaleIOVolumeSource(sio *api.ScaleIOVolumeSource, w PrefixWriter) {
@@ -1228,7 +1238,7 @@ func describePersistentVolume(pv *api.PersistentVolume, events *api.EventList) (
case pv.Spec.VsphereVolume != nil:
printVsphereVolumeSource(pv.Spec.VsphereVolume, w)
case pv.Spec.Cinder != nil:
printCinderVolumeSource(pv.Spec.Cinder, w)
printCinderPersistentVolumeSource(pv.Spec.Cinder, w)
case pv.Spec.AzureDisk != nil:
printAzureDiskVolumeSource(pv.Spec.AzureDisk, w)
case pv.Spec.PhotonPersistentDisk != nil:

@@ -1012,7 +1012,7 @@ func TestPersistentVolumeDescriber(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{Name: "bar"},
Spec: api.PersistentVolumeSpec{
PersistentVolumeSource: api.PersistentVolumeSource{
Cinder: &api.CinderVolumeSource{},
Cinder: &api.CinderPersistentVolumeSource{},
},
},
},

@@ -120,13 +120,11 @@ func (attacher *cinderDiskAttacher) waitDiskAttached(instanceID, volumeID string
}
func (attacher *cinderDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
volumeSource, _, err := getVolumeSource(spec)
volumeID, _, _, err := getVolumeInfo(spec)
if err != nil {
return "", err
}
volumeID := volumeSource.VolumeID
instanceID, err := attacher.nodeInstanceID(nodeName)
if err != nil {
return "", err
@@ -175,15 +173,15 @@ func (attacher *cinderDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nod
volumeSpecMap := make(map[string]*volume.Spec)
volumeIDList := []string{}
for _, spec := range specs {
volumeSource, _, err := getVolumeSource(spec)
volumeID, _, _, err := getVolumeInfo(spec)
if err != nil {
glog.Errorf("Error getting volume (%q) source : %v", spec.Name(), err)
continue
}
volumeIDList = append(volumeIDList, volumeSource.VolumeID)
volumeIDList = append(volumeIDList, volumeID)
volumesAttachedCheck[spec] = true
volumeSpecMap[volumeSource.VolumeID] = spec
volumeSpecMap[volumeID] = spec
}
attachedResult, err := attacher.cinderProvider.DisksAreAttachedByName(nodeName, volumeIDList)
@@ -207,13 +205,11 @@ func (attacher *cinderDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nod
func (attacher *cinderDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath string, _ *v1.Pod, timeout time.Duration) (string, error) {
// NOTE: devicePath is the path as reported by Cinder, which may be incorrect and should not be used. See Issue #33128
volumeSource, _, err := getVolumeSource(spec)
volumeID, _, _, err := getVolumeInfo(spec)
if err != nil {
return "", err
}
volumeID := volumeSource.VolumeID
if devicePath == "" {
return "", fmt.Errorf("WaitForAttach failed for Cinder disk %q: devicePath is empty", volumeID)
}
@@ -252,12 +248,12 @@ func (attacher *cinderDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath
func (attacher *cinderDiskAttacher) GetDeviceMountPath(
spec *volume.Spec) (string, error) {
volumeSource, _, err := getVolumeSource(spec)
volumeID, _, _, err := getVolumeInfo(spec)
if err != nil {
return "", err
}
return makeGlobalPDName(attacher.host, volumeSource.VolumeID), nil
return makeGlobalPDName(attacher.host, volumeID), nil
}
// FIXME: this method can be further pruned.
@@ -275,7 +271,7 @@ func (attacher *cinderDiskAttacher) MountDevice(spec *volume.Spec, devicePath st
}
}
volumeSource, readOnly, err := getVolumeSource(spec)
_, volumeFSType, readOnly, err := getVolumeInfo(spec)
if err != nil {
return err
}
@@ -287,7 +283,7 @@ func (attacher *cinderDiskAttacher) MountDevice(spec *volume.Spec, devicePath st
if notMnt {
diskMounter := volumeutil.NewSafeFormatAndMountFromHost(cinderVolumePluginName, attacher.host)
mountOptions := volumeutil.MountOptionFromSpec(spec, options...)
err = diskMounter.FormatAndMount(devicePath, deviceMountPath, volumeSource.FSType, mountOptions)
err = diskMounter.FormatAndMount(devicePath, deviceMountPath, volumeFSType, mountOptions)
if err != nil {
os.Remove(deviceMountPath)
return err

@@ -393,7 +393,7 @@ func createPVSpec(name string, readOnly bool) *volume.Spec {
PersistentVolume: &v1.PersistentVolume{
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
Cinder: &v1.CinderVolumeSource{
Cinder: &v1.CinderPersistentVolumeSource{
VolumeID: name,
ReadOnly: readOnly,
},

@@ -94,12 +94,12 @@ func (plugin *cinderPlugin) GetPluginName() string {
}
func (plugin *cinderPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
volumeSource, _, err := getVolumeSource(spec)
volumeID, _, _, err := getVolumeInfo(spec)
if err != nil {
return "", err
}
return volumeSource.VolumeID, nil
return volumeID, nil
}
func (plugin *cinderPlugin) CanSupport(spec *volume.Spec) bool {
@@ -129,14 +129,11 @@ func (plugin *cinderPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.
}
func (plugin *cinderPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager cdManager, mounter mount.Interface) (volume.Mounter, error) {
cinder, readOnly, err := getVolumeSource(spec)
pdName, fsType, readOnly, err := getVolumeInfo(spec)
if err != nil {
return nil, err
}
pdName := cinder.VolumeID
fsType := cinder.FSType
return &cinderVolumeMounter{
cinderVolume: &cinderVolume{
podUID: podUID,
@@ -248,7 +245,7 @@ func (plugin *cinderPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*
var _ volume.ExpandableVolumePlugin = &cinderPlugin{}
func (plugin *cinderPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize resource.Quantity, oldSize resource.Quantity) (resource.Quantity, error) {
cinder, _, err := getVolumeSource(spec)
volumeID, _, _, err := getVolumeInfo(spec)
if err != nil {
return oldSize, err
}
@@ -257,12 +254,12 @@ func (plugin *cinderPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize resour
return oldSize, err
}
expandedSize, err := cloud.ExpandVolume(cinder.VolumeID, oldSize, newSize)
expandedSize, err := cloud.ExpandVolume(volumeID, oldSize, newSize)
if err != nil {
return oldSize, err
}
glog.V(2).Infof("volume %s expanded to new size %d successfully", cinder.VolumeID, int(newSize.Value()))
glog.V(2).Infof("volume %s expanded to new size %d successfully", volumeID, int(newSize.Value()))
return expandedSize, nil
}
@@ -532,7 +529,7 @@ func (c *cinderVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
},
PersistentVolumeSource: v1.PersistentVolumeSource{
Cinder: &v1.CinderVolumeSource{
Cinder: &v1.CinderPersistentVolumeSource{
VolumeID: volumeID,
FSType: fstype,
ReadOnly: false,
@@ -548,13 +545,13 @@ func (c *cinderVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
return pv, nil
}
func getVolumeSource(spec *volume.Spec) (*v1.CinderVolumeSource, bool, error) {
func getVolumeInfo(spec *volume.Spec) (string, string, bool, error) {
if spec.Volume != nil && spec.Volume.Cinder != nil {
return spec.Volume.Cinder, spec.Volume.Cinder.ReadOnly, nil
return spec.Volume.Cinder.VolumeID, spec.Volume.Cinder.FSType, spec.Volume.Cinder.ReadOnly, nil
} else if spec.PersistentVolume != nil &&
spec.PersistentVolume.Spec.Cinder != nil {
return spec.PersistentVolume.Spec.Cinder, spec.ReadOnly, nil
return spec.PersistentVolume.Spec.Cinder.VolumeID, spec.PersistentVolume.Spec.Cinder.FSType, spec.ReadOnly, nil
}
return nil, false, fmt.Errorf("Spec does not reference a Cinder volume type")
return "", "", false, fmt.Errorf("Spec does not reference a Cinder volume type")
}
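The getVolumeInfo helper above replaces getVolumeSource throughout the plugin: one accessor now serves both CinderVolumeSource and CinderPersistentVolumeSource by flattening them into the scalar fields callers actually use. A hedged caller-side sketch, assuming it runs inside the cinder package (the logging line is illustrative):

// Either spec variant (pod-level volume or PersistentVolume) resolves
// through the same call; no struct access is needed at call sites.
volumeID, fsType, readOnly, err := getVolumeInfo(spec)
if err != nil {
	return err
}
glog.V(4).Infof("cinder volume %s (fstype %q, readOnly %v)", volumeID, fsType, readOnly)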

@@ -51,7 +51,7 @@ func TestCanSupport(t *testing.T) {
t.Errorf("Expected true")
}
if !plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{Cinder: &v1.CinderVolumeSource{}}}}}) {
if !plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{Cinder: &v1.CinderPersistentVolumeSource{}}}}}) {
t.Errorf("Expected true")
}
}

@@ -205,7 +205,7 @@ type PersistentVolumeSource struct {
// Cinder represents a cinder volume attached and mounted on kubelets host machine
// More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
// +optional
Cinder *CinderVolumeSource `json:"cinder,omitempty" protobuf:"bytes,8,opt,name=cinder"`
Cinder *CinderPersistentVolumeSource `json:"cinder,omitempty" protobuf:"bytes,8,opt,name=cinder"`
// CephFS represents a Ceph FS mount on the host that shares a pod's lifetime
// +optional
CephFS *CephFSPersistentVolumeSource `json:"cephfs,omitempty" protobuf:"bytes,9,opt,name=cephfs"`
@@ -731,6 +731,35 @@ type CinderVolumeSource struct {
// More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
// Optional: points to a secret object containing parameters used to connect
// to OpenStack.
// +optional
SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,4,opt,name=secretRef"`
}
// Represents a cinder volume resource in OpenStack.
// A Cinder volume must exist before mounting to a container.
// The volume must also be in the same region as the kubelet.
// Cinder volumes support ownership management and SELinux relabeling.
type CinderPersistentVolumeSource struct {
// volume id used to identify the volume in cinder
// More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"`
// Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
// Optional: Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
// Optional: points to a secret object containing parameters used to connect
// to OpenStack.
// +optional
SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,4,opt,name=secretRef"`
}
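Given the json tags above, a hedged sketch of the wire format the new PV-level stanza round-trips through (illustrative values):

package main

import (
	"encoding/json"
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	raw := []byte(`{
		"volumeID": "c9a2f2c2-0000-0000-0000-000000000000",
		"fsType": "ext4",
		"secretRef": {"name": "openstack-creds", "namespace": "kube-system"}
	}`)
	var src v1.CinderPersistentVolumeSource
	if err := json.Unmarshal(raw, &src); err != nil {
		panic(err)
	}
	// readOnly was omitted above and defaults to false, per its comment.
	fmt.Println(src.VolumeID, src.SecretRef.Namespace, src.ReadOnly)
}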
// Represents a Ceph Filesystem mount that lasts the lifetime of a pod