Merge pull request #31869 from jsafrane/gluster-secrets

Automatic merge from submit-queue

Use secrets for glusterfs provisioning passwords

- no plain password in StorageClass!
- fix the style along the way
- use PV annotations to pass the configuration from provisioners to deleters, inspired by Ceph RBD provisioning.

~~Proposing 1.4:~~

~~- GlusterFS provisioning is a new 1.4 feature~~
~~- if we release GlusterFS provisioner as it is now, we need to support its API (i.e. plaintext passwords) until 2.0~~
~~- it can break only GlusterFS provisioning, nothing else~~
~~- it's easy to revert~~

@kubernetes/sig-storage

fixes #31871
pull/6/head
Kubernetes Submit Queue 2016-09-27 07:32:09 -07:00 committed by GitHub
commit 81a1b0573b
5 changed files with 243 additions and 55 deletions

View File

@ -109,17 +109,22 @@ provisioner: kubernetes.io/glusterfs
parameters:
endpoint: "glusterfs-cluster"
resturl: "http://127.0.0.1:8081"
restauthenabled: "true"
restuser: "admin"
restuserkey: "password"
secretNamespace: "default"
secretName: "heketi-secret"
```
* `endpoint`: `glusterfs-cluster` is the endpoint name which includes GlusterFS trusted pool IP addresses. This parameter is mandatory. We need to also create a service for this endpoint, so that the endpoint will be persisted. This service can be without a selector to tell Kubernetes we want to add its endpoints manually. Please note that, glusterfs plugin looks for the endpoint in the pod namespace, so it is mandatory that the endpoint and service have to be created in Pod's namespace for successful mount of gluster volumes in the pod.
* `resturl` : Gluster REST service/Heketi service url which provision gluster volumes on demand. The general format should be `IPaddress:Port` and this is a mandatory parameter for GlusterFS dynamic provisioner. If Heketi service is exposed as a routable service in openshift/kubernetes setup, this can have a format similar to
`http://heketi-storage-project.cloudapps.mystorage.com` where the fqdn is a resolvable heketi service url.
* `restauthenabled` : Gluster REST service authentication boolean is required if the authentication is enabled on the REST server. If this value is 'true', 'restuser' and 'restuserkey' have to be filled.
* `restauthenabled` : Gluster REST service authentication boolean that enables authentication to the REST server. If this value is 'true', `restuser` and `restuserkey` or `secretNamespace` + `secretName` have to be filled. This option is deprecated, authentication is enabled when any of `restuser`, `restuserkey`, `secretName` or `secretNamespace` is specified.
* `restuser` : Gluster REST service/Heketi user who has access to create volumes in the Gluster Trusted Pool.
* `restuserkey` : Gluster REST service/Heketi user's password which will be used for authentication to the REST server.
* `restuserkey` : Gluster REST service/Heketi user's password which will be used for authentication to the REST server. This parameter is deprecated in favor of `secretNamespace` + `secretName`.
* `secretNamespace` + `secretName` : Identification of Secret instance that contains the user password to use when talking to Gluster REST service. These parameters are optional, empty password will be used when both `secretNamespace` and `secretName` are omitted.
When both `restuserkey` and `secretNamespace` + `secretName` are specified, the secret will be used.
Example of a secret can be found in [glusterfs-provisioning-secret.yaml](glusterfs-provisioning-secret.yaml).
Reference : ([How to configure Heketi](https://github.com/heketi/heketi/wiki/Setting-up-the-topology))
@ -170,7 +175,7 @@ parameters:
```yaml
apiVersion: storage.k8s.io/v1beta1
kind: StorageClass
metadata:
metadata:
name: fast
provisioner: kubernetes.io/rbd
parameters:
@ -416,7 +421,7 @@ Claim: myns/claim1
Reclaim Policy: Delete
Access Modes: RWO
Capacity: 3Gi
Message:
Message:
Source:
Type: RBD (a Rados Block Device mount on the host that shares a pod's lifetime)
CephMonitors: [127.0.0.1:6789]

View File

@ -6,6 +6,6 @@ provisioner: kubernetes.io/glusterfs
parameters:
endpoint: "glusterfs-cluster"
resturl: "http://127.0.0.1:8081"
restauthenabled: "true"
restuser: "admin"
restuserkey: "password"
secretNamespace: "default"
secretName: "heketi-secret"

View File

@ -0,0 +1,8 @@
apiVersion: v1
kind: Secret
metadata:
name: heketi-secret
namespace: default
data:
# base64 encoded password. E.g.: echo -n "mypassword" | base64
key: bXlwYXNzd29yZA==

View File

@ -18,31 +18,32 @@ package glusterfs
import (
"fmt"
"os"
"path"
dstrings "strings"
"github.com/golang/glog"
gcli "github.com/heketi/heketi/client/api/go-client"
gapi "github.com/heketi/heketi/pkg/glusterfs/api"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/exec"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
"os"
"path"
"strconv"
dstrings "strings"
volutil "k8s.io/kubernetes/pkg/volume/util"
)
// This is the primary entrypoint for volume plugins.
func ProbeVolumePlugins() []volume.VolumePlugin {
return []volume.VolumePlugin{&glusterfsPlugin{nil, exec.New(), new(glusterfsClusterConf)}}
return []volume.VolumePlugin{&glusterfsPlugin{nil, exec.New()}}
}
type glusterfsPlugin struct {
host volume.VolumeHost
exe exec.Interface
clusterconf *glusterfsClusterConf
host volume.VolumeHost
exe exec.Interface
}
var _ volume.VolumePlugin = &glusterfsPlugin{}
@ -53,10 +54,16 @@ var _ volume.Provisioner = &glusterfsVolumeProvisioner{}
var _ volume.Deleter = &glusterfsVolumeDeleter{}
const (
glusterfsPluginName = "kubernetes.io/glusterfs"
volprefix = "vol_"
replicacount = 3
durabilitytype = "replicate"
glusterfsPluginName = "kubernetes.io/glusterfs"
volprefix = "vol_"
replicacount = 3
durabilitytype = "replicate"
secretKeyName = "key" // key name used in secret
annGlusterURL = "glusterfs.kubernetes.io/url"
annGlusterSecretName = "glusterfs.kubernetes.io/secretname"
annGlusterSecretNamespace = "glusterfs.kubernetes.io/secretnamespace"
annGlusterUserKey = "glusterfs.kubernetes.io/userkey"
annGlusterUser = "glusterfs.kubernetes.io/userid"
)
func (plugin *glusterfsPlugin) Init(host volume.VolumeHost) error {
@ -346,18 +353,19 @@ func (plugin *glusterfsPlugin) newProvisionerInternal(options volume.VolumeOptio
}, nil
}
type glusterfsClusterConf struct {
glusterep string
glusterRestvolpath string
glusterRestUrl string
glusterRestAuth bool
glusterRestUser string
glusterRestUserKey string
// provisioningConfig holds the Gluster REST (Heketi) connection parameters
// shared by the provisioner and the deleter.
type provisioningConfig struct {
	endpoint        string // name of the Endpoints object with the trusted pool addresses (StorageClass 'endpoint')
	url             string // Gluster REST service / Heketi URL (StorageClass 'resturl')
	user            string // REST user name (StorageClass 'restuser')
	userKey         string // deprecated plaintext password (StorageClass 'restuserkey')
	secretNamespace string // namespace of the Secret holding the password (StorageClass 'secretnamespace')
	secretName      string // name of the Secret holding the password (StorageClass 'secretname')
	secretValue     string // resolved password that is actually passed to the REST client
}
// glusterfsVolumeProvisioner creates new Gluster volumes via the REST
// (Heketi) service configured through StorageClass parameters.
type glusterfsVolumeProvisioner struct {
	*glusterfsMounter
	provisioningConfig                      // REST connection settings parsed from StorageClass parameters
	options            volume.VolumeOptions // options of the claim being provisioned
}
@ -376,12 +384,15 @@ func (plugin *glusterfsPlugin) newDeleterInternal(spec *volume.Spec) (volume.Del
plugin: plugin,
},
path: spec.PersistentVolume.Spec.Glusterfs.Path,
}}, nil
},
spec: spec.PersistentVolume,
}, nil
}
type glusterfsVolumeDeleter struct {
*glusterfsMounter
*glusterfsClusterConf
provisioningConfig
spec *api.PersistentVolume
}
func (d *glusterfsVolumeDeleter) GetPath() string {
@ -391,21 +402,39 @@ func (d *glusterfsVolumeDeleter) GetPath() string {
func (d *glusterfsVolumeDeleter) Delete() error {
var err error
glog.V(2).Infof("glusterfs: delete volume :%s ", d.glusterfsMounter.path)
volumetodel := d.glusterfsMounter.path
d.glusterfsClusterConf = d.plugin.clusterconf
newvolumetodel := dstrings.TrimPrefix(volumetodel, volprefix)
cli := gcli.NewClient(d.glusterRestUrl, d.glusterRestUser, d.glusterRestUserKey)
glog.V(2).Infof("glusterfs: delete volume: %s ", d.glusterfsMounter.path)
volumeName := d.glusterfsMounter.path
volumeId := dstrings.TrimPrefix(volumeName, volprefix)
err = d.annotationsToParam(d.spec)
if err != nil {
return err
}
if len(d.secretName) > 0 {
d.secretValue, err = parseSecret(d.secretNamespace, d.secretName, d.plugin.host.GetKubeClient())
if err != nil {
glog.Errorf("glusterfs: failed to read secret: %v", err)
return err
}
} else if len(d.userKey) > 0 {
d.secretValue = d.userKey
} else {
d.secretValue = ""
}
glog.V(4).Infof("glusterfs: deleting volume %q with configuration %+v", volumeId, d.provisioningConfig)
cli := gcli.NewClient(d.url, d.user, d.secretValue)
if cli == nil {
glog.Errorf("glusterfs: failed to create gluster rest client")
return fmt.Errorf("glusterfs: failed to create gluster rest client, REST server authentication failed")
}
err = cli.VolumeDelete(newvolumetodel)
err = cli.VolumeDelete(volumeId)
if err != nil {
glog.V(4).Infof("glusterfs: error when deleting the volume :%s", err)
return err
}
glog.V(2).Infof("glusterfs: volume %s deleted successfully", volumetodel)
glog.V(2).Infof("glusterfs: volume %s deleted successfully", volumeName)
return nil
}
@ -417,30 +446,59 @@ func (r *glusterfsVolumeProvisioner) Provision() (*api.PersistentVolume, error)
return nil, fmt.Errorf("glusterfs: not able to parse your claim Selector")
}
glog.V(4).Infof("glusterfs: Provison VolumeOptions %v", r.options)
authEnabled := true
for k, v := range r.options.Parameters {
switch dstrings.ToLower(k) {
case "endpoint":
r.plugin.clusterconf.glusterep = v
case "path":
r.plugin.clusterconf.glusterRestvolpath = v
r.endpoint = v
case "resturl":
r.plugin.clusterconf.glusterRestUrl = v
case "restauthenabled":
r.plugin.clusterconf.glusterRestAuth, err = strconv.ParseBool(v)
r.url = v
case "restuser":
r.plugin.clusterconf.glusterRestUser = v
r.user = v
case "restuserkey":
r.plugin.clusterconf.glusterRestUserKey = v
r.userKey = v
case "secretname":
r.secretName = v
case "secretnamespace":
r.secretNamespace = v
case "restauthenabled":
authEnabled = dstrings.ToLower(v) == "true"
default:
return nil, fmt.Errorf("glusterfs: invalid option %q for volume plugin %s", k, r.plugin.GetPluginName())
}
}
glog.V(4).Infof("glusterfs: storage class parameters in plugin clusterconf %v", r.plugin.clusterconf)
if !r.plugin.clusterconf.glusterRestAuth {
r.plugin.clusterconf.glusterRestUser = ""
r.plugin.clusterconf.glusterRestUserKey = ""
if len(r.url) == 0 {
return nil, fmt.Errorf("StorageClass for provisioner %q must contain 'resturl' parameter", r.plugin.GetPluginName())
}
r.glusterfsClusterConf = r.plugin.clusterconf
if len(r.endpoint) == 0 {
return nil, fmt.Errorf("StorageClass for provisioner %q must contain 'endpoint' parameter", r.plugin.GetPluginName())
}
if !authEnabled {
r.user = ""
r.secretName = ""
r.secretNamespace = ""
r.userKey = ""
r.secretValue = ""
}
if len(r.secretName) != 0 || len(r.secretNamespace) != 0 {
// secretName + Namespace has precedence over userKey
if len(r.secretName) != 0 && len(r.secretNamespace) != 0 {
r.secretValue, err = parseSecret(r.secretNamespace, r.secretName, r.plugin.host.GetKubeClient())
if err != nil {
return nil, err
}
} else {
return nil, fmt.Errorf("StorageClass for provisioner %q must have secretNamespace and secretName either both set or both empty", r.plugin.GetPluginName())
}
} else {
r.secretValue = r.userKey
}
glog.V(4).Infof("glusterfs: creating volume with configuration %+v", r.provisioningConfig)
glusterfs, sizeGB, err := r.CreateVolume()
if err != nil {
glog.Errorf("glusterfs: create volume err: %s.", err)
@ -453,18 +511,19 @@ func (r *glusterfsVolumeProvisioner) Provision() (*api.PersistentVolume, error)
pv.Spec.Capacity = api.ResourceList{
api.ResourceName(api.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
}
r.paramToAnnotations(pv)
return pv, nil
}
func (p *glusterfsVolumeProvisioner) CreateVolume() (r *api.GlusterfsVolumeSource, size int, err error) {
volSizeBytes := p.options.Capacity.Value()
sz := int(volume.RoundUpSize(volSizeBytes, 1024*1024*1024))
glog.V(2).Infof("glusterfs: create volume of size:%d bytes", volSizeBytes)
if p.glusterfsClusterConf.glusterRestUrl == "" {
glog.V(2).Infof("glusterfs: create volume of size: %d bytes and configuration %+v", volSizeBytes, p.provisioningConfig)
if p.url == "" {
glog.Errorf("glusterfs : rest server endpoint is empty")
return nil, 0, fmt.Errorf("failed to create gluster REST client, REST URL is empty")
}
cli := gcli.NewClient(p.glusterRestUrl, p.glusterRestUser, p.glusterRestUserKey)
cli := gcli.NewClient(p.url, p.user, p.secretValue)
if cli == nil {
glog.Errorf("glusterfs: failed to create gluster rest client")
return nil, 0, fmt.Errorf("failed to create gluster REST client, REST server authentication failed")
@ -475,10 +534,66 @@ func (p *glusterfsVolumeProvisioner) CreateVolume() (r *api.GlusterfsVolumeSourc
glog.Errorf("glusterfs: error creating volume %s ", err)
return nil, 0, fmt.Errorf("error creating volume %v", err)
}
glog.V(1).Infof("glusterfs: volume with size :%d and name:%s created", volume.Size, volume.Name)
glog.V(1).Infof("glusterfs: volume with size: %d and name: %s created", volume.Size, volume.Name)
return &api.GlusterfsVolumeSource{
EndpointsName: p.glusterfsClusterConf.glusterep,
EndpointsName: p.endpoint,
Path: volume.Name,
ReadOnly: false,
}, sz, nil
}
// parseSecret finds a given Secret instance and reads the user password from
// it. The password is expected under the "key" entry (secretKeyName); if that
// entry is absent, an arbitrary entry of the secret is used as a fallback,
// preserving the behavior of earlier releases.
func parseSecret(namespace, secretName string, kubeClient clientset.Interface) (string, error) {
	secretMap, err := volutil.GetSecret(namespace, secretName, kubeClient)
	if err != nil {
		glog.Errorf("failed to get secret from [%q/%q]", namespace, secretName)
		return "", fmt.Errorf("failed to get secret from [%q/%q]", namespace, secretName)
	}
	if len(secretMap) == 0 {
		return "", fmt.Errorf("empty secret map")
	}
	// Prefer a direct lookup of the well-known key over scanning the map.
	if secret, ok := secretMap[secretKeyName]; ok {
		return secret, nil
	}
	// Key not found: fall back to an arbitrary entry, as done before.
	// NOTE(review): map iteration order is random, so with more than one
	// entry this is nondeterministic — consider requiring the "key" entry.
	secret := ""
	for _, v := range secretMap {
		secret = v
	}
	return secret, nil
}
// paramToAnnotations records the provisioning configuration in the PV's
// annotations so the deleter can later reconstruct it (see
// annotationsToParam).
func (p *glusterfsVolumeProvisioner) paramToAnnotations(pv *api.PersistentVolume) {
	volutil.AddVolumeAnnotations(pv, map[string]string{
		annGlusterURL:             p.url,
		annGlusterUser:            p.user,
		annGlusterSecretName:      p.secretName,
		annGlusterSecretNamespace: p.secretNamespace,
		annGlusterUserKey:         p.userKey,
	})
}
// annotationsToParam reconstructs the provisioning configuration from the PV
// annotations written by paramToAnnotations.
func (d *glusterfsVolumeDeleter) annotationsToParam(pv *api.PersistentVolume) error {
	params, err := volutil.ParseVolumeAnnotations(pv, []string{
		annGlusterSecretName,
		annGlusterSecretNamespace,
		annGlusterURL,
		annGlusterUser,
		annGlusterUserKey,
	})
	if err != nil {
		return err
	}
	d.url, d.user = params[annGlusterURL], params[annGlusterUser]
	d.userKey = params[annGlusterUserKey]
	d.secretName, d.secretNamespace = params[annGlusterSecretName], params[annGlusterSecretNamespace]
	return nil
}

View File

@ -236,3 +236,63 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
t.Errorf("Expected true for mounter.IsReadOnly")
}
}
// TestAnnotations passes a provisioningConfig through paramToAnnotations and
// back through annotationsToParam and checks it did not change in the
// process.
func TestAnnotations(t *testing.T) {
	tests := []provisioningConfig{
		{
			// Everything empty
		},
		{
			// Everything with a value
			url:             "http://localhost",
			user:            "admin",
			secretNamespace: "default",
			secretName:      "gluster-secret",
			userKey:         "mykey",
		},
		{
			// No secret
			url:             "http://localhost",
			user:            "admin",
			secretNamespace: "",
			secretName:      "",
			userKey:         "",
		},
	}

	for i, test := range tests {
		provisioner := &glusterfsVolumeProvisioner{
			provisioningConfig: test,
		}
		deleter := &glusterfsVolumeDeleter{}
		pv := &api.PersistentVolume{
			ObjectMeta: api.ObjectMeta{
				Name: "pv",
			},
		}

		provisioner.paramToAnnotations(pv)
		if err := deleter.annotationsToParam(pv); err != nil {
			// Parsing failed; comparing fields of a half-populated deleter
			// would only add noise, so skip the rest of this case.
			t.Errorf("test %d failed: %v", i, err)
			continue
		}

		// Compare every round-tripped field against the input.
		fields := []struct {
			name string
			in   string
			out  string
		}{
			{"url", test.url, deleter.url},
			{"user", test.user, deleter.user},
			{"userKey", test.userKey, deleter.userKey},
			{"secretNamespace", test.secretNamespace, deleter.secretNamespace},
			{"secretName", test.secretName, deleter.secretName},
		}
		for _, f := range fields {
			if f.in != f.out {
				t.Errorf("test %d failed: expected %s %q, got %q", i, f.name, f.in, f.out)
			}
		}
	}
}