Merge pull request #10424 from markturansky/readonly_fix

Auto commit by PR queue bot
Brendan Burns 2015-07-29 14:25:44 -07:00
commit 63cf00d24f
25 changed files with 495 additions and 36 deletions

View File

@ -45,6 +45,7 @@ type awsElasticBlockStorePlugin struct {
}
var _ volume.VolumePlugin = &awsElasticBlockStorePlugin{}
var _ volume.PersistentVolumePlugin = &awsElasticBlockStorePlugin{}
const (
awsElasticBlockStorePluginName = "kubernetes.io/aws-ebs"
@ -74,11 +75,16 @@ func (plugin *awsElasticBlockStorePlugin) NewBuilder(spec *volume.Spec, pod *api
}
func (plugin *awsElasticBlockStorePlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID, manager ebsManager, mounter mount.Interface) (volume.Builder, error) {
// EBS volumes used directly in a pod have a ReadOnly flag set by the pod author.
// EBS volumes used as a PersistentVolume get the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV.
var readOnly bool
var ebs *api.AWSElasticBlockStoreVolumeSource
if spec.VolumeSource.AWSElasticBlockStore != nil {
ebs = spec.VolumeSource.AWSElasticBlockStore
readOnly = ebs.ReadOnly
} else {
ebs = spec.PersistentVolumeSource.AWSElasticBlockStore
readOnly = spec.ReadOnly
}
volumeID := ebs.VolumeID
@ -87,7 +93,6 @@ func (plugin *awsElasticBlockStorePlugin) newBuilderInternal(spec *volume.Spec,
if ebs.Partition != 0 {
partition = strconv.Itoa(ebs.Partition)
}
readOnly := ebs.ReadOnly
return &awsElasticBlockStore{
podUID: podUID,
@ -235,6 +240,10 @@ func (ebs *awsElasticBlockStore) SetUpAt(dir string) error {
return nil
}
func (pd *awsElasticBlockStore) IsReadOnly() bool {
return pd.readOnly
}
func makeGlobalPDPath(host volume.VolumeHost, volumeID string) string {
// Clean up the URI to be more fs-friendly
name := volumeID
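
The selection added to newBuilderInternal above is the same pattern repeated in the GCE, iSCSI, NFS, and RBD builders later in this commit. A minimal sketch of that rule in isolation, as an illustration only: pickEBSSource is a hypothetical helper, the package name is assumed, and the Spec.ReadOnly field is the one introduced in this PR.

package aws_ebs

import (
	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/volume"
)

// pickEBSSource is a hypothetical helper showing the rule: a volume used
// directly in a pod carries its own ReadOnly flag, while a volume reached
// through a PersistentVolume takes the flag from the Spec, where the
// persistent-claim volume placed it.
func pickEBSSource(spec *volume.Spec) (*api.AWSElasticBlockStoreVolumeSource, bool) {
	if spec.VolumeSource.AWSElasticBlockStore != nil {
		// Direct use in a pod: the pod author sets ReadOnly on the source itself.
		ebs := spec.VolumeSource.AWSElasticBlockStore
		return ebs, ebs.ReadOnly
	}
	// PersistentVolume path: ReadOnly travels on the Spec.
	return spec.PersistentVolumeSource.AWSElasticBlockStore, spec.ReadOnly
}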

View File

@ -21,6 +21,8 @@ import (
"testing"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client/testclient"
"github.com/GoogleCloudPlatform/kubernetes/pkg/types"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util/mount"
"github.com/GoogleCloudPlatform/kubernetes/pkg/volume"
@ -157,3 +159,50 @@ func TestPlugin(t *testing.T) {
t.Errorf("SetUp() failed: %v", err)
}
}
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
pv := &api.PersistentVolume{
ObjectMeta: api.ObjectMeta{
Name: "pvA",
},
Spec: api.PersistentVolumeSpec{
PersistentVolumeSource: api.PersistentVolumeSource{
AWSElasticBlockStore: &api.AWSElasticBlockStoreVolumeSource{},
},
ClaimRef: &api.ObjectReference{
Name: "claimA",
},
},
}
claim := &api.PersistentVolumeClaim{
ObjectMeta: api.ObjectMeta{
Name: "claimA",
Namespace: "nsA",
},
Spec: api.PersistentVolumeClaimSpec{
VolumeName: "pvA",
},
Status: api.PersistentVolumeClaimStatus{
Phase: api.ClaimBound,
},
}
o := testclient.NewObjects(api.Scheme, api.Scheme)
o.Add(pv)
o.Add(claim)
client := &testclient.Fake{ReactFn: testclient.ObjectReaction(o, latest.RESTMapper)}
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", client, nil))
plug, _ := plugMgr.FindPluginByName(awsElasticBlockStorePluginName)
// The readOnly flag is supplied by the persistent-claim volume source when its builder creates the wrapped volume.
spec := volume.NewSpecFromPersistentVolume(pv, true)
pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
builder, _ := plug.NewBuilder(spec, pod, volume.VolumeOptions{}, nil)
if !builder.IsReadOnly() {
t.Errorf("Expected true for builder.IsReadOnly")
}
}

View File

@ -143,6 +143,10 @@ func (ed *emptyDir) SetUpAt(dir string) error {
}
}
func (ed *emptyDir) IsReadOnly() bool {
return false
}
func (ed *emptyDir) setupDefault(dir string) error {
return os.MkdirAll(dir, 0750)
}

View File

@ -40,6 +40,7 @@ type gcePersistentDiskPlugin struct {
}
var _ volume.VolumePlugin = &gcePersistentDiskPlugin{}
var _ volume.PersistentVolumePlugin = &gcePersistentDiskPlugin{}
const (
gcePersistentDiskPluginName = "kubernetes.io/gce-pd"
@ -70,11 +71,17 @@ func (plugin *gcePersistentDiskPlugin) NewBuilder(spec *volume.Spec, pod *api.Po
}
func (plugin *gcePersistentDiskPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID, manager pdManager, mounter mount.Interface) (volume.Builder, error) {
// GCE PDs used directly in a pod have a ReadOnly flag set by the pod author.
// GCE PDs used as a PersistentVolume get the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV.
var readOnly bool
var gce *api.GCEPersistentDiskVolumeSource
if spec.VolumeSource.GCEPersistentDisk != nil {
gce = spec.VolumeSource.GCEPersistentDisk
readOnly = gce.ReadOnly
} else {
gce = spec.PersistentVolumeSource.GCEPersistentDisk
readOnly = spec.ReadOnly
}
pdName := gce.PDName
@ -83,7 +90,6 @@ func (plugin *gcePersistentDiskPlugin) newBuilderInternal(spec *volume.Spec, pod
if gce.Partition != 0 {
partition = strconv.Itoa(gce.Partition)
}
readOnly := gce.ReadOnly
return &gcePersistentDiskBuilder{
gcePersistentDisk: &gcePersistentDisk{
@ -223,6 +229,10 @@ func (b *gcePersistentDiskBuilder) SetUpAt(dir string) error {
return nil
}
func (b *gcePersistentDiskBuilder) IsReadOnly() bool {
return b.readOnly
}
func makeGlobalPDName(host volume.VolumeHost, devName string) string {
return path.Join(host.GetPluginDir(gcePersistentDiskPluginName), "mounts", devName)
}

View File

@ -21,6 +21,8 @@ import (
"testing"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client/testclient"
"github.com/GoogleCloudPlatform/kubernetes/pkg/types"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util/mount"
"github.com/GoogleCloudPlatform/kubernetes/pkg/volume"
@ -171,3 +173,50 @@ func TestPlugin(t *testing.T) {
t.Errorf("Detach watch not called")
}
}
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
pv := &api.PersistentVolume{
ObjectMeta: api.ObjectMeta{
Name: "pvA",
},
Spec: api.PersistentVolumeSpec{
PersistentVolumeSource: api.PersistentVolumeSource{
GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{},
},
ClaimRef: &api.ObjectReference{
Name: "claimA",
},
},
}
claim := &api.PersistentVolumeClaim{
ObjectMeta: api.ObjectMeta{
Name: "claimA",
Namespace: "nsA",
},
Spec: api.PersistentVolumeClaimSpec{
VolumeName: "pvA",
},
Status: api.PersistentVolumeClaimStatus{
Phase: api.ClaimBound,
},
}
o := testclient.NewObjects(api.Scheme, api.Scheme)
o.Add(pv)
o.Add(claim)
client := &testclient.Fake{ReactFn: testclient.ObjectReaction(o, latest.RESTMapper)}
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", client, nil))
plug, _ := plugMgr.FindPluginByName(gcePersistentDiskPluginName)
// The readOnly flag is supplied by the persistent-claim volume source when its builder creates the wrapped volume.
spec := volume.NewSpecFromPersistentVolume(pv, true)
pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
builder, _ := plug.NewBuilder(spec, pod, volume.VolumeOptions{}, nil)
if !builder.IsReadOnly() {
t.Errorf("Expected true for builder.IsReadOnly")
}
}

View File

@ -118,6 +118,10 @@ func (b *gitRepoVolumeBuilder) SetUp() error {
return b.SetUpAt(b.GetPath())
}
func (b *gitRepoVolumeBuilder) IsReadOnly() bool {
return false
}
// This is the spec for the volume that this plugin wraps.
var wrappedVolumeSpec = &volume.Spec{
Name: "not-used",

View File

@ -39,6 +39,7 @@ type glusterfsPlugin struct {
}
var _ volume.VolumePlugin = &glusterfsPlugin{}
var _ volume.PersistentVolumePlugin = &glusterfsPlugin{}
const (
glusterfsPluginName = "kubernetes.io/glusterfs"
@ -65,7 +66,7 @@ func (plugin *glusterfsPlugin) GetAccessModes() []api.PersistentVolumeAccessMode
}
func (plugin *glusterfsPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions, mounter mount.Interface) (volume.Builder, error) {
source := plugin.getGlusterVolumeSource(spec)
source, _ := plugin.getGlusterVolumeSource(spec)
ep_name := source.EndpointsName
ns := pod.Namespace
ep, err := plugin.host.GetKubeClient().Endpoints(ns).Get(ep_name)
@ -77,16 +78,18 @@ func (plugin *glusterfsPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, _ vol
return plugin.newBuilderInternal(spec, ep, pod, mounter, exec.New())
}
func (plugin *glusterfsPlugin) getGlusterVolumeSource(spec *volume.Spec) *api.GlusterfsVolumeSource {
func (plugin *glusterfsPlugin) getGlusterVolumeSource(spec *volume.Spec) (*api.GlusterfsVolumeSource, bool) {
// Glusterfs volumes used directly in a pod have a ReadOnly flag set by the pod author.
// Glusterfs volumes used as a PersistentVolume get the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV.
if spec.VolumeSource.Glusterfs != nil {
return spec.VolumeSource.Glusterfs
return spec.VolumeSource.Glusterfs, spec.VolumeSource.Glusterfs.ReadOnly
} else {
return spec.PersistentVolumeSource.Glusterfs
return spec.PersistentVolumeSource.Glusterfs, spec.ReadOnly
}
}
func (plugin *glusterfsPlugin) newBuilderInternal(spec *volume.Spec, ep *api.Endpoints, pod *api.Pod, mounter mount.Interface, exe exec.Interface) (volume.Builder, error) {
source := plugin.getGlusterVolumeSource(spec)
source, readOnly := plugin.getGlusterVolumeSource(spec)
return &glusterfsBuilder{
glusterfs: &glusterfs{
volName: spec.Name,
@ -96,7 +99,7 @@ func (plugin *glusterfsPlugin) newBuilderInternal(spec *volume.Spec, ep *api.End
},
hosts: ep,
path: source.Path,
readonly: source.ReadOnly,
readOnly: readOnly,
exe: exe}, nil
}
@ -125,7 +128,7 @@ type glusterfsBuilder struct {
*glusterfs
hosts *api.Endpoints
path string
readonly bool
readOnly bool
exe exec.Interface
}
@ -158,6 +161,10 @@ func (b *glusterfsBuilder) SetUpAt(dir string) error {
return err
}
func (b *glusterfsBuilder) IsReadOnly() bool {
return b.readOnly
}
func (glusterfsVolume *glusterfs) GetPath() string {
name := glusterfsPluginName
return glusterfsVolume.plugin.host.GetPodVolumeDir(glusterfsVolume.pod.UID, util.EscapeQualifiedNameForDisk(name), glusterfsVolume.volName)
@ -209,7 +216,7 @@ func (b *glusterfsBuilder) setUpAtInternal(dir string) error {
var errs error
options := []string{}
if b.readonly {
if b.readOnly {
options = append(options, "ro")
}

View File

@ -21,6 +21,8 @@ import (
"testing"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client/testclient"
"github.com/GoogleCloudPlatform/kubernetes/pkg/types"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util/exec"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util/mount"
@ -153,5 +155,63 @@ func TestPluginPersistentVolume(t *testing.T) {
},
}
doTestPlugin(t, volume.NewSpecFromPersistentVolume(vol))
doTestPlugin(t, volume.NewSpecFromPersistentVolume(vol, false))
}
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
pv := &api.PersistentVolume{
ObjectMeta: api.ObjectMeta{
Name: "pvA",
},
Spec: api.PersistentVolumeSpec{
PersistentVolumeSource: api.PersistentVolumeSource{
Glusterfs: &api.GlusterfsVolumeSource{EndpointsName: "ep", Path: "vol", ReadOnly: false},
},
ClaimRef: &api.ObjectReference{
Name: "claimA",
},
},
}
claim := &api.PersistentVolumeClaim{
ObjectMeta: api.ObjectMeta{
Name: "claimA",
Namespace: "nsA",
},
Spec: api.PersistentVolumeClaimSpec{
VolumeName: "pvA",
},
Status: api.PersistentVolumeClaimStatus{
Phase: api.ClaimBound,
},
}
ep := &api.Endpoints{
ObjectMeta: api.ObjectMeta{
Name: "ep",
},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{Name: "foo", Port: 80, Protocol: api.ProtocolTCP}},
}},
}
o := testclient.NewObjects(api.Scheme, api.Scheme)
o.Add(pv)
o.Add(claim)
o.Add(ep)
client := &testclient.Fake{ReactFn: testclient.ObjectReaction(o, latest.RESTMapper)}
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", client, nil))
plug, _ := plugMgr.FindPluginByName(glusterfsPluginName)
// The readOnly flag is supplied by the persistent-claim volume source when its builder creates the wrapped volume.
spec := volume.NewSpecFromPersistentVolume(pv, true)
pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
builder, _ := plug.NewBuilder(spec, pod, volume.VolumeOptions{}, nil)
if !builder.IsReadOnly() {
t.Errorf("Expected true for builder.IsReadOnly")
}
}

View File

@ -71,9 +71,15 @@ func (plugin *hostPathPlugin) GetAccessModes() []api.PersistentVolumeAccessMode
func (plugin *hostPathPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions, _ mount.Interface) (volume.Builder, error) {
if spec.VolumeSource.HostPath != nil {
return &hostPathBuilder{&hostPath{spec.VolumeSource.HostPath.Path}}, nil
return &hostPathBuilder{
hostPath: &hostPath{path: spec.VolumeSource.HostPath.Path},
readOnly: false,
}, nil
} else {
return &hostPathBuilder{&hostPath{spec.PersistentVolumeSource.HostPath.Path}}, nil
return &hostPathBuilder{
hostPath: &hostPath{path: spec.PersistentVolumeSource.HostPath.Path},
readOnly: spec.ReadOnly,
}, nil
}
}
@ -104,6 +110,7 @@ func (hp *hostPath) GetPath() string {
type hostPathBuilder struct {
*hostPath
readOnly bool
}
var _ volume.Builder = &hostPathBuilder{}
@ -118,6 +125,14 @@ func (b *hostPathBuilder) SetUpAt(dir string) error {
return fmt.Errorf("SetUpAt() does not make sense for host paths")
}
func (b *hostPathBuilder) IsReadOnly() bool {
return b.readOnly
}
func (b *hostPathBuilder) GetPath() string {
return b.path
}
type hostPathCleaner struct {
*hostPath
}

View File

@ -20,6 +20,8 @@ import (
"testing"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client/testclient"
"github.com/GoogleCloudPlatform/kubernetes/pkg/types"
"github.com/GoogleCloudPlatform/kubernetes/pkg/volume"
)
@ -142,3 +144,50 @@ func TestPlugin(t *testing.T) {
t.Errorf("Expected success, got: %v", err)
}
}
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
pv := &api.PersistentVolume{
ObjectMeta: api.ObjectMeta{
Name: "pvA",
},
Spec: api.PersistentVolumeSpec{
PersistentVolumeSource: api.PersistentVolumeSource{
HostPath: &api.HostPathVolumeSource{Path: "foo"},
},
ClaimRef: &api.ObjectReference{
Name: "claimA",
},
},
}
claim := &api.PersistentVolumeClaim{
ObjectMeta: api.ObjectMeta{
Name: "claimA",
Namespace: "nsA",
},
Spec: api.PersistentVolumeClaimSpec{
VolumeName: "pvA",
},
Status: api.PersistentVolumeClaimStatus{
Phase: api.ClaimBound,
},
}
o := testclient.NewObjects(api.Scheme, api.Scheme)
o.Add(pv)
o.Add(claim)
client := &testclient.Fake{ReactFn: testclient.ObjectReaction(o, latest.RESTMapper)}
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", client, nil))
plug, _ := plugMgr.FindPluginByName(hostPathPluginName)
// The readOnly flag is supplied by the persistent-claim volume source when its builder creates the wrapped volume.
spec := volume.NewSpecFromPersistentVolume(pv, true)
pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
builder, _ := plug.NewBuilder(spec, pod, volume.VolumeOptions{}, nil)
if !builder.IsReadOnly() {
t.Errorf("Expected true for builder.IsReadOnly")
}
}

View File

@ -39,6 +39,7 @@ type iscsiPlugin struct {
}
var _ volume.VolumePlugin = &iscsiPlugin{}
var _ volume.PersistentVolumePlugin = &iscsiPlugin{}
const (
iscsiPluginName = "kubernetes.io/iscsi"
@ -80,11 +81,16 @@ func (plugin *iscsiPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, _ volume.
}
func (plugin *iscsiPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface) (volume.Builder, error) {
// iSCSI volumes used directly in a pod have a ReadOnly flag set by the pod author.
// iSCSI volumes used as a PersistentVolume get the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV.
var readOnly bool
var iscsi *api.ISCSIVolumeSource
if spec.VolumeSource.ISCSI != nil {
iscsi = spec.VolumeSource.ISCSI
readOnly = iscsi.ReadOnly
} else {
iscsi = spec.PersistentVolumeSource.ISCSI
readOnly = spec.ReadOnly
}
lun := strconv.Itoa(iscsi.Lun)
@ -99,9 +105,8 @@ func (plugin *iscsiPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UI
manager: manager,
mounter: mounter,
plugin: plugin},
fsType: iscsi.FSType,
readOnly: iscsi.ReadOnly,
readOnly: readOnly,
}, nil
}
@ -178,6 +183,10 @@ type iscsiDiskCleaner struct {
var _ volume.Cleaner = &iscsiDiskCleaner{}
func (b *iscsiDiskBuilder) IsReadOnly() bool {
return b.readOnly
}
// Unmounts the bind mount, and detaches the disk only if the disk
// resource was the last reference to that disk on the kubelet.
func (c *iscsiDiskCleaner) TearDown() error {

View File

@ -21,6 +21,8 @@ import (
"testing"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client/testclient"
"github.com/GoogleCloudPlatform/kubernetes/pkg/types"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util/mount"
"github.com/GoogleCloudPlatform/kubernetes/pkg/volume"
@ -193,5 +195,57 @@ func TestPluginPersistentVolume(t *testing.T) {
},
},
}
doTestPlugin(t, volume.NewSpecFromPersistentVolume(vol))
doTestPlugin(t, volume.NewSpecFromPersistentVolume(vol, false))
}
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
pv := &api.PersistentVolume{
ObjectMeta: api.ObjectMeta{
Name: "pvA",
},
Spec: api.PersistentVolumeSpec{
PersistentVolumeSource: api.PersistentVolumeSource{
ISCSI: &api.ISCSIVolumeSource{
TargetPortal: "127.0.0.1:3260",
IQN: "iqn.2014-12.server:storage.target01",
FSType: "ext4",
Lun: 0,
},
},
ClaimRef: &api.ObjectReference{
Name: "claimA",
},
},
}
claim := &api.PersistentVolumeClaim{
ObjectMeta: api.ObjectMeta{
Name: "claimA",
Namespace: "nsA",
},
Spec: api.PersistentVolumeClaimSpec{
VolumeName: "pvA",
},
Status: api.PersistentVolumeClaimStatus{
Phase: api.ClaimBound,
},
}
o := testclient.NewObjects(api.Scheme, api.Scheme)
o.Add(pv)
o.Add(claim)
client := &testclient.Fake{ReactFn: testclient.ObjectReaction(o, latest.RESTMapper)}
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", client, nil))
plug, _ := plugMgr.FindPluginByName(iscsiPluginName)
// The readOnly flag is supplied by the persistent-claim volume source when its builder creates the wrapped volume.
spec := volume.NewSpecFromPersistentVolume(pv, true)
pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
builder, _ := plug.NewBuilder(spec, pod, volume.VolumeOptions{}, nil)
if !builder.IsReadOnly() {
t.Errorf("Expected true for builder.IsReadOnly")
}
}

View File

@ -76,11 +76,13 @@ func (plugin *nfsPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, _ volume.Vo
func (plugin *nfsPlugin) newBuilderInternal(spec *volume.Spec, pod *api.Pod, mounter mount.Interface) (volume.Builder, error) {
var source *api.NFSVolumeSource
var readOnly bool
if spec.VolumeSource.NFS != nil {
source = spec.VolumeSource.NFS
readOnly = spec.VolumeSource.NFS.ReadOnly
} else {
source = spec.PersistentVolumeSource.NFS
readOnly = spec.ReadOnly
}
return &nfsBuilder{
nfs: &nfs{
@ -91,7 +93,8 @@ func (plugin *nfsPlugin) newBuilderInternal(spec *volume.Spec, pod *api.Pod, mou
},
server: source.Server,
exportPath: source.Path,
readOnly: source.ReadOnly}, nil
readOnly: readOnly,
}, nil
}
func (plugin *nfsPlugin) NewCleaner(volName string, podUID types.UID, mounter mount.Interface) (volume.Cleaner, error) {
@ -184,12 +187,22 @@ func (b *nfsBuilder) SetUpAt(dir string) error {
return nil
}
func (b *nfsBuilder) IsReadOnly() bool {
return b.readOnly
}
//
//func (c *nfsCleaner) GetPath() string {
// name := nfsPluginName
// return c.plugin.host.GetPodVolumeDir(c.pod.UID, util.EscapeQualifiedNameForDisk(name), c.volName)
//}
var _ volume.Cleaner = &nfsCleaner{}
type nfsCleaner struct {
*nfs
}
var _ volume.Cleaner = &nfsCleaner{}
func (c *nfsCleaner) TearDown() error {
return c.TearDownAt(c.GetPath())
}

View File

@ -21,6 +21,8 @@ import (
"testing"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client/testclient"
"github.com/GoogleCloudPlatform/kubernetes/pkg/types"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util/mount"
"github.com/GoogleCloudPlatform/kubernetes/pkg/volume"
@ -199,5 +201,52 @@ func TestPluginPersistentVolume(t *testing.T) {
},
}
doTestPlugin(t, volume.NewSpecFromPersistentVolume(vol))
doTestPlugin(t, volume.NewSpecFromPersistentVolume(vol, false))
}
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
pv := &api.PersistentVolume{
ObjectMeta: api.ObjectMeta{
Name: "pvA",
},
Spec: api.PersistentVolumeSpec{
PersistentVolumeSource: api.PersistentVolumeSource{
NFS: &api.NFSVolumeSource{},
},
ClaimRef: &api.ObjectReference{
Name: "claimA",
},
},
}
claim := &api.PersistentVolumeClaim{
ObjectMeta: api.ObjectMeta{
Name: "claimA",
Namespace: "nsA",
},
Spec: api.PersistentVolumeClaimSpec{
VolumeName: "pvA",
},
Status: api.PersistentVolumeClaimStatus{
Phase: api.ClaimBound,
},
}
o := testclient.NewObjects(api.Scheme, api.Scheme)
o.Add(pv)
o.Add(claim)
client := &testclient.Fake{ReactFn: testclient.ObjectReaction(o, latest.RESTMapper)}
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", client, nil))
plug, _ := plugMgr.FindPluginByName(nfsPluginName)
// The readOnly flag is supplied by the persistent-claim volume source when its builder creates the wrapped volume.
spec := volume.NewSpecFromPersistentVolume(pv, true)
pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
builder, _ := plug.NewBuilder(spec, pod, volume.VolumeOptions{}, nil)
if !builder.IsReadOnly() {
t.Errorf("Expected true for builder.IsReadOnly")
}
}

View File

@ -26,11 +26,12 @@ import (
)
func ProbeVolumePlugins() []volume.VolumePlugin {
return []volume.VolumePlugin{&persistentClaimPlugin{nil}}
return []volume.VolumePlugin{&persistentClaimPlugin{host: nil}}
}
type persistentClaimPlugin struct {
host volume.VolumeHost
host volume.VolumeHost
readOnly bool
}
var _ volume.VolumePlugin = &persistentClaimPlugin{}
@ -78,7 +79,7 @@ func (plugin *persistentClaimPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod,
return nil, err
}
builder, err := plugin.host.NewWrapperBuilder(volume.NewSpecFromPersistentVolume(pv), pod, opts, mounter)
builder, err := plugin.host.NewWrapperBuilder(volume.NewSpecFromPersistentVolume(pv, spec.ReadOnly), pod, opts, mounter)
if err != nil {
glog.Errorf("Error creating builder for claim: %+v\n", claim.Name)
return nil, err
@ -87,6 +88,10 @@ func (plugin *persistentClaimPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod,
return builder, nil
}
func (plugin *persistentClaimPlugin) IsReadOnly() bool {
return plugin.readOnly
}
func (plugin *persistentClaimPlugin) NewCleaner(_ string, _ types.UID, _ mount.Interface) (volume.Cleaner, error) {
return nil, fmt.Errorf("This will never be called directly. The PV backing this claim has a cleaner. Kubelet uses that cleaner, not this one, when removing orphaned volumes.")
}
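
The NewBuilder change above is the root of the propagation: after resolving the claim to its bound PV, the persistent-claim plugin now forwards its own ReadOnly flag into the Spec handed to the wrapped plugin. A compressed, hypothetical sketch of that hand-off, with the package name assumed and the claim lookup and error handling shown above elided:

package persistent_claim

import (
	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/util/mount"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/volume"
)

// buildForClaim is a hypothetical condensation of NewBuilder: once the bound
// PV has been fetched for the claim, the delegate plugin receives a Spec that
// carries the claim volume's readOnly flag.
func buildForClaim(host volume.VolumeHost, pv *api.PersistentVolume, claimSpec *volume.Spec, pod *api.Pod, opts volume.VolumeOptions, mounter mount.Interface) (volume.Builder, error) {
	wrapped := volume.NewSpecFromPersistentVolume(pv, claimSpec.ReadOnly)
	return host.NewWrapperBuilder(wrapped, pod, opts, mounter)
}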

View File

@ -134,6 +134,7 @@ type Spec struct {
Name string
VolumeSource api.VolumeSource
PersistentVolumeSource api.PersistentVolumeSource
ReadOnly bool
}
// NewSpecFromVolume creates an Spec from an api.Volume
@ -145,10 +146,11 @@ func NewSpecFromVolume(vs *api.Volume) *Spec {
}
// NewSpecFromPersistentVolume creates an Spec from an api.PersistentVolume
func NewSpecFromPersistentVolume(pv *api.PersistentVolume) *Spec {
func NewSpecFromPersistentVolume(pv *api.PersistentVolume, readOnly bool) *Spec {
return &Spec{
Name: pv.Name,
PersistentVolumeSource: pv.Spec.PersistentVolumeSource,
ReadOnly: readOnly,
}
}
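
A short usage sketch of the updated constructor; the main wrapper and the HostPath source are illustrative only:

package main

import (
	"fmt"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/volume"
)

func main() {
	pv := &api.PersistentVolume{
		ObjectMeta: api.ObjectMeta{Name: "pvA"},
		Spec: api.PersistentVolumeSpec{
			PersistentVolumeSource: api.PersistentVolumeSource{
				HostPath: &api.HostPathVolumeSource{Path: "/tmp/data"},
			},
		},
	}
	// Callers now state explicitly whether the resulting Spec is read-only.
	rw := volume.NewSpecFromPersistentVolume(pv, false)
	ro := volume.NewSpecFromPersistentVolume(pv, true)
	fmt.Println(rw.ReadOnly, ro.ReadOnly) // false true
}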

View File

@ -43,7 +43,7 @@ func TestSpecSourceConverters(t *testing.T) {
},
}
converted = NewSpecFromPersistentVolume(pv)
converted = NewSpecFromPersistentVolume(pv, false)
if converted.PersistentVolumeSource.AWSElasticBlockStore == nil {
t.Errorf("Unexpected nil AWSElasticBlockStore: %+v", converted)
}

View File

@ -62,7 +62,7 @@ func diskSetUp(manager diskManager, b rbdBuilder, volPath string, mounter mount.
}
// Perform a bind mount to the full path to allow duplicate mounts of the same disk.
options := []string{"bind"}
if b.ReadOnly {
if b.IsReadOnly() {
options = append(options, "ro")
}
err = mounter.Mount(globalPDPath, volPath, "", options)

View File

@ -39,6 +39,7 @@ type rbdPlugin struct {
}
var _ volume.VolumePlugin = &rbdPlugin{}
var _ volume.PersistentVolumePlugin = &rbdPlugin{}
const (
rbdPluginName = "kubernetes.io/rbd"
@ -74,7 +75,7 @@ func (plugin *rbdPlugin) GetAccessModes() []api.PersistentVolumeAccessMode {
func (plugin *rbdPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions, mounter mount.Interface) (volume.Builder, error) {
secret := ""
source := plugin.getRBDVolumeSource(spec)
source, _ := plugin.getRBDVolumeSource(spec)
if source.SecretRef != nil {
kubeClient := plugin.host.GetKubeClient()
@ -97,16 +98,18 @@ func (plugin *rbdPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, _ volume.Vo
return plugin.newBuilderInternal(spec, pod.UID, &RBDUtil{}, mounter, secret)
}
func (plugin *rbdPlugin) getRBDVolumeSource(spec *volume.Spec) *api.RBDVolumeSource {
func (plugin *rbdPlugin) getRBDVolumeSource(spec *volume.Spec) (*api.RBDVolumeSource, bool) {
// rbd volumes used directly in a pod have a ReadOnly flag set by the pod author.
// rbd volumes used as a PersistentVolume get the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV.
if spec.VolumeSource.RBD != nil {
return spec.VolumeSource.RBD
return spec.VolumeSource.RBD, spec.VolumeSource.RBD.ReadOnly
} else {
return spec.PersistentVolumeSource.RBD
return spec.PersistentVolumeSource.RBD, spec.ReadOnly
}
}
func (plugin *rbdPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface, secret string) (volume.Builder, error) {
source := plugin.getRBDVolumeSource(spec)
source, readOnly := plugin.getRBDVolumeSource(spec)
pool := source.RBDPool
if pool == "" {
pool = "rbd"
@ -126,7 +129,7 @@ func (plugin *rbdPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID,
volName: spec.Name,
Image: source.RBDImage,
Pool: pool,
ReadOnly: source.ReadOnly,
ReadOnly: readOnly,
manager: manager,
mounter: mounter,
plugin: plugin,
@ -213,6 +216,10 @@ type rbdCleaner struct {
var _ volume.Cleaner = &rbdCleaner{}
func (b *rbd) IsReadOnly() bool {
return b.ReadOnly
}
// Unmounts the bind mount, and detaches the disk only if the disk
// resource was the last reference to that disk on the kubelet.
func (c *rbdCleaner) TearDown() error {

View File

@ -21,6 +21,8 @@ import (
"testing"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client/testclient"
"github.com/GoogleCloudPlatform/kubernetes/pkg/types"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util/mount"
"github.com/GoogleCloudPlatform/kubernetes/pkg/volume"
@ -151,5 +153,56 @@ func TestPluginPersistentVolume(t *testing.T) {
},
}
doTestPlugin(t, volume.NewSpecFromPersistentVolume(vol))
doTestPlugin(t, volume.NewSpecFromPersistentVolume(vol, false))
}
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
pv := &api.PersistentVolume{
ObjectMeta: api.ObjectMeta{
Name: "pvA",
},
Spec: api.PersistentVolumeSpec{
PersistentVolumeSource: api.PersistentVolumeSource{
RBD: &api.RBDVolumeSource{
CephMonitors: []string{"a", "b"},
RBDImage: "bar",
FSType: "ext4",
},
},
ClaimRef: &api.ObjectReference{
Name: "claimA",
},
},
}
claim := &api.PersistentVolumeClaim{
ObjectMeta: api.ObjectMeta{
Name: "claimA",
Namespace: "nsA",
},
Spec: api.PersistentVolumeClaimSpec{
VolumeName: "pvA",
},
Status: api.PersistentVolumeClaimStatus{
Phase: api.ClaimBound,
},
}
o := testclient.NewObjects(api.Scheme, api.Scheme)
o.Add(pv)
o.Add(claim)
client := &testclient.Fake{ReactFn: testclient.ObjectReaction(o, latest.RESTMapper)}
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", client, nil))
plug, _ := plugMgr.FindPluginByName(rbdPluginName)
// The readOnly flag is supplied by the persistent-claim volume source when its builder creates the wrapped volume.
spec := volume.NewSpecFromPersistentVolume(pv, true)
pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
builder, _ := plug.NewBuilder(spec, pod, volume.VolumeOptions{}, nil)
if !builder.IsReadOnly() {
t.Errorf("Expected true for builder.IsReadOnly")
}
}

View File

@ -161,7 +161,7 @@ func (util *RBDUtil) loadRBD(rbd *rbd, mnt string) error {
func (util *RBDUtil) fencing(b rbdBuilder) error {
// no need to fence readOnly
if b.ReadOnly {
if b.IsReadOnly() {
return nil
}
return util.rbdLock(b, true)

View File

@ -168,6 +168,10 @@ func (b *secretVolumeBuilder) SetUpAt(dir string) error {
return nil
}
func (sv *secretVolume) IsReadOnly() bool {
return false
}
func totalSecretBytes(secret *api.Secret) int {
totalSize := 0
for _, bytes := range secret.Data {

View File

@ -127,6 +127,10 @@ func (fv *FakeVolume) SetUpAt(dir string) error {
return os.MkdirAll(dir, 0750)
}
func (fv *FakeVolume) IsReadOnly() bool {
return false
}
func (fv *FakeVolume) GetPath() string {
return path.Join(fv.Plugin.Host.GetPodVolumeDir(fv.PodUID, util.EscapeQualifiedNameForDisk(fv.Plugin.PluginName), fv.VolName))
}

View File

@ -41,6 +41,9 @@ type Builder interface {
// directory path, which may or may not exist yet. This may be called
// more than once, so implementations must be idempotent.
SetUpAt(dir string) error
// IsReadOnly reports the builder's ReadOnly attribute.
// All persistent volumes have a private readOnly flag in their builders.
IsReadOnly() bool
}
// Cleaner interface provides methods to cleanup/unmount the volumes.
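
To illustrate the new Builder method, here is a minimal, hypothetical consumer that keys mount options off IsReadOnly rather than plugin-specific ReadOnly fields, mirroring the rbd disk_manager change later in this commit; mountOptionsFor and the example package are not part of this PR:

package example

import "github.com/GoogleCloudPlatform/kubernetes/pkg/volume"

// mountOptionsFor builds bind-mount options from the Builder itself, so the
// caller never has to reach into plugin-specific volume sources.
func mountOptionsFor(b volume.Builder) []string {
	options := []string{"bind"}
	if b.IsReadOnly() {
		options = append(options, "ro")
	}
	return options
}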

View File

@ -123,7 +123,7 @@ func (recycler *PersistentVolumeRecycler) handleRecycle(pv *api.PersistentVolume
currentPhase := pv.Status.Phase
nextPhase := currentPhase
spec := volume.NewSpecFromPersistentVolume(pv)
spec := volume.NewSpecFromPersistentVolume(pv, false)
plugin, err := recycler.pluginMgr.FindRecyclablePluginBySpec(spec)
if err != nil {
return fmt.Errorf("Could not find recyclable volume plugin for spec: %+v", err)