Merge pull request #64403 from jsafrane/aws-read-only-attach

Automatic merge from submit-queue (batch tested with PRs 57082, 64325, 64016, 64443, 64403). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Allow AWS EBS volumes to be attached as ReadOnly.

**Which issue(s) this PR fixes**:
Fixes #64402

**Special notes for your reviewer**:
This follows the logic used in e.g. the Cinder volume plugin.

**Release note**:

```release-note
AWS EBS volumes can now be used as ReadOnly in pods.
```

/sig storage
/sig aws
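
For illustration, here is a hedged sketch (not part of this PR's diff) of what the change enables at the pod API level, using the `k8s.io/api/core/v1` types; the volume ID and filesystem type below are hypothetical:

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	// Before this PR, the AWS cloud provider rejected the attach for any
	// volume whose spec requested ReadOnly; after it, the attach proceeds
	// and read-only semantics are applied when the volume is mounted.
	vol := v1.Volume{
		Name: "data",
		VolumeSource: v1.VolumeSource{
			AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
				VolumeID: "vol-0123456789abcdef0", // hypothetical EBS volume ID
				FSType:   "ext4",
				ReadOnly: true,
			},
		},
	}
	fmt.Printf("%+v\n", vol)
}
```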
Kubernetes Submit Queue 2018-05-30 18:49:23 -07:00 committed by GitHub
commit a1c8d3f5f3
4 changed files with 10 additions and 23 deletions

```diff
@@ -429,7 +429,7 @@ type Volumes interface {
 	// Attach the disk to the node with the specified NodeName
 	// nodeName can be empty to mean "the instance on which we are running"
 	// Returns the device (e.g. /dev/xvdf) where we attached the volume
-	AttachDisk(diskName KubernetesVolumeID, nodeName types.NodeName, readOnly bool) (string, error)
+	AttachDisk(diskName KubernetesVolumeID, nodeName types.NodeName) (string, error)
 	// Detach the disk from the node with the specified NodeName
 	// nodeName can be empty to mean "the instance on which we are running"
 	// Returns the device where the volume was attached
@@ -1960,7 +1960,7 @@ func wrapAttachError(err error, disk *awsDisk, instance string) error {
 }
 
 // AttachDisk implements Volumes.AttachDisk
-func (c *Cloud) AttachDisk(diskName KubernetesVolumeID, nodeName types.NodeName, readOnly bool) (string, error) {
+func (c *Cloud) AttachDisk(diskName KubernetesVolumeID, nodeName types.NodeName) (string, error) {
 	disk, err := newAWSDisk(c, diskName)
 	if err != nil {
 		return "", err
@@ -1971,12 +1971,6 @@ func (c *Cloud) AttachDisk(diskName KubernetesVolumeID, nodeName types.NodeName,
 		return "", fmt.Errorf("error finding instance %s: %q", nodeName, err)
 	}
 
-	if readOnly {
-		// TODO: We could enforce this when we mount the volume (?)
-		// TODO: We could also snapshot the volume and attach copies of it
-		return "", errors.New("AWS volumes cannot be mounted read-only")
-	}
-
 	// mountDevice will hold the device where we should try to attach the disk
 	var mountDevice mountDevice
 	// alreadyAttached is true if we have already called AttachVolume on this disk
```
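
With the `readOnly` guard removed, the cloud-provider attach path is identical for read-only and read-write consumers. A minimal calling sketch against the new signature, assuming an initialized implementation of `aws.Volumes`; the volume ID and node name are hypothetical:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/types"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
)

// attachExample calls the updated Volumes.AttachDisk. Read-only intent no
// longer appears in this call; it is handled later, at mount time.
func attachExample(volumes aws.Volumes) {
	diskName := aws.KubernetesVolumeID("aws://us-east-1a/vol-0123456789abcdef0") // hypothetical
	nodeName := types.NodeName("ip-10-0-0-1.ec2.internal")                       // hypothetical

	devicePath, err := volumes.AttachDisk(diskName, nodeName)
	if err != nil {
		fmt.Printf("attach failed: %v\n", err)
		return
	}
	fmt.Printf("attached at %s\n", devicePath) // e.g. /dev/xvdf
}

func main() {}
```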

```diff
@@ -59,7 +59,7 @@ func (plugin *awsElasticBlockStorePlugin) GetDeviceMountRefs(deviceMountPath str
 }
 
 func (attacher *awsElasticBlockStoreAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
-	volumeSource, readOnly, err := getVolumeSource(spec)
+	volumeSource, _, err := getVolumeSource(spec)
 	if err != nil {
 		return "", err
 	}
@@ -68,7 +68,7 @@ func (attacher *awsElasticBlockStoreAttacher) Attach(spec *volume.Spec, nodeName
 	// awsCloud.AttachDisk checks if disk is already attached to node and
 	// succeeds in that case, so no need to do that separately.
-	devicePath, err := attacher.awsVolumes.AttachDisk(volumeID, nodeName, readOnly)
+	devicePath, err := attacher.awsVolumes.AttachDisk(volumeID, nodeName)
	if err != nil {
 		glog.Errorf("Error attaching volume %q to node %q: %+v", volumeID, nodeName, err)
 		return "", err
```

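The attacher now discards the `readOnly` value from `getVolumeSource` because nothing on the attach path depends on it any more; the flag is honored later, when the device is mounted. A hedged sketch of that mount-time handling, following the standard Linux convention of an `ro` mount option (the helper and interface below are hypothetical, not the plugin's actual code):

```go
package main

// Mounter is a hypothetical minimal interface shaped like the mount
// utilities the kubelet uses (source, target, fstype, options).
type Mounter interface {
	Mount(source, target, fstype string, options []string) error
}

// mountDevice shows where read-only is actually enforced: the EBS volume is
// attached read-write at the block level, and the "ro" mount option is what
// makes it read-only inside the pod.
func mountDevice(m Mounter, devicePath, mountPath, fsType string, readOnly bool) error {
	options := []string{}
	if readOnly {
		options = append(options, "ro")
	}
	return m.Mount(devicePath, mountPath, fsType, options)
}

func main() {}
```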
```diff
@@ -76,15 +76,14 @@ type testcase struct {
 func TestAttachDetach(t *testing.T) {
 	diskName := aws.KubernetesVolumeID("disk")
 	nodeName := types.NodeName("instance")
-	readOnly := false
-	spec := createVolSpec(diskName, readOnly)
+	spec := createVolSpec(diskName, false)
 	attachError := errors.New("Fake attach error")
 	detachError := errors.New("Fake detach error")
 	tests := []testcase{
 		// Successful Attach call
 		{
 			name:   "Attach_Positive",
-			attach: attachCall{diskName, nodeName, readOnly, "/dev/sda", nil},
+			attach: attachCall{diskName, nodeName, "/dev/sda", nil},
 			test: func(testcase *testcase) (string, error) {
 				attacher := newAttacher(testcase)
 				return attacher.Attach(spec, nodeName)
@@ -95,7 +94,7 @@ func TestAttachDetach(t *testing.T) {
 		// Attach call fails
 		{
 			name:   "Attach_Negative",
-			attach: attachCall{diskName, nodeName, readOnly, "", attachError},
+			attach: attachCall{diskName, nodeName, "", attachError},
 			test: func(testcase *testcase) (string, error) {
 				attacher := newAttacher(testcase)
 				return attacher.Attach(spec, nodeName)
@@ -195,7 +194,6 @@ func createPVSpec(name aws.KubernetesVolumeID, readOnly bool) *volume.Spec {
 type attachCall struct {
 	diskName      aws.KubernetesVolumeID
 	nodeName      types.NodeName
-	readOnly      bool
 	retDeviceName string
 	ret           error
 }
@@ -214,7 +212,7 @@ type diskIsAttachedCall struct {
 	ret error
 }
 
-func (testcase *testcase) AttachDisk(diskName aws.KubernetesVolumeID, nodeName types.NodeName, readOnly bool) (string, error) {
+func (testcase *testcase) AttachDisk(diskName aws.KubernetesVolumeID, nodeName types.NodeName) (string, error) {
 	expected := &testcase.attach
 
 	if expected.diskName == "" && expected.nodeName == "" {
@@ -234,12 +232,7 @@ func (testcase *testcase) AttachDisk(diskName aws.KubernetesVolumeID, nodeName t
 		return "", errors.New("Unexpected AttachDisk call: wrong nodeName")
 	}
 
-	if expected.readOnly != readOnly {
-		testcase.t.Errorf("Unexpected AttachDisk call: expected readOnly %v, got %v", expected.readOnly, readOnly)
-		return "", errors.New("Unexpected AttachDisk call: wrong readOnly")
-	}
-
-	glog.V(4).Infof("AttachDisk call: %s, %s, %v, returning %q, %v", diskName, nodeName, readOnly, expected.retDeviceName, expected.ret)
+	glog.V(4).Infof("AttachDisk call: %s, %s, returning %q, %v", diskName, nodeName, expected.retDeviceName, expected.ret)
 	return expected.retDeviceName, expected.ret
 }
```
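
Note that the test's `attachCall` expectations are positional struct literals, which is why dropping the `readOnly` field required touching every literal in the table. A standalone illustration of that mechanic (field names copied from the diff, but this snippet is not the test itself):

```go
package main

import "fmt"

// attachCall mirrors the shape of the test's expectation record after this PR.
type attachCall struct {
	diskName      string
	nodeName      string
	retDeviceName string
	ret           error
}

func main() {
	// Positional literals must supply every field in declaration order, so
	// removing readOnly from the struct forced each call site in the table
	// to be updated in the same commit.
	call := attachCall{"disk", "instance", "/dev/sda", nil}
	fmt.Printf("%+v\n", call)
}
```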

```diff
@@ -36,7 +36,7 @@ type mockVolumes struct {
 var _ aws.Volumes = &mockVolumes{}
 
-func (v *mockVolumes) AttachDisk(diskName aws.KubernetesVolumeID, nodeName types.NodeName, readOnly bool) (string, error) {
+func (v *mockVolumes) AttachDisk(diskName aws.KubernetesVolumeID, nodeName types.NodeName) (string, error) {
 	return "", fmt.Errorf("not implemented")
 }
```
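
The `var _ aws.Volumes = &mockVolumes{}` line above is Go's compile-time interface check, and it is why this mock had to change along with the interface. A self-contained illustration of the pattern:

```go
package main

// Attacher is a stand-in interface for this illustration only.
type Attacher interface {
	Attach(disk, node string) (string, error)
}

type fakeAttacher struct{}

func (f *fakeAttacher) Attach(disk, node string) (string, error) {
	return "/dev/xvdf", nil
}

// Compile-time check: the build fails if *fakeAttacher ever drifts from
// Attacher, so signature changes like this PR's are caught in every fake
// and mock at build time rather than at runtime.
var _ Attacher = &fakeAttacher{}

func main() {}
```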