diff --git a/pkg/cloudprovider/providers/aws/aws.go b/pkg/cloudprovider/providers/aws/aws.go
index e95ad50d89..22a1114fb9 100644
--- a/pkg/cloudprovider/providers/aws/aws.go
+++ b/pkg/cloudprovider/providers/aws/aws.go
@@ -429,7 +429,7 @@ type Volumes interface {
 	// Attach the disk to the node with the specified NodeName
 	// nodeName can be empty to mean "the instance on which we are running"
 	// Returns the device (e.g. /dev/xvdf) where we attached the volume
-	AttachDisk(diskName KubernetesVolumeID, nodeName types.NodeName, readOnly bool) (string, error)
+	AttachDisk(diskName KubernetesVolumeID, nodeName types.NodeName) (string, error)
 	// Detach the disk from the node with the specified NodeName
 	// nodeName can be empty to mean "the instance on which we are running"
 	// Returns the device where the volume was attached
@@ -1960,7 +1960,7 @@ func wrapAttachError(err error, disk *awsDisk, instance string) error {
 }
 
 // AttachDisk implements Volumes.AttachDisk
-func (c *Cloud) AttachDisk(diskName KubernetesVolumeID, nodeName types.NodeName, readOnly bool) (string, error) {
+func (c *Cloud) AttachDisk(diskName KubernetesVolumeID, nodeName types.NodeName) (string, error) {
 	disk, err := newAWSDisk(c, diskName)
 	if err != nil {
 		return "", err
@@ -1971,12 +1971,6 @@ func (c *Cloud) AttachDisk(diskName KubernetesVolumeID, nodeName types.NodeName,
 		return "", fmt.Errorf("error finding instance %s: %q", nodeName, err)
 	}
 
-	if readOnly {
-		// TODO: We could enforce this when we mount the volume (?)
-		// TODO: We could also snapshot the volume and attach copies of it
-		return "", errors.New("AWS volumes cannot be mounted read-only")
-	}
-
 	// mountDevice will hold the device where we should try to attach the disk
 	var mountDevice mountDevice
 	// alreadyAttached is true if we have already called AttachVolume on this disk
diff --git a/pkg/volume/aws_ebs/attacher.go b/pkg/volume/aws_ebs/attacher.go
index 059431e166..ac716ed1b0 100644
--- a/pkg/volume/aws_ebs/attacher.go
+++ b/pkg/volume/aws_ebs/attacher.go
@@ -59,7 +59,7 @@ func (plugin *awsElasticBlockStorePlugin) GetDeviceMountRefs(deviceMountPath str
 }
 
 func (attacher *awsElasticBlockStoreAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
-	volumeSource, readOnly, err := getVolumeSource(spec)
+	volumeSource, _, err := getVolumeSource(spec)
 	if err != nil {
 		return "", err
 	}
@@ -68,7 +68,7 @@ func (attacher *awsElasticBlockStoreAttacher) Attach(spec *volume.Spec, nodeName
 
 	// awsCloud.AttachDisk checks if disk is already attached to node and
 	// succeeds in that case, so no need to do that separately.
-	devicePath, err := attacher.awsVolumes.AttachDisk(volumeID, nodeName, readOnly)
+	devicePath, err := attacher.awsVolumes.AttachDisk(volumeID, nodeName)
 	if err != nil {
 		glog.Errorf("Error attaching volume %q to node %q: %+v", volumeID, nodeName, err)
 		return "", err
diff --git a/pkg/volume/aws_ebs/attacher_test.go b/pkg/volume/aws_ebs/attacher_test.go
index 1076d06910..36ed854d1a 100644
--- a/pkg/volume/aws_ebs/attacher_test.go
+++ b/pkg/volume/aws_ebs/attacher_test.go
@@ -76,15 +76,14 @@ type testcase struct {
 func TestAttachDetach(t *testing.T) {
 	diskName := aws.KubernetesVolumeID("disk")
 	nodeName := types.NodeName("instance")
-	readOnly := false
-	spec := createVolSpec(diskName, readOnly)
+	spec := createVolSpec(diskName, false)
 	attachError := errors.New("Fake attach error")
 	detachError := errors.New("Fake detach error")
 	tests := []testcase{
 		// Successful Attach call
 		{
 			name:   "Attach_Positive",
-			attach: attachCall{diskName, nodeName, readOnly, "/dev/sda", nil},
+			attach: attachCall{diskName, nodeName, "/dev/sda", nil},
 			test: func(testcase *testcase) (string, error) {
 				attacher := newAttacher(testcase)
 				return attacher.Attach(spec, nodeName)
@@ -95,7 +94,7 @@ func TestAttachDetach(t *testing.T) {
 		// Attach call fails
 		{
 			name:   "Attach_Negative",
-			attach: attachCall{diskName, nodeName, readOnly, "", attachError},
+			attach: attachCall{diskName, nodeName, "", attachError},
 			test: func(testcase *testcase) (string, error) {
 				attacher := newAttacher(testcase)
 				return attacher.Attach(spec, nodeName)
@@ -195,7 +194,6 @@ func createPVSpec(name aws.KubernetesVolumeID, readOnly bool) *volume.Spec {
 type attachCall struct {
 	diskName      aws.KubernetesVolumeID
 	nodeName      types.NodeName
-	readOnly      bool
 	retDeviceName string
 	ret           error
 }
@@ -214,7 +212,7 @@ type diskIsAttachedCall struct {
 	ret        error
 }
 
-func (testcase *testcase) AttachDisk(diskName aws.KubernetesVolumeID, nodeName types.NodeName, readOnly bool) (string, error) {
+func (testcase *testcase) AttachDisk(diskName aws.KubernetesVolumeID, nodeName types.NodeName) (string, error) {
 	expected := &testcase.attach
 
 	if expected.diskName == "" && expected.nodeName == "" {
@@ -234,12 +232,7 @@ func (testcase *testcase) AttachDisk(diskName aws.KubernetesVolumeID, nodeName t
 		return "", errors.New("Unexpected AttachDisk call: wrong nodeName")
 	}
 
-	if expected.readOnly != readOnly {
-		testcase.t.Errorf("Unexpected AttachDisk call: expected readOnly %v, got %v", expected.readOnly, readOnly)
-		return "", errors.New("Unexpected AttachDisk call: wrong readOnly")
-	}
-
-	glog.V(4).Infof("AttachDisk call: %s, %s, %v, returning %q, %v", diskName, nodeName, readOnly, expected.retDeviceName, expected.ret)
+	glog.V(4).Infof("AttachDisk call: %s, %s, returning %q, %v", diskName, nodeName, expected.retDeviceName, expected.ret)
 	return expected.retDeviceName, expected.ret
 }
diff --git a/plugin/pkg/admission/storage/persistentvolume/label/admission_test.go b/plugin/pkg/admission/storage/persistentvolume/label/admission_test.go
index c8cbc7ff4f..f04939152e 100644
--- a/plugin/pkg/admission/storage/persistentvolume/label/admission_test.go
+++ b/plugin/pkg/admission/storage/persistentvolume/label/admission_test.go
@@ -36,7 +36,7 @@ type mockVolumes struct {
 
 var _ aws.Volumes = &mockVolumes{}
 
-func (v *mockVolumes) AttachDisk(diskName aws.KubernetesVolumeID, nodeName types.NodeName, readOnly bool) (string, error) {
+func (v *mockVolumes) AttachDisk(diskName aws.KubernetesVolumeID, nodeName types.NodeName) (string, error) {
 	return "", fmt.Errorf("not implemented")
 }
 
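For reference, a minimal standalone sketch (not part of this patch) of the narrowed Volumes.AttachDisk contract after the readOnly parameter is dropped. KubernetesVolumeID, NodeName, and fakeVolumes below are simplified stand-ins for the real pkg/cloudprovider/providers/aws and k8s.io/apimachinery types and for the fake in attacher_test.go; this only illustrates the two-argument call shape.

```go
package main

import "fmt"

// Stand-in types; the real code uses aws.KubernetesVolumeID and types.NodeName.
type KubernetesVolumeID string
type NodeName string

// Volumes mirrors the trimmed interface from this diff:
// AttachDisk no longer takes a readOnly flag.
type Volumes interface {
	AttachDisk(diskName KubernetesVolumeID, nodeName NodeName) (string, error)
}

// fakeVolumes shows how a test double satisfies the two-argument signature,
// analogous to the updated fake in attacher_test.go.
type fakeVolumes struct {
	retDeviceName string
	retErr        error
}

func (f *fakeVolumes) AttachDisk(diskName KubernetesVolumeID, nodeName NodeName) (string, error) {
	// Return the canned device path and error, like the test fake does.
	return f.retDeviceName, f.retErr
}

func main() {
	var v Volumes = &fakeVolumes{retDeviceName: "/dev/xvdf"}
	dev, err := v.AttachDisk("vol-0123456789abcdef0", "ip-10-0-0-1")
	fmt.Println(dev, err) // /dev/xvdf <nil>
}
```

Read-only enforcement is no longer the cloud provider's concern here; callers pass only the volume ID and node name, and any read-only handling stays at the mount layer.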