mirror of https://github.com/k3s-io/k3s
verify invalid secret/configmap/projected volumes
And avoid calling Setup if they don't exist

parent 94e59f1636
commit 85e337153a
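The hunks below apply the same pattern to the configmap, downwardAPI, projected, and secret volume mounters: build and validate the volume payload first, only then call the wrapped SetUpAt, and tear the directory back down if any later step fails. The following is a minimal, self-contained sketch of that pattern distilled from the diff; the fakeMounter type and helper names are illustrative stand-ins, not the real kubelet volume plugin API.

package main

import (
	"errors"
	"fmt"
	"os"
	"path/filepath"
)

// fakeMounter stands in for a volume mounter; names are illustrative only.
type fakeMounter struct {
	data map[string]string // payload fetched from the API object
	dir  string            // target mount directory
}

// SetUpAt mirrors the shape of the change: validate the payload before
// creating anything on disk, and undo the directory if a later step fails.
func (m *fakeMounter) SetUpAt() error {
	// 1. Validate first; if a referenced key is missing, fail before any
	//    directory is created, so nothing is left behind on disk.
	if _, ok := m.data["required-key"]; !ok {
		return errors.New("volume source references a missing key")
	}

	// 2. Create the mount directory (the wrapped SetUpAt in the real code).
	if err := os.MkdirAll(m.dir, 0755); err != nil {
		return err
	}

	// 3. From here on, clean up the directory if any later step fails
	//    (the real code builds an Unmounter and calls TearDown).
	setupSuccess := false
	defer func() {
		if !setupSuccess {
			os.RemoveAll(m.dir)
		}
	}()

	// 4. Write the payload; an error here triggers the deferred cleanup.
	for name, value := range m.data {
		if err := os.WriteFile(filepath.Join(m.dir, name), []byte(value), 0644); err != nil {
			return err
		}
	}

	setupSuccess = true
	return nil
}

func main() {
	m := &fakeMounter{data: map[string]string{}, dir: filepath.Join(os.TempDir(), "volume-sketch")}
	// Fails validation, and no directory is left behind.
	fmt.Println("setup error:", m.SetUpAt())
}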
@@ -203,13 +203,6 @@ func (b *configMapVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
 		}
 	}
 
-	if err := wrapped.SetUpAt(dir, fsGroup); err != nil {
-		return err
-	}
-	if err := volumeutil.MakeNestedMountpoints(b.volName, dir, b.pod); err != nil {
-		return err
-	}
-
 	totalBytes := totalBytes(configMap)
 	glog.V(3).Infof("Received configMap %v/%v containing (%v) pieces of data, %v total bytes",
 		b.pod.Namespace,
@@ -222,6 +215,29 @@ func (b *configMapVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
 		return err
 	}
 
+	setupSuccess := false
+	if err := wrapped.SetUpAt(dir, fsGroup); err != nil {
+		return err
+	}
+	if err := volumeutil.MakeNestedMountpoints(b.volName, dir, b.pod); err != nil {
+		return err
+	}
+
+	defer func() {
+		// Clean up directories if setup fails
+		if !setupSuccess {
+			unmounter, unmountCreateErr := b.plugin.NewUnmounter(b.volName, b.podUID)
+			if unmountCreateErr != nil {
+				glog.Errorf("error cleaning up mount %s after failure. Create unmounter failed with %v", b.volName, unmountCreateErr)
+				return
+			}
+			tearDownErr := unmounter.TearDown()
+			if tearDownErr != nil {
+				glog.Errorf("Error tearing down volume %s with : %v", b.volName, tearDownErr)
+			}
+		}
+	}()
+
 	writerContext := fmt.Sprintf("pod %v/%v volume %v", b.pod.Namespace, b.pod.Name, b.volName)
 	writer, err := volumeutil.NewAtomicWriter(dir, writerContext)
 	if err != nil {
@@ -240,6 +256,7 @@ func (b *configMapVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
 		glog.Errorf("Error applying volume ownership settings for group: %v", fsGroup)
 		return err
 	}
+	setupSuccess = true
 	return nil
 }
 
@@ -613,6 +613,66 @@ func volumeSpec(volumeName, configMapName string, defaultMode int32) *v1.Volume
 	}
 }
 
+func TestInvalidConfigMapSetup(t *testing.T) {
+	var (
+		testPodUID     = types.UID("test_pod_uid")
+		testVolumeName = "test_volume_name"
+		testNamespace  = "test_configmap_namespace"
+		testName       = "test_configmap_name"
+
+		volumeSpec    = volumeSpec(testVolumeName, testName, 0644)
+		configMap     = configMap(testNamespace, testName)
+		client        = fake.NewSimpleClientset(&configMap)
+		pluginMgr     = volume.VolumePluginMgr{}
+		tempDir, host = newTestHost(t, client)
+	)
+	volumeSpec.VolumeSource.ConfigMap.Items = []v1.KeyToPath{
+		{Key: "missing", Path: "missing"},
+	}
+
+	defer os.RemoveAll(tempDir)
+	pluginMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, host)
+
+	plugin, err := pluginMgr.FindPluginByName(configMapPluginName)
+	if err != nil {
+		t.Errorf("Can't find the plugin by name")
+	}
+
+	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, UID: testPodUID}}
+	mounter, err := plugin.NewMounter(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
+	if err != nil {
+		t.Errorf("Failed to make a new Mounter: %v", err)
+	}
+	if mounter == nil {
+		t.Errorf("Got a nil Mounter")
+	}
+
+	vName, err := plugin.GetVolumeName(volume.NewSpecFromVolume(volumeSpec))
+	if err != nil {
+		t.Errorf("Failed to GetVolumeName: %v", err)
+	}
+	if vName != "test_volume_name/test_configmap_name" {
+		t.Errorf("Got unexpect VolumeName %v", vName)
+	}
+
+	volumePath := mounter.GetPath()
+	if !strings.HasSuffix(volumePath, fmt.Sprintf("pods/test_pod_uid/volumes/kubernetes.io~configmap/test_volume_name")) {
+		t.Errorf("Got unexpected path: %s", volumePath)
+	}
+
+	fsGroup := int64(1001)
+	err = mounter.SetUp(&fsGroup)
+	if err == nil {
+		t.Errorf("Expected setup to fail")
+	}
+	_, err = os.Stat(volumePath)
+	if err == nil {
+		t.Errorf("Expected %s to not exist", volumePath)
+	}
+
+	doTestCleanAndTeardown(plugin, testPodUID, testVolumeName, volumePath, t)
+}
+
 func configMap(namespace, name string) v1.ConfigMap {
 	return v1.ConfigMap{
 		ObjectMeta: metav1.ObjectMeta{
@@ -185,6 +185,7 @@ func (b *downwardAPIVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
 		return err
 	}
 
+	setupSuccess := false
 	if err := wrapped.SetUpAt(dir, fsGroup); err != nil {
 		glog.Errorf("Unable to setup downwardAPI volume %v for pod %v/%v: %s", b.volName, b.pod.Namespace, b.pod.Name, err.Error())
 		return err
@@ -194,6 +195,21 @@ func (b *downwardAPIVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
 		return err
 	}
 
+	defer func() {
+		// Clean up directories if setup fails
+		if !setupSuccess {
+			unmounter, unmountCreateErr := b.plugin.NewUnmounter(b.volName, b.podUID)
+			if unmountCreateErr != nil {
+				glog.Errorf("error cleaning up mount %s after failure. Create unmounter failed with %v", b.volName, unmountCreateErr)
+				return
+			}
+			tearDownErr := unmounter.TearDown()
+			if tearDownErr != nil {
+				glog.Errorf("error tearing down volume %s with : %v", b.volName, tearDownErr)
+			}
+		}
+	}()
+
 	writerContext := fmt.Sprintf("pod %v/%v volume %v", b.pod.Namespace, b.pod.Name, b.volName)
 	writer, err := volumeutil.NewAtomicWriter(dir, writerContext)
 	if err != nil {
@@ -213,6 +229,7 @@ func (b *downwardAPIVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
 		return err
 	}
 
+	setupSuccess = true
 	return nil
 }
 
@@ -198,6 +198,8 @@ func (s *projectedVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
 		glog.Errorf("Error preparing data for projected volume %v for pod %v/%v: %s", s.volName, s.pod.Namespace, s.pod.Name, err.Error())
 		return err
 	}
+
+	setupSuccess := false
 	if err := wrapped.SetUpAt(dir, fsGroup); err != nil {
 		return err
 	}
@@ -206,6 +208,21 @@ func (s *projectedVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
 		return err
 	}
 
+	defer func() {
+		// Clean up directories if setup fails
+		if !setupSuccess {
+			unmounter, unmountCreateErr := s.plugin.NewUnmounter(s.volName, s.podUID)
+			if unmountCreateErr != nil {
+				glog.Errorf("error cleaning up mount %s after failure. Create unmounter failed with %v", s.volName, unmountCreateErr)
+				return
+			}
+			tearDownErr := unmounter.TearDown()
+			if tearDownErr != nil {
+				glog.Errorf("error tearing down volume %s with : %v", s.volName, tearDownErr)
+			}
+		}
+	}()
+
 	writerContext := fmt.Sprintf("pod %v/%v volume %v", s.pod.Namespace, s.pod.Name, s.volName)
 	writer, err := volumeutil.NewAtomicWriter(dir, writerContext)
 	if err != nil {
@@ -224,6 +241,7 @@ func (s *projectedVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
 		glog.Errorf("Error applying volume ownership settings for group: %v", fsGroup)
 		return err
 	}
+	setupSuccess = true
 	return nil
 }
 
@@ -790,6 +790,56 @@ func TestPlugin(t *testing.T) {
 	defer doTestCleanAndTeardown(plugin, testPodUID, testVolumeName, volumePath, t)
 }
 
+func TestInvalidPathProjected(t *testing.T) {
+	var (
+		testPodUID     = types.UID("test_pod_uid")
+		testVolumeName = "test_volume_name"
+		testNamespace  = "test_projected_namespace"
+		testName       = "test_projected_name"
+
+		volumeSpec    = makeVolumeSpec(testVolumeName, testName, 0644)
+		secret        = makeSecret(testNamespace, testName)
+		client        = fake.NewSimpleClientset(&secret)
+		pluginMgr     = volume.VolumePluginMgr{}
+		rootDir, host = newTestHost(t, client)
+	)
+	volumeSpec.Projected.Sources[0].Secret.Items = []v1.KeyToPath{
+		{Key: "missing", Path: "missing"},
+	}
+
+	defer os.RemoveAll(rootDir)
+	pluginMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, host)
+
+	plugin, err := pluginMgr.FindPluginByName(projectedPluginName)
+	if err != nil {
+		t.Errorf("Can't find the plugin by name")
+	}
+
+	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, UID: testPodUID}}
+	mounter, err := plugin.NewMounter(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
+	if err != nil {
+		t.Errorf("Failed to make a new Mounter: %v", err)
+	}
+	if mounter == nil {
+		t.Errorf("Got a nil Mounter")
+	}
+
+	volumePath := mounter.GetPath()
+	if !strings.HasSuffix(volumePath, fmt.Sprintf("pods/test_pod_uid/volumes/kubernetes.io~projected/%s", testVolumeName)) {
+		t.Errorf("Got unexpected path: %s", volumePath)
+	}
+
+	err = mounter.SetUp(nil)
+	if err == nil {
+		t.Errorf("Expected error while setting up secret")
+	}
+
+	_, err = os.Stat(volumePath)
+	if err == nil {
+		t.Errorf("Expected path %s to not exist", volumePath)
+	}
+}
+
 // Test the case where the plugin's ready file exists, but the volume dir is not a
 // mountpoint, which is the state the system will be in after reboot. The dir
 // should be mounter and the secret data written to it.
@@ -202,13 +202,6 @@ func (b *secretVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
 		}
 	}
 
-	if err := wrapped.SetUpAt(dir, fsGroup); err != nil {
-		return err
-	}
-	if err := volumeutil.MakeNestedMountpoints(b.volName, dir, b.pod); err != nil {
-		return err
-	}
-
 	totalBytes := totalSecretBytes(secret)
 	glog.V(3).Infof("Received secret %v/%v containing (%v) pieces of data, %v total bytes",
 		b.pod.Namespace,
@@ -221,6 +214,29 @@ func (b *secretVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
 		return err
 	}
 
+	setupSuccess := false
+	if err := wrapped.SetUpAt(dir, fsGroup); err != nil {
+		return err
+	}
+	if err := volumeutil.MakeNestedMountpoints(b.volName, dir, b.pod); err != nil {
+		return err
+	}
+
+	defer func() {
+		// Clean up directories if setup fails
+		if !setupSuccess {
+			unmounter, unmountCreateErr := b.plugin.NewUnmounter(b.volName, b.podUID)
+			if unmountCreateErr != nil {
+				glog.Errorf("error cleaning up mount %s after failure. Create unmounter failed with %v", b.volName, unmountCreateErr)
+				return
+			}
+			tearDownErr := unmounter.TearDown()
+			if tearDownErr != nil {
+				glog.Errorf("error tearing down volume %s with : %v", b.volName, tearDownErr)
+			}
+		}
+	}()
+
 	writerContext := fmt.Sprintf("pod %v/%v volume %v", b.pod.Namespace, b.pod.Name, b.volName)
 	writer, err := volumeutil.NewAtomicWriter(dir, writerContext)
 	if err != nil {
@@ -239,6 +255,7 @@ func (b *secretVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
 		glog.Errorf("Error applying volume ownership settings for group: %v", fsGroup)
 		return err
 	}
+	setupSuccess = true
 	return nil
 }
 
@@ -362,6 +362,56 @@ func TestPlugin(t *testing.T) {
 	}
 }
 
+func TestInvalidPathSecret(t *testing.T) {
+	var (
+		testPodUID     = types.UID("test_pod_uid")
+		testVolumeName = "test_volume_name"
+		testNamespace  = "test_secret_namespace"
+		testName       = "test_secret_name"
+
+		volumeSpec    = volumeSpec(testVolumeName, testName, 0644)
+		secret        = secret(testNamespace, testName)
+		client        = fake.NewSimpleClientset(&secret)
+		pluginMgr     = volume.VolumePluginMgr{}
+		rootDir, host = newTestHost(t, client)
+	)
+	volumeSpec.Secret.Items = []v1.KeyToPath{
+		{Key: "missing", Path: "missing"},
+	}
+
+	defer os.RemoveAll(rootDir)
+	pluginMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, host)
+
+	plugin, err := pluginMgr.FindPluginByName(secretPluginName)
+	if err != nil {
+		t.Errorf("Can't find the plugin by name")
+	}
+
+	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, UID: testPodUID}}
+	mounter, err := plugin.NewMounter(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
+	if err != nil {
+		t.Errorf("Failed to make a new Mounter: %v", err)
+	}
+	if mounter == nil {
+		t.Fatalf("Got a nil Mounter")
+	}
+
+	volumePath := mounter.GetPath()
+	if !strings.HasSuffix(volumePath, fmt.Sprintf("pods/test_pod_uid/volumes/kubernetes.io~secret/test_volume_name")) {
+		t.Errorf("Got unexpected path: %s", volumePath)
+	}
+
+	err = mounter.SetUp(nil)
+	if err == nil {
+		t.Errorf("Expected error while setting up secret")
+	}
+
+	_, err = os.Stat(volumePath)
+	if err == nil {
+		t.Errorf("Expected path %s to not exist", volumePath)
+	}
+}
+
 // Test the case where the plugin's ready file exists, but the volume dir is not a
 // mountpoint, which is the state the system will be in after reboot. The dir
 // should be mounter and the secret data written to it.