From 8fb1b71c6670b13669c365058546c9671c81b0c9 Mon Sep 17 00:00:00 2001 From: Jeff Peeler Date: Thu, 10 Nov 2016 17:33:06 -0500 Subject: [PATCH] Implements projected volume driver Proposal: kubernetes/kubernetes#35313 --- cmd/kubelet/app/plugins.go | 2 + hack/.linted_packages | 1 + hack/verify-flags/exceptions.txt | 8 + pkg/api/testing/fuzzer.go | 10 + pkg/api/types.go | 83 +- pkg/api/v1/defaults.go | 7 + pkg/api/v1/defaults_test.go | 22 + pkg/api/v1/types.go | 85 + pkg/api/validation/validation.go | 132 +- pkg/apis/extensions/types.go | 1 + pkg/kubelet/secret/secret_manager.go | 6 + pkg/security/podsecuritypolicy/util/util.go | 6 +- pkg/volume/configmap/configmap.go | 5 +- pkg/volume/configmap/configmap_test.go | 2 +- pkg/volume/downwardapi/downwardapi.go | 16 +- pkg/volume/projected/projected.go | 325 ++++ pkg/volume/projected/projected_test.go | 1046 +++++++++++++ pkg/volume/secret/secret.go | 5 +- pkg/volume/secret/secret_test.go | 2 +- test/e2e/common/projected.go | 1534 +++++++++++++++++++ 20 files changed, 3269 insertions(+), 29 deletions(-) create mode 100644 pkg/volume/projected/projected.go create mode 100644 pkg/volume/projected/projected_test.go create mode 100644 test/e2e/common/projected.go diff --git a/cmd/kubelet/app/plugins.go b/cmd/kubelet/app/plugins.go index 4dfc81724f..417193ec52 100644 --- a/cmd/kubelet/app/plugins.go +++ b/cmd/kubelet/app/plugins.go @@ -46,6 +46,7 @@ import ( "k8s.io/kubernetes/pkg/volume/iscsi" "k8s.io/kubernetes/pkg/volume/nfs" "k8s.io/kubernetes/pkg/volume/photon_pd" + "k8s.io/kubernetes/pkg/volume/projected" "k8s.io/kubernetes/pkg/volume/quobyte" "k8s.io/kubernetes/pkg/volume/rbd" "k8s.io/kubernetes/pkg/volume/secret" @@ -88,6 +89,7 @@ func ProbeVolumePlugins(pluginDir string) []volume.VolumePlugin { allPlugins = append(allPlugins, vsphere_volume.ProbeVolumePlugins()...) allPlugins = append(allPlugins, azure_dd.ProbeVolumePlugins()...) allPlugins = append(allPlugins, photon_pd.ProbeVolumePlugins()...) 
+ allPlugins = append(allPlugins, projected.ProbeVolumePlugins()...) return allPlugins } diff --git a/hack/.linted_packages b/hack/.linted_packages index a6f14cfdba..c499b96661 100644 --- a/hack/.linted_packages +++ b/hack/.linted_packages @@ -243,6 +243,7 @@ pkg/util/yaml pkg/version/prometheus pkg/volume pkg/volume/downwardapi +pkg/volume/projected pkg/volume/quobyte pkg/volume/util/nestedpendingoperations pkg/volume/util/operationexecutor diff --git a/hack/verify-flags/exceptions.txt b/hack/verify-flags/exceptions.txt index 8f0c861e9f..608f1b61b5 100644 --- a/hack/verify-flags/exceptions.txt +++ b/hack/verify-flags/exceptions.txt @@ -147,6 +147,14 @@ test/e2e/common/host_path.go: fmt.Sprintf("--file_content_in_loop=%v", filePat test/e2e/common/host_path.go: fmt.Sprintf("--file_content_in_loop=%v", filePathInReader), test/e2e/common/host_path.go: fmt.Sprintf("--retry_time=%d", retryDuration), test/e2e/common/host_path.go: fmt.Sprintf("--retry_time=%d", retryDuration), +test/e2e/common/projected.go: Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=/etc/projected-configmap-volume/data-1"}, +test/e2e/common/projected.go: Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=/etc/projected-configmap-volumes/create/data-1"}, +test/e2e/common/projected.go: Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=/etc/projected-configmap-volumes/delete/data-1"}, +test/e2e/common/projected.go: Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=/etc/projected-configmap-volumes/update/data-3"}, +test/e2e/common/projected.go: Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=/etc/projected-secret-volumes/create/data-1"}, +test/e2e/common/projected.go: Command: []string{"/mt", 
"--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=/etc/projected-secret-volumes/delete/data-1"}, +test/e2e/common/projected.go: Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=/etc/projected-secret-volumes/update/data-3"}, +test/e2e/common/projected.go: Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=" + filePath}, test/e2e/common/secrets.go: Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=/etc/secret-volumes/create/data-1"}, test/e2e/common/secrets.go: Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=/etc/secret-volumes/delete/data-1"}, test/e2e/common/secrets.go: Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=/etc/secret-volumes/update/data-3"}, diff --git a/pkg/api/testing/fuzzer.go b/pkg/api/testing/fuzzer.go index 1a6a204f8e..4e922e3e6f 100644 --- a/pkg/api/testing/fuzzer.go +++ b/pkg/api/testing/fuzzer.go @@ -267,6 +267,16 @@ func coreFuncs(t apitesting.TestingCommon) []interface{} { mode &= 0777 d.DefaultMode = &mode }, + func(s *api.ProjectedVolumeSource, c fuzz.Continue) { + c.FuzzNoCustom(s) // fuzz self without calling this function again + + // DefaultMode should always be set, it has a default + // value and it is expected to be between 0 and 0777 + var mode int32 + c.Fuzz(&mode) + mode &= 0777 + s.DefaultMode = &mode + }, func(k *api.KeyToPath, c fuzz.Continue) { c.FuzzNoCustom(k) // fuzz self without calling this function again k.Key = c.RandString() diff --git a/pkg/api/types.go b/pkg/api/types.go index e5ccc248ac..cdaa735aa2 100644 --- a/pkg/api/types.go +++ b/pkg/api/types.go @@ -294,6 +294,8 @@ type VolumeSource struct { AzureDisk *AzureDiskVolumeSource // PhotonPersistentDisk represents a Photon Controller persistent disk attached 
and mounted on kubelets host machine PhotonPersistentDisk *PhotonPersistentDiskVolumeSource + // Items for all in one resources secrets, configmaps, and downward API + Projected *ProjectedVolumeSource } // Similar to VolumeSource but meant for the administrator who creates PVs. @@ -746,7 +748,29 @@ type SecretVolumeSource struct { // mode, like fsGroup, and the result can be other mode bits set. // +optional DefaultMode *int32 - // Specify whether the Secret or it's key must be defined + // Specify whether the Secret or its key must be defined + // +optional + Optional *bool +} + +// Adapts a secret into a projected volume. +// +// The contents of the target Secret's Data field will be presented in a +// projected volume as files using the keys in the Data field as the file names. +// Note that this is identical to a secret volume source without the default +// mode. +type SecretProjection struct { + LocalObjectReference + // If unspecified, each key-value pair in the Data field of the referenced + // Secret will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the Secret, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + Items []KeyToPath + // Specify whether the Secret or its key must be defined // +optional Optional *bool } @@ -927,6 +951,15 @@ type DownwardAPIVolumeFile struct { Mode *int32 } +// Represents downward API info for projecting into a projected volume. +// Note that this is identical to a downwardAPI volume source without the default +// mode. 
+type DownwardAPIProjection struct { + // Items is a list of DownwardAPIVolume file + // +optional + Items []DownwardAPIVolumeFile +} + // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. type AzureFileVolumeSource struct { // the name of secret that contains Azure Storage Account Name and Key @@ -1017,6 +1050,54 @@ type ConfigMapVolumeSource struct { Optional *bool } +// Adapts a ConfigMap into a projected volume. +// +// The contents of the target ConfigMap's Data field will be presented in a +// projected volume as files using the keys in the Data field as the file names, +// unless the items element is populated with specific mappings of keys to paths. +// Note that this is identical to a configmap volume source without the default +// mode. +type ConfigMapProjection struct { + LocalObjectReference + // If unspecified, each key-value pair in the Data field of the referenced + // ConfigMap will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the ConfigMap, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + Items []KeyToPath + // Specify whether the ConfigMap or it's keys must be defined + // +optional + Optional *bool +} + +// Represents a projected volume source +type ProjectedVolumeSource struct { + // list of volume projections + Sources []VolumeProjection + // Mode bits to use on created files by default. Must be a value between + // 0 and 0777. + // Directories within the path are not affected by this setting. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. 
+ // +optional + DefaultMode *int32 +} + +// Projection that may be projected along with other supported volume types +type VolumeProjection struct { + // all types below are the supported types for projection into the same volume + + // information about the secret data to project + Secret *SecretProjection + // information about the downwardAPI data to project + DownwardAPI *DownwardAPIProjection + // information about the configMap data to project + ConfigMap *ConfigMapProjection +} + // Maps a string key to a path within a volume. type KeyToPath struct { // The key to project. diff --git a/pkg/api/v1/defaults.go b/pkg/api/v1/defaults.go index 6d620bfbf1..632430fcd7 100644 --- a/pkg/api/v1/defaults.go +++ b/pkg/api/v1/defaults.go @@ -39,6 +39,7 @@ func addDefaultingFuncs(scheme *runtime.Scheme) error { SetDefaults_SecretVolumeSource, SetDefaults_ConfigMapVolumeSource, SetDefaults_DownwardAPIVolumeSource, + SetDefaults_ProjectedVolumeSource, SetDefaults_Secret, SetDefaults_PersistentVolume, SetDefaults_PersistentVolumeClaim, @@ -218,6 +219,12 @@ func SetDefaults_Secret(obj *Secret) { obj.Type = SecretTypeOpaque } } +func SetDefaults_ProjectedVolumeSource(obj *ProjectedVolumeSource) { + if obj.DefaultMode == nil { + perm := int32(ProjectedVolumeSourceDefaultMode) + obj.DefaultMode = &perm + } +} func SetDefaults_PersistentVolume(obj *PersistentVolume) { if obj.Status.Phase == "" { obj.Status.Phase = VolumePending diff --git a/pkg/api/v1/defaults_test.go b/pkg/api/v1/defaults_test.go index 4deeb377c0..da2933f227 100644 --- a/pkg/api/v1/defaults_test.go +++ b/pkg/api/v1/defaults_test.go @@ -376,6 +376,28 @@ func TestSetDefaultDownwardAPIVolumeSource(t *testing.T) { } } +func TestSetDefaultProjectedVolumeSource(t *testing.T) { + s := v1.PodSpec{} + s.Volumes = []v1.Volume{ + { + VolumeSource: v1.VolumeSource{ + Projected: &v1.ProjectedVolumeSource{}, + }, + }, + } + pod := &v1.Pod{ + Spec: s, + } + output := roundTrip(t, runtime.Object(pod)) + pod2 := 
output.(*v1.Pod) + defaultMode := pod2.Spec.Volumes[0].VolumeSource.Projected.DefaultMode + expectedMode := v1.ProjectedVolumeSourceDefaultMode + + if defaultMode == nil || *defaultMode != expectedMode { + t.Errorf("Expected ProjectedVolumeSource DefaultMode %v, got %v", expectedMode, defaultMode) + } +} + func TestSetDefaultSecret(t *testing.T) { s := &v1.Secret{} obj2 := roundTrip(t, runtime.Object(s)) diff --git a/pkg/api/v1/types.go b/pkg/api/v1/types.go index 529b5de79c..11f722ca6e 100644 --- a/pkg/api/v1/types.go +++ b/pkg/api/v1/types.go @@ -326,6 +326,8 @@ type VolumeSource struct { AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty" protobuf:"bytes,22,opt,name=azureDisk"` // PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine PhotonPersistentDisk *PhotonPersistentDiskVolumeSource `json:"photonPersistentDisk,omitempty" protobuf:"bytes,23,opt,name=photonPersistentDisk"` + // Items for all in one resources secrets, configmaps, and downward API + Projected *ProjectedVolumeSource `json:"projected,omitempty"` } // PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. @@ -944,6 +946,28 @@ const ( SecretVolumeSourceDefaultMode int32 = 0644 ) +// Adapts a secret into a projected volume. +// +// The contents of the target Secret's Data field will be presented in a +// projected volume as files using the keys in the Data field as the file names. +// Note that this is identical to a secret volume source without the default +// mode. +type SecretProjection struct { + LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` + // If unspecified, each key-value pair in the Data field of the referenced + // Secret will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. 
If a key is specified which is not present in the Secret, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` + // Specify whether the Secret or its key must be defined + // +optional + Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"` +} + // Represents an NFS mount that lasts the lifetime of a pod. // NFS volumes do not support ownership management or SELinux relabeling. type NFSVolumeSource struct { @@ -1108,6 +1132,58 @@ const ( ConfigMapVolumeSourceDefaultMode int32 = 0644 ) +// Adapts a ConfigMap into a projected volume. +// +// The contents of the target ConfigMap's Data field will be presented in a +// projected volume as files using the keys in the Data field as the file names, +// unless the items element is populated with specific mappings of keys to paths. +// Note that this is identical to a configmap volume source without the default +// mode. +type ConfigMapProjection struct { + LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` + // If unspecified, each key-value pair in the Data field of the referenced + // ConfigMap will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the ConfigMap, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. 
+ // +optional + Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` + // Specify whether the ConfigMap or it's keys must be defined + // +optional + Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"` +} + +// Represents a projected volume source +type ProjectedVolumeSource struct { + // list of volume projections + Sources []VolumeProjection `json:"sources"` + // Mode bits to use on created files by default. Must be a value between + // 0 and 0777. + // Directories within the path are not affected by this setting. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + DefaultMode *int32 `json:"defaultMode,omitempty"` +} + +// Projection that may be projected along with other supported volume types +type VolumeProjection struct { + // all types below are the supported types for projection into the same volume + + // information about the secret data to project + Secret *SecretProjection `json:"secret,omitempty"` + // information about the downwardAPI data to project + DownwardAPI *DownwardAPIProjection `json:"downwardAPI,omitempty"` + // information about the configMap data to project + ConfigMap *ConfigMapProjection `json:"configMap,omitempty"` +} + +const ( + ProjectedVolumeSourceDefaultMode int32 = 0644 +) + // Maps a string key to a path within a volume. type KeyToPath struct { // The key to project. @@ -4095,6 +4171,15 @@ type DownwardAPIVolumeFile struct { Mode *int32 `json:"mode,omitempty" protobuf:"varint,4,opt,name=mode"` } +// Represents downward API info for projecting into a projected volume. +// Note that this is identical to a downwardAPI volume source without the default +// mode. 
+type DownwardAPIProjection struct { + // Items is a list of DownwardAPIVolume file + // +optional + Items []DownwardAPIVolumeFile `json:"items,omitempty" protobuf:"bytes,1,rep,name=items"` +} + // SecurityContext holds security configuration that will be applied to a container. // Some fields are present in both SecurityContext and PodSecurityContext. When both // are set, the values in SecurityContext take precedence. diff --git a/pkg/api/validation/validation.go b/pkg/api/validation/validation.go index ded6d80def..01474a2e80 100644 --- a/pkg/api/validation/validation.go +++ b/pkg/api/validation/validation.go @@ -518,6 +518,14 @@ func validateVolumeSource(source *api.VolumeSource, fldPath *field.Path) field.E numVolumes++ allErrs = append(allErrs, validateAzureDisk(source.AzureDisk, fldPath.Child("azureDisk"))...) } + if source.Projected != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("projected"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateProjectedVolumeSource(source.Projected, fldPath.Child("projected"))...) + } + } if numVolumes == 0 { allErrs = append(allErrs, field.Required(fldPath, "must specify a volume type")) @@ -723,6 +731,30 @@ var validDownwardAPIFieldPathExpressions = sets.NewString( "metadata.labels", "metadata.annotations") +func validateDownwardAPIVolumeFile(file *api.DownwardAPIVolumeFile, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if len(file.Path) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("path"), "")) + } + allErrs = append(allErrs, validateLocalNonReservedPath(file.Path, fldPath.Child("path"))...) + if file.FieldRef != nil { + allErrs = append(allErrs, validateObjectFieldSelector(file.FieldRef, &validDownwardAPIFieldPathExpressions, fldPath.Child("fieldRef"))...) 
+ if file.ResourceFieldRef != nil { + allErrs = append(allErrs, field.Invalid(fldPath, "resource", "fieldRef and resourceFieldRef can not be specified simultaneously")) + } + } else if file.ResourceFieldRef != nil { + allErrs = append(allErrs, validateContainerResourceFieldSelector(file.ResourceFieldRef, &validContainerResourceFieldPathExpressions, fldPath.Child("resourceFieldRef"), true)...) + } else { + allErrs = append(allErrs, field.Required(fldPath, "one of fieldRef and resourceFieldRef is required")) + } + if file.Mode != nil && (*file.Mode > 0777 || *file.Mode < 0) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("mode"), *file.Mode, volumeModeErrorMsg)) + } + + return allErrs +} + func validateDownwardAPIVolumeSource(downwardAPIVolume *api.DownwardAPIVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} @@ -732,27 +764,99 @@ func validateDownwardAPIVolumeSource(downwardAPIVolume *api.DownwardAPIVolumeSou } for _, file := range downwardAPIVolume.Items { - if len(file.Path) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("path"), "")) - } - allErrs = append(allErrs, validateLocalNonReservedPath(file.Path, fldPath.Child("path"))...) - if file.FieldRef != nil { - allErrs = append(allErrs, validateObjectFieldSelector(file.FieldRef, &validDownwardAPIFieldPathExpressions, fldPath.Child("fieldRef"))...) - if file.ResourceFieldRef != nil { - allErrs = append(allErrs, field.Invalid(fldPath, "resource", "fieldRef and resourceFieldRef can not be specified simultaneously")) + allErrs = append(allErrs, validateDownwardAPIVolumeFile(&file, fldPath)...) 
+ } + return allErrs +} + +func validateProjectionSources(projection *api.ProjectedVolumeSource, projectionMode *int32, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + allPaths := sets.String{} + + for _, source := range projection.Sources { + numSources := 0 + if source.Secret != nil { + if numSources > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("secret"), "may not specify more than 1 volume type")) + } else { + numSources++ + if len(source.Secret.Name) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("name"), "")) + } + itemsPath := fldPath.Child("items") + for i, kp := range source.Secret.Items { + itemPath := itemsPath.Index(i) + allErrs = append(allErrs, validateKeyToPath(&kp, itemPath)...) + if len(kp.Path) > 0 { + curPath := kp.Path + if !allPaths.Has(curPath) { + allPaths.Insert(curPath) + } else { + allErrs = append(allErrs, field.Invalid(fldPath, source.Secret.Name, "conflicting duplicate paths")) + } + } + } } - } else if file.ResourceFieldRef != nil { - allErrs = append(allErrs, validateContainerResourceFieldSelector(file.ResourceFieldRef, &validContainerResourceFieldPathExpressions, fldPath.Child("resourceFieldRef"), true)...) - } else { - allErrs = append(allErrs, field.Required(fldPath, "one of fieldRef and resourceFieldRef is required")) } - if file.Mode != nil && (*file.Mode > 0777 || *file.Mode < 0) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("mode"), *file.Mode, volumeModeErrorMsg)) + if source.ConfigMap != nil { + if numSources > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("configMap"), "may not specify more than 1 volume type")) + } else { + numSources++ + if len(source.ConfigMap.Name) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("name"), "")) + } + itemsPath := fldPath.Child("items") + for i, kp := range source.ConfigMap.Items { + itemPath := itemsPath.Index(i) + allErrs = append(allErrs, validateKeyToPath(&kp, itemPath)...) 
+ if len(kp.Path) > 0 { + curPath := kp.Path + if !allPaths.Has(curPath) { + allPaths.Insert(curPath) + } else { + allErrs = append(allErrs, field.Invalid(fldPath, source.ConfigMap.Name, "conflicting duplicate paths")) + } + + } + } + } + } + if source.DownwardAPI != nil { + if numSources > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("downwardAPI"), "may not specify more than 1 volume type")) + } else { + numSources++ + for _, file := range source.DownwardAPI.Items { + allErrs = append(allErrs, validateDownwardAPIVolumeFile(&file, fldPath.Child("downwardAPI"))...) + if len(file.Path) > 0 { + curPath := file.Path + if !allPaths.Has(curPath) { + allPaths.Insert(curPath) + } else { + allErrs = append(allErrs, field.Invalid(fldPath, curPath, "conflicting duplicate paths")) + } + + } + } + } } } return allErrs } +func validateProjectedVolumeSource(projection *api.ProjectedVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + projectionMode := projection.DefaultMode + if projectionMode != nil && (*projectionMode > 0777 || *projectionMode < 0) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("defaultMode"), *projectionMode, volumeModeErrorMsg)) + } + + allErrs = append(allErrs, validateProjectionSources(projection, projectionMode, fldPath)...) + return allErrs +} + // This validate will make sure targetPath: // 1. is not abs path // 2. does not have any element which is ".." 
diff --git a/pkg/apis/extensions/types.go b/pkg/apis/extensions/types.go index 0c0a03d99a..f2aa644f7c 100644 --- a/pkg/apis/extensions/types.go +++ b/pkg/apis/extensions/types.go @@ -905,6 +905,7 @@ var ( Quobyte FSType = "quobyte" AzureDisk FSType = "azureDisk" PhotonPersistentDisk FSType = "photonPersistentDisk" + Projected FSType = "projected" All FSType = "*" ) diff --git a/pkg/kubelet/secret/secret_manager.go b/pkg/kubelet/secret/secret_manager.go index a2145e83ce..bb7cae93f0 100644 --- a/pkg/kubelet/secret/secret_manager.go +++ b/pkg/kubelet/secret/secret_manager.go @@ -279,6 +279,12 @@ func getSecretNames(pod *v1.Pod) sets.String { for i := range pod.Spec.Volumes { if source := pod.Spec.Volumes[i].Secret; source != nil { result.Insert(source.SecretName) + } else if source := pod.Spec.Volumes[i].Projected; source != nil { + for j := range source.Sources { + if secretVolumeSource := source.Sources[j].Secret; secretVolumeSource != nil { + result.Insert(secretVolumeSource.Name) + } + } } } return result diff --git a/pkg/security/podsecuritypolicy/util/util.go b/pkg/security/podsecuritypolicy/util/util.go index 95408ee8f9..1d34bba71e 100644 --- a/pkg/security/podsecuritypolicy/util/util.go +++ b/pkg/security/podsecuritypolicy/util/util.go @@ -61,7 +61,9 @@ func GetAllFSTypesAsSet() sets.String { string(extensions.VsphereVolume), string(extensions.Quobyte), string(extensions.AzureDisk), - string(extensions.PhotonPersistentDisk)) + string(extensions.PhotonPersistentDisk), + string(extensions.Projected), + ) return fstypes } @@ -114,6 +116,8 @@ func GetVolumeFSType(v api.Volume) (extensions.FSType, error) { return extensions.AzureDisk, nil case v.PhotonPersistentDisk != nil: return extensions.PhotonPersistentDisk, nil + case v.Projected != nil: + return extensions.Projected, nil } return "", fmt.Errorf("unknown volume type for volume: %#v", v) diff --git a/pkg/volume/configmap/configmap.go b/pkg/volume/configmap/configmap.go index b6b231a2e3..8b90863060 100644 --- 
a/pkg/volume/configmap/configmap.go +++ b/pkg/volume/configmap/configmap.go @@ -193,7 +193,7 @@ func (b *configMapVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { len(configMap.Data), totalBytes) - payload, err := makePayload(b.source.Items, configMap, b.source.DefaultMode, optional) + payload, err := MakePayload(b.source.Items, configMap, b.source.DefaultMode, optional) if err != nil { return err } @@ -220,7 +220,8 @@ func (b *configMapVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { return nil } -func makePayload(mappings []v1.KeyToPath, configMap *v1.ConfigMap, defaultMode *int32, optional bool) (map[string]volumeutil.FileProjection, error) { +// Note: this function is exported so that it can be called from the projection volume driver +func MakePayload(mappings []v1.KeyToPath, configMap *v1.ConfigMap, defaultMode *int32, optional bool) (map[string]volumeutil.FileProjection, error) { if defaultMode == nil { return nil, fmt.Errorf("No defaultMode used, not even the default value for it") } diff --git a/pkg/volume/configmap/configmap_test.go b/pkg/volume/configmap/configmap_test.go index 80fa9b1109..60682b751c 100644 --- a/pkg/volume/configmap/configmap_test.go +++ b/pkg/volume/configmap/configmap_test.go @@ -238,7 +238,7 @@ func TestMakePayload(t *testing.T) { } for _, tc := range cases { - actualPayload, err := makePayload(tc.mappings, tc.configMap, &tc.mode, tc.optional) + actualPayload, err := MakePayload(tc.mappings, tc.configMap, &tc.mode, tc.optional) if err != nil && tc.success { t.Errorf("%v: unexpected failure making payload: %v", tc.name, err) continue diff --git a/pkg/volume/downwardapi/downwardapi.go b/pkg/volume/downwardapi/downwardapi.go index 96548490b5..fa2dace72c 100644 --- a/pkg/volume/downwardapi/downwardapi.go +++ b/pkg/volume/downwardapi/downwardapi.go @@ -175,7 +175,7 @@ func (b *downwardAPIVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { return err } - data, err := b.collectData(b.source.DefaultMode) + data, 
err := CollectData(b.source.Items, b.pod, b.plugin.host, b.source.DefaultMode) if err != nil { glog.Errorf("Error preparing data for downwardAPI volume %v for pod %v/%v: %s", b.volName, b.pod.Namespace, b.pod.Name, err.Error()) return err @@ -203,17 +203,19 @@ func (b *downwardAPIVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { return nil } -// collectData collects requested downwardAPI in data map. +// CollectData collects requested downwardAPI in data map. // Map's key is the requested name of file to dump // Map's value is the (sorted) content of the field to be dumped in the file. -func (d *downwardAPIVolume) collectData(defaultMode *int32) (map[string]volumeutil.FileProjection, error) { +// +// Note: this function is exported so that it can be called from the projection volume driver +func CollectData(items []v1.DownwardAPIVolumeFile, pod *v1.Pod, host volume.VolumeHost, defaultMode *int32) (map[string]volumeutil.FileProjection, error) { if defaultMode == nil { return nil, fmt.Errorf("No defaultMode used, not even the default value for it") } errlist := []error{} data := make(map[string]volumeutil.FileProjection) - for _, fileInfo := range d.items { + for _, fileInfo := range items { var fileProjection volumeutil.FileProjection fPath := path.Clean(fileInfo.Path) if fileInfo.Mode != nil { @@ -223,7 +225,7 @@ func (d *downwardAPIVolume) collectData(defaultMode *int32) (map[string]volumeut } if fileInfo.FieldRef != nil { // TODO: unify with Kubelet.podFieldSelectorRuntimeValue - if values, err := fieldpath.ExtractFieldPathAsString(d.pod, fileInfo.FieldRef.FieldPath); err != nil { + if values, err := fieldpath.ExtractFieldPathAsString(pod, fileInfo.FieldRef.FieldPath); err != nil { glog.Errorf("Unable to extract field %s: %s", fileInfo.FieldRef.FieldPath, err.Error()) errlist = append(errlist, err) } else { @@ -231,10 +233,10 @@ func (d *downwardAPIVolume) collectData(defaultMode *int32) (map[string]volumeut } } else if fileInfo.ResourceFieldRef != nil { 
containerName := fileInfo.ResourceFieldRef.ContainerName - nodeAllocatable, err := d.plugin.host.GetNodeAllocatable() + nodeAllocatable, err := host.GetNodeAllocatable() if err != nil { errlist = append(errlist, err) - } else if values, err := fieldpath.ExtractResourceValueByContainerNameAndNodeAllocatable(fileInfo.ResourceFieldRef, d.pod, containerName, nodeAllocatable); err != nil { + } else if values, err := fieldpath.ExtractResourceValueByContainerNameAndNodeAllocatable(fileInfo.ResourceFieldRef, pod, containerName, nodeAllocatable); err != nil { glog.Errorf("Unable to extract field %s: %s", fileInfo.ResourceFieldRef.Resource, err.Error()) errlist = append(errlist, err) } else { diff --git a/pkg/volume/projected/projected.go b/pkg/volume/projected/projected.go new file mode 100644 index 0000000000..de39d12aa1 --- /dev/null +++ b/pkg/volume/projected/projected.go @@ -0,0 +1,325 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package projected + +import ( + "fmt" + "sort" + "strings" + + "github.com/golang/glog" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/kubernetes/pkg/api/v1" + utilstrings "k8s.io/kubernetes/pkg/util/strings" + "k8s.io/kubernetes/pkg/volume" + "k8s.io/kubernetes/pkg/volume/configmap" + "k8s.io/kubernetes/pkg/volume/downwardapi" + "k8s.io/kubernetes/pkg/volume/secret" + volumeutil "k8s.io/kubernetes/pkg/volume/util" +) + +// ProbeVolumePlugins is the entry point for plugin detection in a package. +func ProbeVolumePlugins() []volume.VolumePlugin { + return []volume.VolumePlugin{&projectedPlugin{}} +} + +const ( + projectedPluginName = "kubernetes.io/projected" +) + +type projectedPlugin struct { + host volume.VolumeHost + getSecret func(namespace, name string) (*v1.Secret, error) +} + +var _ volume.VolumePlugin = &projectedPlugin{} + +func wrappedVolumeSpec() volume.Spec { + return volume.Spec{ + Volume: &v1.Volume{ + VolumeSource: v1.VolumeSource{ + EmptyDir: &v1.EmptyDirVolumeSource{Medium: v1.StorageMediumMemory}, + }, + }, + } +} + +func getPath(uid types.UID, volName string, host volume.VolumeHost) string { + return host.GetPodVolumeDir(uid, utilstrings.EscapeQualifiedNameForDisk(projectedPluginName), volName) +} + +func (plugin *projectedPlugin) Init(host volume.VolumeHost) error { + plugin.host = host + plugin.getSecret = host.GetSecretFunc() + return nil +} + +func (plugin *projectedPlugin) GetPluginName() string { + return projectedPluginName +} + +func (plugin *projectedPlugin) GetVolumeName(spec *volume.Spec) (string, error) { + _, _, err := getVolumeSource(spec) + if err != nil { + return "", err + } + + return spec.Name(), nil +} + +func (plugin *projectedPlugin) CanSupport(spec *volume.Spec) bool { + return spec.Volume != nil && spec.Volume.Projected != nil +} + +func (plugin *projectedPlugin) RequiresRemount() 
bool {
	return true
}

// NewMounter returns a Mounter that projects the volume's sources into the
// pod's volume directory. The caller is expected to have checked CanSupport,
// so spec.Volume.Projected is dereferenced without a nil check here.
func (plugin *projectedPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
	return &projectedVolumeMounter{
		projectedVolume: &projectedVolume{
			volName: spec.Name(),
			sources: spec.Volume.Projected.Sources,
			podUID:  pod.UID,
			plugin:  plugin,
		},
		source: *spec.Volume.Projected,
		pod:    pod,
		opts:   &opts,
	}, nil
}

// NewUnmounter returns an Unmounter for the named volume of the given pod.
// Only the identity fields are populated; no source data is needed to tear down.
func (plugin *projectedPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
	return &projectedVolumeUnmounter{
		&projectedVolume{
			volName: volName,
			podUID:  podUID,
			plugin:  plugin,
		},
	}, nil
}

// ConstructVolumeSpec rebuilds a minimal spec carrying only the volume name;
// the returned ProjectedVolumeSource has no sources. mountPath is unused.
func (plugin *projectedPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
	projectedVolume := &v1.Volume{
		Name: volumeName,
		VolumeSource: v1.VolumeSource{
			Projected: &v1.ProjectedVolumeSource{},
		},
	}

	return volume.NewSpecFromVolume(projectedVolume), nil
}

// projectedVolume identifies one projected volume instance for one pod.
type projectedVolume struct {
	volName string                // volume name from the pod spec
	sources []v1.VolumeProjection // projection sources (secret/configMap/downwardAPI)
	podUID  types.UID             // UID of the owning pod
	plugin  *projectedPlugin      // back-reference for host access
	volume.MetricsNil
}

var _ volume.Volume = &projectedVolume{}

// GetPath returns the pod-local directory this volume is projected into.
func (sv *projectedVolume) GetPath() string {
	return getPath(sv.podUID, sv.volName, sv.plugin.host)
}

// projectedVolumeMounter mounts a projected volume for a specific pod.
type projectedVolumeMounter struct {
	*projectedVolume

	source v1.ProjectedVolumeSource // the full projected source from the pod spec
	pod    *v1.Pod                  // pod the volume is mounted for
	opts   *volume.VolumeOptions    // mount options passed by the kubelet
}

var _ volume.Mounter = &projectedVolumeMounter{}

// GetAttributes reports the volume as read-only, managed, and SELinux-capable.
func (sv *projectedVolume) GetAttributes() volume.Attributes {
	return volume.Attributes{
		ReadOnly:        true,
		Managed:         true,
		SupportsSELinux: true,
	}

}

// Checks prior to mount operations to verify that the required components (binaries, etc.)
// to mount the volume are available on the underlying node.
+// If not, it returns an error +func (s *projectedVolumeMounter) CanMount() error { + return nil +} + +func (s *projectedVolumeMounter) SetUp(fsGroup *int64) error { + return s.SetUpAt(s.GetPath(), fsGroup) +} + +func (s *projectedVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { + glog.V(3).Infof("Setting up volume %v for pod %v at %v", s.volName, s.pod.UID, dir) + + wrapped, err := s.plugin.host.NewWrapperMounter(s.volName, wrappedVolumeSpec(), s.pod, *s.opts) + if err != nil { + return err + } + if err := wrapped.SetUpAt(dir, fsGroup); err != nil { + return err + } + + data, err := s.collectData() + if err != nil { + glog.Errorf("Error preparing data for projected volume %v for pod %v/%v: %s", s.volName, s.pod.Namespace, s.pod.Name, err.Error()) + } + + writerContext := fmt.Sprintf("pod %v/%v volume %v", s.pod.Namespace, s.pod.Name, s.volName) + writer, err := volumeutil.NewAtomicWriter(dir, writerContext) + if err != nil { + glog.Errorf("Error creating atomic writer: %v", err) + return err + } + + err = writer.Write(data) + if err != nil { + glog.Errorf("Error writing payload to dir: %v", err) + return err + } + + err = volume.SetVolumeOwnership(s, fsGroup) + if err != nil { + glog.Errorf("Error applying volume ownership settings for group: %v", fsGroup) + return err + } + + return nil +} + +func (s *projectedVolumeMounter) collectData() (map[string]volumeutil.FileProjection, error) { + if s.source.DefaultMode == nil { + return nil, fmt.Errorf("No defaultMode used, not even the default value for it") + } + + kubeClient := s.plugin.host.GetKubeClient() + if kubeClient == nil { + return nil, fmt.Errorf("Cannot setup projected volume %v because kube client is not configured", s.volName) + } + + errlist := []error{} + payload := make(map[string]volumeutil.FileProjection) + for _, source := range s.source.Sources { + if source.Secret != nil { + optional := source.Secret.Optional != nil && *source.Secret.Optional + secretapi, err := 
s.plugin.getSecret(s.pod.Namespace, source.Secret.Name) + if err != nil { + if !(errors.IsNotFound(err) && optional) { + glog.Errorf("Couldn't get secret %v/%v", s.pod.Namespace, source.Secret.Name) + errlist = append(errlist, err) + } + + secretapi = &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: s.pod.Namespace, + Name: source.Secret.Name, + }, + } + } + secretPayload, err := secret.MakePayload(source.Secret.Items, secretapi, s.source.DefaultMode, optional) + if err != nil { + glog.Errorf("Couldn't get secret %v/%v: %v", s.pod.Namespace, source.Secret.Name, err) + errlist = append(errlist, err) + continue + } + + for k, v := range secretPayload { + payload[k] = v + } + } else if source.ConfigMap != nil { + optional := source.ConfigMap.Optional != nil && *source.ConfigMap.Optional + configMap, err := kubeClient.Core().ConfigMaps(s.pod.Namespace).Get(source.ConfigMap.Name, metav1.GetOptions{}) + if err != nil { + if !(errors.IsNotFound(err) && optional) { + glog.Errorf("Couldn't get configMap %v/%v: %v", s.pod.Namespace, source.ConfigMap.Name, err) + errlist = append(errlist, err) + continue + } + configMap = &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: s.pod.Namespace, + Name: source.ConfigMap.Name, + }, + } + } + configMapPayload, err := configmap.MakePayload(source.ConfigMap.Items, configMap, s.source.DefaultMode, optional) + if err != nil { + errlist = append(errlist, err) + continue + } + for k, v := range configMapPayload { + payload[k] = v + } + } else if source.DownwardAPI != nil { + downwardAPIPayload, err := downwardapi.CollectData(source.DownwardAPI.Items, s.pod, s.plugin.host, s.source.DefaultMode) + if err != nil { + errlist = append(errlist, err) + continue + } + for k, v := range downwardAPIPayload { + payload[k] = v + } + } + } + return payload, utilerrors.NewAggregate(errlist) +} + +func sortLines(values string) string { + splitted := strings.Split(values, "\n") + sort.Strings(splitted) + return strings.Join(splitted, 
"\n") +} + +type projectedVolumeUnmounter struct { + *projectedVolume +} + +var _ volume.Unmounter = &projectedVolumeUnmounter{} + +func (c *projectedVolumeUnmounter) TearDown() error { + return c.TearDownAt(c.GetPath()) +} + +func (c *projectedVolumeUnmounter) TearDownAt(dir string) error { + glog.V(3).Infof("Tearing down volume %v for pod %v at %v", c.volName, c.podUID, dir) + + wrapped, err := c.plugin.host.NewWrapperUnmounter(c.volName, wrappedVolumeSpec(), c.podUID) + if err != nil { + return err + } + return wrapped.TearDownAt(dir) +} + +func getVolumeSource(spec *volume.Spec) (*v1.ProjectedVolumeSource, bool, error) { + var readOnly bool + var volumeSource *v1.ProjectedVolumeSource + + if spec.Volume != nil && spec.Volume.Projected != nil { + volumeSource = spec.Volume.Projected + readOnly = spec.ReadOnly + } + + return volumeSource, readOnly, fmt.Errorf("Spec does not reference a projected volume type") +} diff --git a/pkg/volume/projected/projected_test.go b/pkg/volume/projected/projected_test.go new file mode 100644 index 0000000000..a8d1f4a837 --- /dev/null +++ b/pkg/volume/projected/projected_test.go @@ -0,0 +1,1046 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package projected + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "reflect" + "strings" + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" + "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake" + "k8s.io/kubernetes/pkg/volume" + "k8s.io/kubernetes/pkg/volume/empty_dir" + volumetest "k8s.io/kubernetes/pkg/volume/testing" + "k8s.io/kubernetes/pkg/volume/util" +) + +func TestCollectDataWithSecret(t *testing.T) { + caseMappingMode := int32(0400) + cases := []struct { + name string + mappings []v1.KeyToPath + secret *v1.Secret + mode int32 + optional bool + payload map[string]util.FileProjection + success bool + }{ + { + name: "no overrides", + secret: &v1.Secret{ + Data: map[string][]byte{ + "foo": []byte("foo"), + "bar": []byte("bar"), + }, + }, + mode: 0644, + payload: map[string]util.FileProjection{ + "foo": {Data: []byte("foo"), Mode: 0644}, + "bar": {Data: []byte("bar"), Mode: 0644}, + }, + success: true, + }, + { + name: "basic 1", + mappings: []v1.KeyToPath{ + { + Key: "foo", + Path: "path/to/foo.txt", + }, + }, + secret: &v1.Secret{ + Data: map[string][]byte{ + "foo": []byte("foo"), + "bar": []byte("bar"), + }, + }, + mode: 0644, + payload: map[string]util.FileProjection{ + "path/to/foo.txt": {Data: []byte("foo"), Mode: 0644}, + }, + success: true, + }, + { + name: "subdirs", + mappings: []v1.KeyToPath{ + { + Key: "foo", + Path: "path/to/1/2/3/foo.txt", + }, + }, + secret: &v1.Secret{ + Data: map[string][]byte{ + "foo": []byte("foo"), + "bar": []byte("bar"), + }, + }, + mode: 0644, + payload: map[string]util.FileProjection{ + "path/to/1/2/3/foo.txt": {Data: []byte("foo"), Mode: 0644}, + }, + success: true, + }, + { + name: "subdirs 2", + mappings: []v1.KeyToPath{ + { + Key: "foo", + Path: "path/to/1/2/3/foo.txt", + }, + }, + secret: &v1.Secret{ + Data: map[string][]byte{ + "foo": []byte("foo"), + 
"bar": []byte("bar"), + }, + }, + mode: 0644, + payload: map[string]util.FileProjection{ + "path/to/1/2/3/foo.txt": {Data: []byte("foo"), Mode: 0644}, + }, + success: true, + }, + { + name: "subdirs 3", + mappings: []v1.KeyToPath{ + { + Key: "foo", + Path: "path/to/1/2/3/foo.txt", + }, + { + Key: "bar", + Path: "another/path/to/the/esteemed/bar.bin", + }, + }, + secret: &v1.Secret{ + Data: map[string][]byte{ + "foo": []byte("foo"), + "bar": []byte("bar"), + }, + }, + mode: 0644, + payload: map[string]util.FileProjection{ + "path/to/1/2/3/foo.txt": {Data: []byte("foo"), Mode: 0644}, + "another/path/to/the/esteemed/bar.bin": {Data: []byte("bar"), Mode: 0644}, + }, + success: true, + }, + { + name: "non existent key", + mappings: []v1.KeyToPath{ + { + Key: "zab", + Path: "path/to/foo.txt", + }, + }, + secret: &v1.Secret{ + Data: map[string][]byte{ + "foo": []byte("foo"), + "bar": []byte("bar"), + }, + }, + mode: 0644, + success: false, + }, + { + name: "mapping with Mode", + mappings: []v1.KeyToPath{ + { + Key: "foo", + Path: "foo.txt", + Mode: &caseMappingMode, + }, + { + Key: "bar", + Path: "bar.bin", + Mode: &caseMappingMode, + }, + }, + secret: &v1.Secret{ + Data: map[string][]byte{ + "foo": []byte("foo"), + "bar": []byte("bar"), + }, + }, + mode: 0644, + payload: map[string]util.FileProjection{ + "foo.txt": {Data: []byte("foo"), Mode: caseMappingMode}, + "bar.bin": {Data: []byte("bar"), Mode: caseMappingMode}, + }, + success: true, + }, + { + name: "mapping with defaultMode", + mappings: []v1.KeyToPath{ + { + Key: "foo", + Path: "foo.txt", + }, + { + Key: "bar", + Path: "bar.bin", + }, + }, + secret: &v1.Secret{ + Data: map[string][]byte{ + "foo": []byte("foo"), + "bar": []byte("bar"), + }, + }, + mode: 0644, + payload: map[string]util.FileProjection{ + "foo.txt": {Data: []byte("foo"), Mode: 0644}, + "bar.bin": {Data: []byte("bar"), Mode: 0644}, + }, + success: true, + }, + { + name: "optional non existent key", + mappings: []v1.KeyToPath{ + { + Key: "zab", + 
Path: "path/to/foo.txt", + }, + }, + secret: &v1.Secret{ + Data: map[string][]byte{ + "foo": []byte("foo"), + "bar": []byte("bar"), + }, + }, + mode: 0644, + optional: true, + payload: map[string]util.FileProjection{}, + success: true, + }, + } + + for _, tc := range cases { + testNamespace := "test_projected_namespace" + tc.secret.ObjectMeta = metav1.ObjectMeta{ + Namespace: testNamespace, + Name: tc.name, + } + + source := makeProjection(tc.name, tc.mode, "secret") + source.Sources[0].Secret.Items = tc.mappings + source.Sources[0].Secret.Optional = &tc.optional + + testPodUID := types.UID("test_pod_uid") + pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, UID: testPodUID}} + client := fake.NewSimpleClientset(tc.secret) + _, host := newTestHost(t, client) + + var myVolumeMounter = projectedVolumeMounter{ + projectedVolume: &projectedVolume{ + sources: source.Sources, + podUID: pod.UID, + plugin: &projectedPlugin{ + host: host, + getSecret: host.GetSecretFunc(), + }, + }, + source: *source, + pod: pod, + } + + actualPayload, err := myVolumeMounter.collectData() + if err != nil && tc.success { + t.Errorf("%v: unexpected failure making payload: %v", tc.name, err) + continue + } + if err == nil && !tc.success { + t.Errorf("%v: unexpected success making payload", tc.name) + continue + } + if !tc.success { + continue + } + if e, a := tc.payload, actualPayload; !reflect.DeepEqual(e, a) { + t.Errorf("%v: expected and actual payload do not match", tc.name) + } + } +} + +func TestCollectDataWithConfigMap(t *testing.T) { + caseMappingMode := int32(0400) + cases := []struct { + name string + mappings []v1.KeyToPath + configMap *v1.ConfigMap + mode int32 + optional bool + payload map[string]util.FileProjection + success bool + }{ + { + name: "no overrides", + configMap: &v1.ConfigMap{ + Data: map[string]string{ + "foo": "foo", + "bar": "bar", + }, + }, + mode: 0644, + payload: map[string]util.FileProjection{ + "foo": {Data: []byte("foo"), Mode: 0644}, + 
"bar": {Data: []byte("bar"), Mode: 0644}, + }, + success: true, + }, + { + name: "basic 1", + mappings: []v1.KeyToPath{ + { + Key: "foo", + Path: "path/to/foo.txt", + }, + }, + configMap: &v1.ConfigMap{ + Data: map[string]string{ + "foo": "foo", + "bar": "bar", + }, + }, + mode: 0644, + payload: map[string]util.FileProjection{ + "path/to/foo.txt": {Data: []byte("foo"), Mode: 0644}, + }, + success: true, + }, + { + name: "subdirs", + mappings: []v1.KeyToPath{ + { + Key: "foo", + Path: "path/to/1/2/3/foo.txt", + }, + }, + configMap: &v1.ConfigMap{ + Data: map[string]string{ + "foo": "foo", + "bar": "bar", + }, + }, + mode: 0644, + payload: map[string]util.FileProjection{ + "path/to/1/2/3/foo.txt": {Data: []byte("foo"), Mode: 0644}, + }, + success: true, + }, + { + name: "subdirs 2", + mappings: []v1.KeyToPath{ + { + Key: "foo", + Path: "path/to/1/2/3/foo.txt", + }, + }, + configMap: &v1.ConfigMap{ + Data: map[string]string{ + "foo": "foo", + "bar": "bar", + }, + }, + mode: 0644, + payload: map[string]util.FileProjection{ + "path/to/1/2/3/foo.txt": {Data: []byte("foo"), Mode: 0644}, + }, + success: true, + }, + { + name: "subdirs 3", + mappings: []v1.KeyToPath{ + { + Key: "foo", + Path: "path/to/1/2/3/foo.txt", + }, + { + Key: "bar", + Path: "another/path/to/the/esteemed/bar.bin", + }, + }, + configMap: &v1.ConfigMap{ + Data: map[string]string{ + "foo": "foo", + "bar": "bar", + }, + }, + mode: 0644, + payload: map[string]util.FileProjection{ + "path/to/1/2/3/foo.txt": {Data: []byte("foo"), Mode: 0644}, + "another/path/to/the/esteemed/bar.bin": {Data: []byte("bar"), Mode: 0644}, + }, + success: true, + }, + { + name: "non existent key", + mappings: []v1.KeyToPath{ + { + Key: "zab", + Path: "path/to/foo.txt", + }, + }, + configMap: &v1.ConfigMap{ + Data: map[string]string{ + "foo": "foo", + "bar": "bar", + }, + }, + mode: 0644, + success: false, + }, + { + name: "mapping with Mode", + mappings: []v1.KeyToPath{ + { + Key: "foo", + Path: "foo.txt", + Mode: 
&caseMappingMode, + }, + { + Key: "bar", + Path: "bar.bin", + Mode: &caseMappingMode, + }, + }, + configMap: &v1.ConfigMap{ + Data: map[string]string{ + "foo": "foo", + "bar": "bar", + }, + }, + mode: 0644, + payload: map[string]util.FileProjection{ + "foo.txt": {Data: []byte("foo"), Mode: caseMappingMode}, + "bar.bin": {Data: []byte("bar"), Mode: caseMappingMode}, + }, + success: true, + }, + { + name: "mapping with defaultMode", + mappings: []v1.KeyToPath{ + { + Key: "foo", + Path: "foo.txt", + }, + { + Key: "bar", + Path: "bar.bin", + }, + }, + configMap: &v1.ConfigMap{ + Data: map[string]string{ + "foo": "foo", + "bar": "bar", + }, + }, + mode: 0644, + payload: map[string]util.FileProjection{ + "foo.txt": {Data: []byte("foo"), Mode: 0644}, + "bar.bin": {Data: []byte("bar"), Mode: 0644}, + }, + success: true, + }, + { + name: "optional non existent key", + mappings: []v1.KeyToPath{ + { + Key: "zab", + Path: "path/to/foo.txt", + }, + }, + configMap: &v1.ConfigMap{ + Data: map[string]string{ + "foo": "foo", + "bar": "bar", + }, + }, + mode: 0644, + optional: true, + payload: map[string]util.FileProjection{}, + success: true, + }, + } + for _, tc := range cases { + testNamespace := "test_projected_namespace" + tc.configMap.ObjectMeta = metav1.ObjectMeta{ + Namespace: testNamespace, + Name: tc.name, + } + + source := makeProjection(tc.name, tc.mode, "configMap") + source.Sources[0].ConfigMap.Items = tc.mappings + source.Sources[0].ConfigMap.Optional = &tc.optional + + testPodUID := types.UID("test_pod_uid") + pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, UID: testPodUID}} + client := fake.NewSimpleClientset(tc.configMap) + _, host := newTestHost(t, client) + + var myVolumeMounter = projectedVolumeMounter{ + projectedVolume: &projectedVolume{ + sources: source.Sources, + podUID: pod.UID, + plugin: &projectedPlugin{ + host: host, + }, + }, + source: *source, + pod: pod, + } + + actualPayload, err := myVolumeMounter.collectData() + if err != 
nil && tc.success { + t.Errorf("%v: unexpected failure making payload: %v", tc.name, err) + continue + } + if err == nil && !tc.success { + t.Errorf("%v: unexpected success making payload", tc.name) + continue + } + if !tc.success { + continue + } + if e, a := tc.payload, actualPayload; !reflect.DeepEqual(e, a) { + t.Errorf("%v: expected and actual payload do not match", tc.name) + } + } +} + +func TestCollectDataWithDownwardAPI(t *testing.T) { + testNamespace := "test_projected_namespace" + testPodUID := types.UID("test_pod_uid") + testPodName := "podName" + + cases := []struct { + name string + volumeFile []v1.DownwardAPIVolumeFile + pod *v1.Pod + mode int32 + payload map[string]util.FileProjection + success bool + }{ + { + name: "labels", + volumeFile: []v1.DownwardAPIVolumeFile{ + {Path: "labels", FieldRef: &v1.ObjectFieldSelector{ + FieldPath: "metadata.labels"}}}, + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: testPodName, + Namespace: testNamespace, + Labels: map[string]string{ + "key1": "value1", + "key2": "value2"}, + UID: testPodUID}, + }, + mode: 0644, + payload: map[string]util.FileProjection{ + "labels": {Data: []byte("key1=\"value1\"\nkey2=\"value2\""), Mode: 0644}, + }, + success: true, + }, + { + name: "annotations", + volumeFile: []v1.DownwardAPIVolumeFile{ + {Path: "annotations", FieldRef: &v1.ObjectFieldSelector{ + FieldPath: "metadata.annotations"}}}, + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: testPodName, + Namespace: testNamespace, + Annotations: map[string]string{ + "a1": "value1", + "a2": "value2"}, + UID: testPodUID}, + }, + mode: 0644, + payload: map[string]util.FileProjection{ + "annotations": {Data: []byte("a1=\"value1\"\na2=\"value2\""), Mode: 0644}, + }, + success: true, + }, + { + name: "name", + volumeFile: []v1.DownwardAPIVolumeFile{ + {Path: "name_file_name", FieldRef: &v1.ObjectFieldSelector{ + FieldPath: "metadata.name"}}}, + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: testPodName, + Namespace: 
testNamespace, + UID: testPodUID}, + }, + mode: 0644, + payload: map[string]util.FileProjection{ + "name_file_name": {Data: []byte(testPodName), Mode: 0644}, + }, + success: true, + }, + { + name: "namespace", + volumeFile: []v1.DownwardAPIVolumeFile{ + {Path: "namespace_file_name", FieldRef: &v1.ObjectFieldSelector{ + FieldPath: "metadata.namespace"}}}, + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: testPodName, + Namespace: testNamespace, + UID: testPodUID}, + }, + mode: 0644, + payload: map[string]util.FileProjection{ + "namespace_file_name": {Data: []byte(testNamespace), Mode: 0644}, + }, + success: true, + }, + } + + for _, tc := range cases { + source := makeProjection("", tc.mode, "downwardAPI") + source.Sources[0].DownwardAPI.Items = tc.volumeFile + + client := fake.NewSimpleClientset(tc.pod) + _, host := newTestHost(t, client) + + var myVolumeMounter = projectedVolumeMounter{ + projectedVolume: &projectedVolume{ + sources: source.Sources, + podUID: tc.pod.UID, + plugin: &projectedPlugin{ + host: host, + }, + }, + source: *source, + pod: tc.pod, + } + + actualPayload, err := myVolumeMounter.collectData() + if err != nil && tc.success { + t.Errorf("%v: unexpected failure making payload: %v", tc.name, err) + continue + } + if err == nil && !tc.success { + t.Errorf("%v: unexpected success making payload", tc.name) + continue + } + if !tc.success { + continue + } + if e, a := tc.payload, actualPayload; !reflect.DeepEqual(e, a) { + t.Errorf("%v: expected and actual payload do not match", tc.name) + } + } +} + +func newTestHost(t *testing.T, clientset clientset.Interface) (string, volume.VolumeHost) { + tempDir, err := ioutil.TempDir("/tmp", "projected_volume_test.") + if err != nil { + t.Fatalf("can't make a temp rootdir: %v", err) + } + + return tempDir, volumetest.NewFakeVolumeHost(tempDir, clientset, empty_dir.ProbeVolumePlugins()) +} + +func TestCanSupport(t *testing.T) { + pluginMgr := volume.VolumePluginMgr{} + tempDir, host := newTestHost(t, 
nil) + defer os.RemoveAll(tempDir) + pluginMgr.InitPlugins(ProbeVolumePlugins(), host) + + plugin, err := pluginMgr.FindPluginByName(projectedPluginName) + if err != nil { + t.Errorf("Can't find the plugin by name") + } + if plugin.GetPluginName() != projectedPluginName { + t.Errorf("Wrong name: %s", plugin.GetPluginName()) + } + if !plugin.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{Projected: &v1.ProjectedVolumeSource{}}}}) { + t.Errorf("Expected true") + } + if plugin.CanSupport(&volume.Spec{}) { + t.Errorf("Expected false") + } +} + +func TestPlugin(t *testing.T) { + var ( + testPodUID = types.UID("test_pod_uid") + testVolumeName = "test_volume_name" + testNamespace = "test_projected_namespace" + testName = "test_projected_name" + + volumeSpec = makeVolumeSpec(testVolumeName, testName, 0644) + secret = makeSecret(testNamespace, testName) + client = fake.NewSimpleClientset(&secret) + pluginMgr = volume.VolumePluginMgr{} + rootDir, host = newTestHost(t, client) + ) + defer os.RemoveAll(rootDir) + pluginMgr.InitPlugins(ProbeVolumePlugins(), host) + + plugin, err := pluginMgr.FindPluginByName(projectedPluginName) + if err != nil { + t.Errorf("Can't find the plugin by name") + } + + pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, UID: testPodUID}} + mounter, err := plugin.NewMounter(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{}) + if err != nil { + t.Errorf("Failed to make a new Mounter: %v", err) + } + if mounter == nil { + t.Errorf("Got a nil Mounter") + } + + volumePath := mounter.GetPath() + if !strings.HasSuffix(volumePath, fmt.Sprintf("pods/test_pod_uid/volumes/kubernetes.io~projected/%s", testVolumeName)) { + t.Errorf("Got unexpected path: %s", volumePath) + } + + err = mounter.SetUp(nil) + if err != nil { + t.Errorf("Failed to setup volume: %v", err) + } + if _, err := os.Stat(volumePath); err != nil { + if os.IsNotExist(err) { + t.Errorf("SetUp() failed, volume path not created: %s", 
volumePath) + } else { + t.Errorf("SetUp() failed: %v", err) + } + } + + // secret volume should create its own empty wrapper path + podWrapperMetadataDir := fmt.Sprintf("%v/pods/test_pod_uid/plugins/kubernetes.io~empty-dir/wrapped_test_volume_name", rootDir) + + if _, err := os.Stat(podWrapperMetadataDir); err != nil { + if os.IsNotExist(err) { + t.Errorf("SetUp() failed, empty-dir wrapper path is not created: %s", podWrapperMetadataDir) + } else { + t.Errorf("SetUp() failed: %v", err) + } + } + doTestSecretDataInVolume(volumePath, secret, t) + defer doTestCleanAndTeardown(plugin, testPodUID, testVolumeName, volumePath, t) +} + +// Test the case where the plugin's ready file exists, but the volume dir is not a +// mountpoint, which is the state the system will be in after reboot. The dir +// should be mounter and the secret data written to it. +func TestPluginReboot(t *testing.T) { + var ( + testPodUID = types.UID("test_pod_uid3") + testVolumeName = "test_volume_name" + testNamespace = "test_secret_namespace" + testName = "test_secret_name" + + volumeSpec = makeVolumeSpec(testVolumeName, testName, 0644) + secret = makeSecret(testNamespace, testName) + client = fake.NewSimpleClientset(&secret) + pluginMgr = volume.VolumePluginMgr{} + rootDir, host = newTestHost(t, client) + ) + defer os.RemoveAll(rootDir) + pluginMgr.InitPlugins(ProbeVolumePlugins(), host) + + plugin, err := pluginMgr.FindPluginByName(projectedPluginName) + if err != nil { + t.Errorf("Can't find the plugin by name") + } + + pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, UID: testPodUID}} + mounter, err := plugin.NewMounter(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{}) + if err != nil { + t.Errorf("Failed to make a new Mounter: %v", err) + } + if mounter == nil { + t.Errorf("Got a nil Mounter") + } + + podMetadataDir := fmt.Sprintf("%v/pods/test_pod_uid3/plugins/kubernetes.io~projected/test_volume_name", rootDir) + util.SetReady(podMetadataDir) + 
volumePath := mounter.GetPath() + if !strings.HasSuffix(volumePath, fmt.Sprintf("pods/test_pod_uid3/volumes/kubernetes.io~projected/test_volume_name")) { + t.Errorf("Got unexpected path: %s", volumePath) + } + + err = mounter.SetUp(nil) + if err != nil { + t.Errorf("Failed to setup volume: %v", err) + } + if _, err := os.Stat(volumePath); err != nil { + if os.IsNotExist(err) { + t.Errorf("SetUp() failed, volume path not created: %s", volumePath) + } else { + t.Errorf("SetUp() failed: %v", err) + } + } + + doTestSecretDataInVolume(volumePath, secret, t) + doTestCleanAndTeardown(plugin, testPodUID, testVolumeName, volumePath, t) +} + +func TestPluginOptional(t *testing.T) { + var ( + testPodUID = types.UID("test_pod_uid") + testVolumeName = "test_volume_name" + testNamespace = "test_secret_namespace" + testName = "test_secret_name" + trueVal = true + + volumeSpec = makeVolumeSpec(testVolumeName, testName, 0644) + client = fake.NewSimpleClientset() + pluginMgr = volume.VolumePluginMgr{} + rootDir, host = newTestHost(t, client) + ) + volumeSpec.VolumeSource.Projected.Sources[0].Secret.Optional = &trueVal + defer os.RemoveAll(rootDir) + pluginMgr.InitPlugins(ProbeVolumePlugins(), host) + + plugin, err := pluginMgr.FindPluginByName(projectedPluginName) + if err != nil { + t.Errorf("Can't find the plugin by name") + } + + pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, UID: testPodUID}} + mounter, err := plugin.NewMounter(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{}) + if err != nil { + t.Errorf("Failed to make a new Mounter: %v", err) + } + if mounter == nil { + t.Errorf("Got a nil Mounter") + } + + volumePath := mounter.GetPath() + if !strings.HasSuffix(volumePath, fmt.Sprintf("pods/test_pod_uid/volumes/kubernetes.io~projected/test_volume_name")) { + t.Errorf("Got unexpected path: %s", volumePath) + } + + err = mounter.SetUp(nil) + if err != nil { + t.Errorf("Failed to setup volume: %v", err) + } + if _, err := 
os.Stat(volumePath); err != nil { + if os.IsNotExist(err) { + t.Errorf("SetUp() failed, volume path not created: %s", volumePath) + } else { + t.Errorf("SetUp() failed: %v", err) + } + } + + // secret volume should create its own empty wrapper path + podWrapperMetadataDir := fmt.Sprintf("%v/pods/test_pod_uid/plugins/kubernetes.io~empty-dir/wrapped_test_volume_name", rootDir) + + if _, err := os.Stat(podWrapperMetadataDir); err != nil { + if os.IsNotExist(err) { + t.Errorf("SetUp() failed, empty-dir wrapper path is not created: %s", podWrapperMetadataDir) + } else { + t.Errorf("SetUp() failed: %v", err) + } + } + + infos, err := ioutil.ReadDir(volumePath) + if err != nil { + t.Fatalf("couldn't find volume path, %s", volumePath) + } + if len(infos) != 0 { + t.Errorf("empty directory, %s, not found", volumePath) + } + + defer doTestCleanAndTeardown(plugin, testPodUID, testVolumeName, volumePath, t) +} + +func TestPluginOptionalKeys(t *testing.T) { + var ( + testPodUID = types.UID("test_pod_uid") + testVolumeName = "test_volume_name" + testNamespace = "test_secret_namespace" + testName = "test_secret_name" + trueVal = true + + volumeSpec = makeVolumeSpec(testVolumeName, testName, 0644) + secret = makeSecret(testNamespace, testName) + client = fake.NewSimpleClientset(&secret) + pluginMgr = volume.VolumePluginMgr{} + rootDir, host = newTestHost(t, client) + ) + volumeSpec.VolumeSource.Projected.Sources[0].Secret.Items = []v1.KeyToPath{ + {Key: "data-1", Path: "data-1"}, + {Key: "data-2", Path: "data-2"}, + {Key: "data-3", Path: "data-3"}, + {Key: "missing", Path: "missing"}, + } + volumeSpec.VolumeSource.Projected.Sources[0].Secret.Optional = &trueVal + defer os.RemoveAll(rootDir) + pluginMgr.InitPlugins(ProbeVolumePlugins(), host) + + plugin, err := pluginMgr.FindPluginByName(projectedPluginName) + if err != nil { + t.Errorf("Can't find the plugin by name") + } + + pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, UID: testPodUID}} + mounter, err := 
plugin.NewMounter(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{}) + if err != nil { + t.Errorf("Failed to make a new Mounter: %v", err) + } + if mounter == nil { + t.Errorf("Got a nil Mounter") + } + + volumePath := mounter.GetPath() + if !strings.HasSuffix(volumePath, fmt.Sprintf("pods/test_pod_uid/volumes/kubernetes.io~projected/test_volume_name")) { + t.Errorf("Got unexpected path: %s", volumePath) + } + + err = mounter.SetUp(nil) + if err != nil { + t.Errorf("Failed to setup volume: %v", err) + } + if _, err := os.Stat(volumePath); err != nil { + if os.IsNotExist(err) { + t.Errorf("SetUp() failed, volume path not created: %s", volumePath) + } else { + t.Errorf("SetUp() failed: %v", err) + } + } + + // secret volume should create its own empty wrapper path + podWrapperMetadataDir := fmt.Sprintf("%v/pods/test_pod_uid/plugins/kubernetes.io~empty-dir/wrapped_test_volume_name", rootDir) + + if _, err := os.Stat(podWrapperMetadataDir); err != nil { + if os.IsNotExist(err) { + t.Errorf("SetUp() failed, empty-dir wrapper path is not created: %s", podWrapperMetadataDir) + } else { + t.Errorf("SetUp() failed: %v", err) + } + } + doTestSecretDataInVolume(volumePath, secret, t) + defer doTestCleanAndTeardown(plugin, testPodUID, testVolumeName, volumePath, t) +} + +func makeVolumeSpec(volumeName, name string, defaultMode int32) *v1.Volume { + return &v1.Volume{ + Name: volumeName, + VolumeSource: v1.VolumeSource{ + Projected: makeProjection(name, defaultMode, "secret"), + }, + } +} + +func makeSecret(namespace, name string) v1.Secret { + return v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: name, + }, + Data: map[string][]byte{ + "data-1": []byte("value-1"), + "data-2": []byte("value-2"), + "data-3": []byte("value-3"), + }, + } +} + +func configMap(namespace, name string) v1.ConfigMap { + return v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: name, + }, + Data: map[string]string{ + "data-1": 
	"value-1",
			"data-2": "value-2",
			"data-3": "value-3",
		},
	}
}

// makeProjection builds a ProjectedVolumeSource with a single source of the
// given kind ("configMap", "secret", or "downwardAPI") referencing name, and
// the supplied defaultMode. An unrecognized kind yields a zero-value source.
func makeProjection(name string, defaultMode int32, kind string) *v1.ProjectedVolumeSource {
	var item v1.VolumeProjection

	switch kind {
	case "configMap":
		item = v1.VolumeProjection{
			ConfigMap: &v1.ConfigMapProjection{
				LocalObjectReference: v1.LocalObjectReference{Name: name},
			},
		}
	case "secret":
		item = v1.VolumeProjection{
			Secret: &v1.SecretProjection{
				LocalObjectReference: v1.LocalObjectReference{Name: name},
			},
		}
	case "downwardAPI":
		item = v1.VolumeProjection{
			DownwardAPI: &v1.DownwardAPIProjection{},
		}
	}

	return &v1.ProjectedVolumeSource{
		Sources:     []v1.VolumeProjection{item},
		DefaultMode: &defaultMode,
	}
}

// doTestSecretDataInVolume asserts that every key of secret.Data exists as a
// file under volumePath whose content equals the secret's value for that key.
func doTestSecretDataInVolume(volumePath string, secret v1.Secret, t *testing.T) {
	for key, value := range secret.Data {
		secretDataHostPath := path.Join(volumePath, key)
		if _, err := os.Stat(secretDataHostPath); err != nil {
			t.Fatalf("SetUp() failed, couldn't find secret data on disk: %v", secretDataHostPath)
		} else {
			actualSecretBytes, err := ioutil.ReadFile(secretDataHostPath)
			if err != nil {
				t.Fatalf("Couldn't read secret data from: %v", secretDataHostPath)
			}

			actualSecretValue := string(actualSecretBytes)
			if string(value) != actualSecretValue {
				t.Errorf("Unexpected value; expected %q, got %q", value, actualSecretValue)
			}
		}
	}
}

// doTestCleanAndTeardown unmounts the named volume via the plugin's Unmounter
// and verifies the volume path has been removed.
func doTestCleanAndTeardown(plugin volume.VolumePlugin, podUID types.UID, testVolumeName, volumePath string, t *testing.T) {
	unmounter, err := plugin.NewUnmounter(testVolumeName, podUID)
	if err != nil {
		t.Errorf("Failed to make a new Unmounter: %v", err)
	}
	if unmounter == nil {
		t.Errorf("Got a nil Unmounter")
	}

	if err := unmounter.TearDown(); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(volumePath); err == nil {
		t.Errorf("TearDown() failed, volume path still exists: %s", volumePath)
	} else if !os.IsNotExist(err) {
t.Errorf("TearDown() failed: %v", err) + } +} diff --git a/pkg/volume/secret/secret.go b/pkg/volume/secret/secret.go index 983daba6f6..12e0141e17 100644 --- a/pkg/volume/secret/secret.go +++ b/pkg/volume/secret/secret.go @@ -208,7 +208,7 @@ func (b *secretVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { len(secret.Data), totalBytes) - payload, err := makePayload(b.source.Items, secret, b.source.DefaultMode, optional) + payload, err := MakePayload(b.source.Items, secret, b.source.DefaultMode, optional) if err != nil { return err } @@ -235,7 +235,8 @@ func (b *secretVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { return nil } -func makePayload(mappings []v1.KeyToPath, secret *v1.Secret, defaultMode *int32, optional bool) (map[string]volumeutil.FileProjection, error) { +// Note: this function is exported so that it can be called from the projection volume driver +func MakePayload(mappings []v1.KeyToPath, secret *v1.Secret, defaultMode *int32, optional bool) (map[string]volumeutil.FileProjection, error) { if defaultMode == nil { return nil, fmt.Errorf("No defaultMode used, not even the default value for it") } diff --git a/pkg/volume/secret/secret_test.go b/pkg/volume/secret/secret_test.go index ee7e83fd75..e7e3bb9502 100644 --- a/pkg/volume/secret/secret_test.go +++ b/pkg/volume/secret/secret_test.go @@ -241,7 +241,7 @@ func TestMakePayload(t *testing.T) { } for _, tc := range cases { - actualPayload, err := makePayload(tc.mappings, tc.secret, &tc.mode, tc.optional) + actualPayload, err := MakePayload(tc.mappings, tc.secret, &tc.mode, tc.optional) if err != nil && tc.success { t.Errorf("%v: unexpected failure making payload: %v", tc.name, err) continue diff --git a/test/e2e/common/projected.go b/test/e2e/common/projected.go new file mode 100644 index 0000000000..b46f650f5d --- /dev/null +++ b/test/e2e/common/projected.go @@ -0,0 +1,1534 @@ +/* +Copyright 2016 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package common + +import ( + "fmt" + "os" + "path" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/test/e2e/framework" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +var _ = framework.KubeDescribe("Projected", func() { + // Part 1/3 - Secrets + f := framework.NewDefaultFramework("projected") + + It("should be consumable from pods in volume [Conformance] [Volume]", func() { + doProjectedSecretE2EWithoutMapping(f, nil /* default mode */, "projected-secret-test-"+string(uuid.NewUUID()), nil, nil) + }) + + It("should be consumable from pods in volume with defaultMode set [Conformance] [Volume]", func() { + defaultMode := int32(0400) + doProjectedSecretE2EWithoutMapping(f, &defaultMode, "projected-secret-test-"+string(uuid.NewUUID()), nil, nil) + }) + + It("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [Conformance] [Volume]", func() { + defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */ + fsGroup := int64(1001) + uid := int64(1000) + doProjectedSecretE2EWithoutMapping(f, &defaultMode, "projected-secret-test-"+string(uuid.NewUUID()), &fsGroup, &uid) + }) + + It("should be consumable from pods in volume with mappings [Conformance] [Volume]", func() { + doProjectedSecretE2EWithMapping(f, nil) + }) + + It("should be consumable from pods in volume with mappings 
and Item Mode set [Conformance] [Volume]", func() { + mode := int32(0400) + doProjectedSecretE2EWithMapping(f, &mode) + }) + + It("should be able to mount in a volume regardless of a different secret existing with same name in different namespace [Volume]", func() { + var ( + namespace2 *v1.Namespace + err error + secret2Name = "projected-secret-test-" + string(uuid.NewUUID()) + ) + + if namespace2, err = f.CreateNamespace("secret-namespace", nil); err != nil { + framework.Failf("unable to create new namespace %s: %v", namespace2.Name, err) + } + + secret2 := secretForTest(namespace2.Name, secret2Name) + secret2.Data = map[string][]byte{ + "this_should_not_match_content_of_other_secret": []byte("similarly_this_should_not_match_content_of_other_secret\n"), + } + if secret2, err = f.ClientSet.Core().Secrets(namespace2.Name).Create(secret2); err != nil { + framework.Failf("unable to create test secret %s: %v", secret2.Name, err) + } + doProjectedSecretE2EWithoutMapping(f, nil /* default mode */, secret2.Name, nil, nil) + }) + + It("should be consumable in multiple volumes in a pod [Conformance] [Volume]", func() { + // This test ensures that the same secret can be mounted in multiple + // volumes in the same pod. This test case exists to prevent + // regressions that break this use-case. 
+ var ( + name = "projected-secret-test-" + string(uuid.NewUUID()) + volumeName = "projected-secret-volume" + volumeMountPath = "/etc/projected-secret-volume" + volumeName2 = "projected-secret-volume-2" + volumeMountPath2 = "/etc/projected-secret-volume-2" + secret = secretForTest(f.Namespace.Name, name) + ) + + By(fmt.Sprintf("Creating secret with name %s", secret.Name)) + var err error + if secret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(secret); err != nil { + framework.Failf("unable to create test secret %s: %v", secret.Name, err) + } + + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-projected-secrets-" + string(uuid.NewUUID()), + }, + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ + { + Name: volumeName, + VolumeSource: v1.VolumeSource{ + Projected: &v1.ProjectedVolumeSource{ + Sources: []v1.VolumeProjection{ + { + Secret: &v1.SecretProjection{ + LocalObjectReference: v1.LocalObjectReference{ + Name: name, + }, + }, + }, + }, + }, + }, + }, + { + Name: volumeName2, + VolumeSource: v1.VolumeSource{ + Projected: &v1.ProjectedVolumeSource{ + Sources: []v1.VolumeProjection{ + { + Secret: &v1.SecretProjection{ + LocalObjectReference: v1.LocalObjectReference{ + Name: name, + }, + }, + }, + }, + }, + }, + }, + }, + Containers: []v1.Container{ + { + Name: "secret-volume-test", + Image: "gcr.io/google_containers/mounttest:0.8", + Args: []string{ + "--file_content=/etc/projected-secret-volume/data-1", + "--file_mode=/etc/projected-secret-volume/data-1"}, + VolumeMounts: []v1.VolumeMount{ + { + Name: volumeName, + MountPath: volumeMountPath, + ReadOnly: true, + }, + { + Name: volumeName2, + MountPath: volumeMountPath2, + ReadOnly: true, + }, + }, + }, + }, + RestartPolicy: v1.RestartPolicyNever, + }, + } + + f.TestContainerOutput("consume secrets", pod, 0, []string{ + "content of file \"/etc/projected-secret-volume/data-1\": value-1", + "mode of file \"/etc/projected-secret-volume/data-1\": -rw-r--r--", + }) + }) + + It("optional updates 
should be reflected in volume [Conformance] [Volume]", func() { + + // We may have to wait for a full sync period to elapse before the + // Kubelet projects the update into the volume and the container picks + // it up. This timeout is based on the default Kubelet sync period (1 + // minute) plus additional time for fudge factor. + const podLogTimeout = 300 * time.Second + trueVal := true + + volumeMountPath := "/etc/projected-secret-volumes" + + deleteName := "s-test-opt-del-" + string(uuid.NewUUID()) + deleteContainerName := "dels-volume-test" + deleteVolumeName := "deletes-volume" + deleteSecret := &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: f.Namespace.Name, + Name: deleteName, + }, + Data: map[string][]byte{ + "data-1": []byte("value-1"), + }, + } + + updateName := "s-test-opt-upd-" + string(uuid.NewUUID()) + updateContainerName := "upds-volume-test" + updateVolumeName := "updates-volume" + updateSecret := &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: f.Namespace.Name, + Name: updateName, + }, + Data: map[string][]byte{ + "data-1": []byte("value-1"), + }, + } + + createName := "s-test-opt-create-" + string(uuid.NewUUID()) + createContainerName := "creates-volume-test" + createVolumeName := "creates-volume" + createSecret := &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: f.Namespace.Name, + Name: createName, + }, + Data: map[string][]byte{ + "data-1": []byte("value-1"), + }, + } + + By(fmt.Sprintf("Creating secret with name %s", deleteSecret.Name)) + var err error + if deleteSecret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(deleteSecret); err != nil { + framework.Failf("unable to create test secret %s: %v", deleteSecret.Name, err) + } + + By(fmt.Sprintf("Creating secret with name %s", updateSecret.Name)) + if updateSecret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(updateSecret); err != nil { + framework.Failf("unable to create test secret %s: %v", updateSecret.Name, err) + } + + pod := &v1.Pod{ 
+ ObjectMeta: metav1.ObjectMeta{ + Name: "pod-projected-secrets-" + string(uuid.NewUUID()), + }, + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ + { + Name: deleteVolumeName, + VolumeSource: v1.VolumeSource{ + Projected: &v1.ProjectedVolumeSource{ + Sources: []v1.VolumeProjection{ + { + Secret: &v1.SecretProjection{ + LocalObjectReference: v1.LocalObjectReference{ + Name: deleteName, + }, + Optional: &trueVal, + }, + }, + }, + }, + }, + }, + { + Name: updateVolumeName, + VolumeSource: v1.VolumeSource{ + Projected: &v1.ProjectedVolumeSource{ + Sources: []v1.VolumeProjection{ + { + Secret: &v1.SecretProjection{ + LocalObjectReference: v1.LocalObjectReference{ + Name: updateName, + }, + Optional: &trueVal, + }, + }, + }, + }, + }, + }, + { + Name: createVolumeName, + VolumeSource: v1.VolumeSource{ + Projected: &v1.ProjectedVolumeSource{ + Sources: []v1.VolumeProjection{ + { + Secret: &v1.SecretProjection{ + LocalObjectReference: v1.LocalObjectReference{ + Name: createName, + }, + Optional: &trueVal, + }, + }, + }, + }, + }, + }, + }, + Containers: []v1.Container{ + { + Name: deleteContainerName, + Image: "gcr.io/google_containers/mounttest:0.8", + Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=/etc/projected-secret-volumes/delete/data-1"}, + VolumeMounts: []v1.VolumeMount{ + { + Name: deleteVolumeName, + MountPath: path.Join(volumeMountPath, "delete"), + ReadOnly: true, + }, + }, + }, + { + Name: updateContainerName, + Image: "gcr.io/google_containers/mounttest:0.8", + Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=/etc/projected-secret-volumes/update/data-3"}, + VolumeMounts: []v1.VolumeMount{ + { + Name: updateVolumeName, + MountPath: path.Join(volumeMountPath, "update"), + ReadOnly: true, + }, + }, + }, + { + Name: createContainerName, + Image: "gcr.io/google_containers/mounttest:0.8", + Command: []string{"/mt", "--break_on_expected_content=false", 
"--retry_time=120", "--file_content_in_loop=/etc/projected-secret-volumes/create/data-1"}, + VolumeMounts: []v1.VolumeMount{ + { + Name: createVolumeName, + MountPath: path.Join(volumeMountPath, "create"), + ReadOnly: true, + }, + }, + }, + }, + RestartPolicy: v1.RestartPolicyNever, + }, + } + By("Creating the pod") + f.PodClient().CreateSync(pod) + + pollCreateLogs := func() (string, error) { + return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, createContainerName) + } + Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("Error reading file /etc/projected-secret-volumes/create/data-1")) + + pollUpdateLogs := func() (string, error) { + return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, updateContainerName) + } + Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("Error reading file /etc/projected-secret-volumes/update/data-3")) + + pollDeleteLogs := func() (string, error) { + return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, deleteContainerName) + } + Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1")) + + By(fmt.Sprintf("Deleting secret %v", deleteSecret.Name)) + err = f.ClientSet.Core().Secrets(f.Namespace.Name).Delete(deleteSecret.Name, &metav1.DeleteOptions{}) + Expect(err).NotTo(HaveOccurred(), "Failed to delete secret %q in namespace %q", deleteSecret.Name, f.Namespace.Name) + + By(fmt.Sprintf("Updating secret %v", updateSecret.Name)) + updateSecret.ResourceVersion = "" // to force update + delete(updateSecret.Data, "data-1") + updateSecret.Data["data-3"] = []byte("value-3") + _, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Update(updateSecret) + Expect(err).NotTo(HaveOccurred(), "Failed to update secret %q in namespace %q", updateSecret.Name, f.Namespace.Name) + + By(fmt.Sprintf("Creating secret with name %s", createSecret.Name)) + if createSecret, err = 
f.ClientSet.Core().Secrets(f.Namespace.Name).Create(createSecret); err != nil { + framework.Failf("unable to create test secret %s: %v", createSecret.Name, err) + } + + By("waiting to observe update in volume") + + Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1")) + Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-3")) + Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("Error reading file /etc/projected-secret-volumes/delete/data-1")) + }) + + // Part 2/3 - ConfigMaps + It("should be consumable from pods in volume [Conformance] [Volume]", func() { + doProjectedConfigMapE2EWithoutMappings(f, 0, 0, nil) + }) + + It("should be consumable from pods in volume with defaultMode set [Conformance] [Volume]", func() { + defaultMode := int32(0400) + doProjectedConfigMapE2EWithoutMappings(f, 0, 0, &defaultMode) + }) + + It("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [Feature:FSGroup] [Volume]", func() { + defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */ + doProjectedConfigMapE2EWithoutMappings(f, 1000, 1001, &defaultMode) + }) + + It("should be consumable from pods in volume as non-root [Conformance] [Volume]", func() { + doProjectedConfigMapE2EWithoutMappings(f, 1000, 0, nil) + }) + + It("should be consumable from pods in volume as non-root with FSGroup [Feature:FSGroup] [Volume]", func() { + doProjectedConfigMapE2EWithoutMappings(f, 1000, 1001, nil) + }) + + It("should be consumable from pods in volume with mappings [Conformance] [Volume]", func() { + doProjectedConfigMapE2EWithMappings(f, 0, 0, nil) + }) + + It("should be consumable from pods in volume with mappings and Item mode set[Conformance] [Volume]", func() { + mode := int32(0400) + doProjectedConfigMapE2EWithMappings(f, 0, 0, &mode) + }) + + It("should be consumable from pods in volume with mappings as non-root [Conformance] 
[Volume]", func() { + doProjectedConfigMapE2EWithMappings(f, 1000, 0, nil) + }) + + It("should be consumable from pods in volume with mappings as non-root with FSGroup [Feature:FSGroup] [Volume]", func() { + doProjectedConfigMapE2EWithMappings(f, 1000, 1001, nil) + }) + + It("updates should be reflected in volume [Conformance] [Volume]", func() { + + // We may have to wait for a full sync period to elapse before the + // Kubelet projects the update into the volume and the container picks + // it up. This timeout is based on the default Kubelet sync period (1 + // minute) plus additional time for fudge factor. + const podLogTimeout = 300 * time.Second + + name := "projected-configmap-test-upd-" + string(uuid.NewUUID()) + volumeName := "projected-configmap-volume" + volumeMountPath := "/etc/projected-configmap-volume" + containerName := "projected-configmap-volume-test" + configMap := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: f.Namespace.Name, + Name: name, + }, + Data: map[string]string{ + "data-1": "value-1", + }, + } + + By(fmt.Sprintf("Creating projection with configMap that has name %s", configMap.Name)) + var err error + if configMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { + framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) + } + + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-projected-configmaps-" + string(uuid.NewUUID()), + }, + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ + { + Name: volumeName, + VolumeSource: v1.VolumeSource{ + Projected: &v1.ProjectedVolumeSource{ + Sources: []v1.VolumeProjection{ + { + ConfigMap: &v1.ConfigMapProjection{ + LocalObjectReference: v1.LocalObjectReference{ + Name: name, + }, + }, + }, + }, + }, + }, + }, + }, + Containers: []v1.Container{ + { + Name: containerName, + Image: "gcr.io/google_containers/mounttest:0.8", + Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", 
"--file_content_in_loop=/etc/projected-configmap-volume/data-1"}, + VolumeMounts: []v1.VolumeMount{ + { + Name: volumeName, + MountPath: volumeMountPath, + ReadOnly: true, + }, + }, + }, + }, + RestartPolicy: v1.RestartPolicyNever, + }, + } + By("Creating the pod") + f.PodClient().CreateSync(pod) + + pollLogs := func() (string, error) { + return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName) + } + + Eventually(pollLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1")) + + By(fmt.Sprintf("Updating configmap %v", configMap.Name)) + configMap.ResourceVersion = "" // to force update + configMap.Data["data-1"] = "value-2" + _, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Update(configMap) + Expect(err).NotTo(HaveOccurred(), "Failed to update configmap %q in namespace %q", configMap.Name, f.Namespace.Name) + + By("waiting to observe update in volume") + Eventually(pollLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-2")) + }) + + It("optional updates should be reflected in volume [Conformance] [Volume]", func() { + + // We may have to wait for a full sync period to elapse before the + // Kubelet projects the update into the volume and the container picks + // it up. This timeout is based on the default Kubelet sync period (1 + // minute) plus additional time for fudge factor. 
+ const podLogTimeout = 300 * time.Second + trueVal := true + + volumeMountPath := "/etc/projected-configmap-volumes" + + deleteName := "cm-test-opt-del-" + string(uuid.NewUUID()) + deleteContainerName := "delcm-volume-test" + deleteVolumeName := "deletecm-volume" + deleteConfigMap := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: f.Namespace.Name, + Name: deleteName, + }, + Data: map[string]string{ + "data-1": "value-1", + }, + } + + updateName := "cm-test-opt-upd-" + string(uuid.NewUUID()) + updateContainerName := "updcm-volume-test" + updateVolumeName := "updatecm-volume" + updateConfigMap := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: f.Namespace.Name, + Name: updateName, + }, + Data: map[string]string{ + "data-1": "value-1", + }, + } + + createName := "cm-test-opt-create-" + string(uuid.NewUUID()) + createContainerName := "createcm-volume-test" + createVolumeName := "createcm-volume" + createConfigMap := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: f.Namespace.Name, + Name: createName, + }, + Data: map[string]string{ + "data-1": "value-1", + }, + } + + By(fmt.Sprintf("Creating configMap with name %s", deleteConfigMap.Name)) + var err error + if deleteConfigMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(deleteConfigMap); err != nil { + framework.Failf("unable to create test configMap %s: %v", deleteConfigMap.Name, err) + } + + By(fmt.Sprintf("Creating configMap with name %s", updateConfigMap.Name)) + if updateConfigMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(updateConfigMap); err != nil { + framework.Failf("unable to create test configMap %s: %v", updateConfigMap.Name, err) + } + + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-projected-configmaps-" + string(uuid.NewUUID()), + }, + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ + { + Name: deleteVolumeName, + VolumeSource: v1.VolumeSource{ + Projected: &v1.ProjectedVolumeSource{ + Sources: []v1.VolumeProjection{ + 
{ + ConfigMap: &v1.ConfigMapProjection{ + LocalObjectReference: v1.LocalObjectReference{ + Name: deleteName, + }, + Optional: &trueVal, + }, + }, + }, + }, + }, + }, + { + Name: updateVolumeName, + VolumeSource: v1.VolumeSource{ + Projected: &v1.ProjectedVolumeSource{ + Sources: []v1.VolumeProjection{ + { + ConfigMap: &v1.ConfigMapProjection{ + LocalObjectReference: v1.LocalObjectReference{ + Name: updateName, + }, + Optional: &trueVal, + }, + }, + }, + }, + }, + }, + { + Name: createVolumeName, + VolumeSource: v1.VolumeSource{ + Projected: &v1.ProjectedVolumeSource{ + Sources: []v1.VolumeProjection{ + { + ConfigMap: &v1.ConfigMapProjection{ + LocalObjectReference: v1.LocalObjectReference{ + Name: createName, + }, + Optional: &trueVal, + }, + }, + }, + }, + }, + }, + }, + Containers: []v1.Container{ + { + Name: deleteContainerName, + Image: "gcr.io/google_containers/mounttest:0.8", + Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=/etc/projected-configmap-volumes/delete/data-1"}, + VolumeMounts: []v1.VolumeMount{ + { + Name: deleteVolumeName, + MountPath: path.Join(volumeMountPath, "delete"), + ReadOnly: true, + }, + }, + }, + { + Name: updateContainerName, + Image: "gcr.io/google_containers/mounttest:0.8", + Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=/etc/projected-configmap-volumes/update/data-3"}, + VolumeMounts: []v1.VolumeMount{ + { + Name: updateVolumeName, + MountPath: path.Join(volumeMountPath, "update"), + ReadOnly: true, + }, + }, + }, + { + Name: createContainerName, + Image: "gcr.io/google_containers/mounttest:0.8", + Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=/etc/projected-configmap-volumes/create/data-1"}, + VolumeMounts: []v1.VolumeMount{ + { + Name: createVolumeName, + MountPath: path.Join(volumeMountPath, "create"), + ReadOnly: true, + }, + }, + }, + }, + 
RestartPolicy: v1.RestartPolicyNever, + }, + } + By("Creating the pod") + f.PodClient().CreateSync(pod) + + pollCreateLogs := func() (string, error) { + return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, createContainerName) + } + Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("Error reading file /etc/projected-configmap-volumes/create/data-1")) + + pollUpdateLogs := func() (string, error) { + return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, updateContainerName) + } + Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("Error reading file /etc/projected-configmap-volumes/update/data-3")) + + pollDeleteLogs := func() (string, error) { + return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, deleteContainerName) + } + Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1")) + + By(fmt.Sprintf("Deleting configmap %v", deleteConfigMap.Name)) + err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Delete(deleteConfigMap.Name, &metav1.DeleteOptions{}) + Expect(err).NotTo(HaveOccurred(), "Failed to delete configmap %q in namespace %q", deleteConfigMap.Name, f.Namespace.Name) + + By(fmt.Sprintf("Updating configmap %v", updateConfigMap.Name)) + updateConfigMap.ResourceVersion = "" // to force update + delete(updateConfigMap.Data, "data-1") + updateConfigMap.Data["data-3"] = "value-3" + _, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Update(updateConfigMap) + Expect(err).NotTo(HaveOccurred(), "Failed to update configmap %q in namespace %q", updateConfigMap.Name, f.Namespace.Name) + + By(fmt.Sprintf("Creating configMap with name %s", createConfigMap.Name)) + if createConfigMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(createConfigMap); err != nil { + framework.Failf("unable to create test configMap %s: %v", createConfigMap.Name, err) + } + + By("waiting to observe update in volume") + + 
Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1")) + Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-3")) + Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("Error reading file /etc/projected-configmap-volumes/delete/data-1")) + }) + + It("should be consumable in multiple volumes in the same pod [Conformance] [Volume]", func() { + var ( + name = "projected-configmap-test-volume-" + string(uuid.NewUUID()) + volumeName = "projected-configmap-volume" + volumeMountPath = "/etc/projected-configmap-volume" + volumeName2 = "projected-configmap-volume-2" + volumeMountPath2 = "/etc/projected-configmap-volume-2" + configMap = newConfigMap(f, name) + ) + + By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) + var err error + if configMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { + framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) + } + + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-projected-configmaps-" + string(uuid.NewUUID()), + }, + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ + { + Name: volumeName, + VolumeSource: v1.VolumeSource{ + Projected: &v1.ProjectedVolumeSource{ + Sources: []v1.VolumeProjection{ + { + ConfigMap: &v1.ConfigMapProjection{ + LocalObjectReference: v1.LocalObjectReference{ + Name: name, + }, + }, + }, + }, + }, + }, + }, + { + Name: volumeName2, + VolumeSource: v1.VolumeSource{ + Projected: &v1.ProjectedVolumeSource{ + Sources: []v1.VolumeProjection{ + { + + ConfigMap: &v1.ConfigMapProjection{ + LocalObjectReference: v1.LocalObjectReference{ + Name: name, + }, + }, + }, + }, + }, + }, + }, + }, + Containers: []v1.Container{ + { + Name: "projected-configmap-volume-test", + Image: "gcr.io/google_containers/mounttest:0.8", + Args: []string{"--file_content=/etc/projected-configmap-volume/data-1"}, + VolumeMounts: []v1.VolumeMount{ + { + 
Name: volumeName, + MountPath: volumeMountPath, + ReadOnly: true, + }, + { + Name: volumeName2, + MountPath: volumeMountPath2, + ReadOnly: true, + }, + }, + }, + }, + RestartPolicy: v1.RestartPolicyNever, + }, + } + + f.TestContainerOutput("consume configMaps", pod, 0, []string{ + "content of file \"/etc/projected-configmap-volume/data-1\": value-1", + }) + + }) + + // Part 3/3 - DownwardAPI + // How long to wait for a log pod to be displayed + const podLogTimeout = 2 * time.Minute + var podClient *framework.PodClient + BeforeEach(func() { + podClient = f.PodClient() + }) + + It("should provide podname only [Conformance] [Volume]", func() { + podName := "downwardapi-volume-" + string(uuid.NewUUID()) + pod := downwardAPIVolumePodForSimpleTest(podName, "/etc/podname") + + f.TestContainerOutput("downward API volume plugin", pod, 0, []string{ + fmt.Sprintf("%s\n", podName), + }) + }) + + It("should set DefaultMode on files [Conformance] [Volume]", func() { + podName := "downwardapi-volume-" + string(uuid.NewUUID()) + defaultMode := int32(0400) + pod := projectedDownwardAPIVolumePodForModeTest(podName, "/etc/podname", nil, &defaultMode) + + f.TestContainerOutput("downward API volume plugin", pod, 0, []string{ + "mode of file \"/etc/podname\": -r--------", + }) + }) + + It("should set mode on item file [Conformance] [Volume]", func() { + podName := "downwardapi-volume-" + string(uuid.NewUUID()) + mode := int32(0400) + pod := projectedDownwardAPIVolumePodForModeTest(podName, "/etc/podname", &mode, nil) + + f.TestContainerOutput("downward API volume plugin", pod, 0, []string{ + "mode of file \"/etc/podname\": -r--------", + }) + }) + + It("should provide podname as non-root with fsgroup [Feature:FSGroup] [Volume]", func() { + podName := "metadata-volume-" + string(uuid.NewUUID()) + uid := int64(1001) + gid := int64(1234) + pod := downwardAPIVolumePodForSimpleTest(podName, "/etc/podname") + pod.Spec.SecurityContext = &v1.PodSecurityContext{ + RunAsUser: &uid, + FSGroup: 
&gid, + } + f.TestContainerOutput("downward API volume plugin", pod, 0, []string{ + fmt.Sprintf("%s\n", podName), + }) + }) + + It("should provide podname as non-root with fsgroup and defaultMode [Feature:FSGroup] [Volume]", func() { + podName := "metadata-volume-" + string(uuid.NewUUID()) + uid := int64(1001) + gid := int64(1234) + mode := int32(0440) /* setting fsGroup sets mode to at least 440 */ + pod := projectedDownwardAPIVolumePodForModeTest(podName, "/etc/podname", &mode, nil) + pod.Spec.SecurityContext = &v1.PodSecurityContext{ + RunAsUser: &uid, + FSGroup: &gid, + } + f.TestContainerOutput("downward API volume plugin", pod, 0, []string{ + "mode of file \"/etc/podname\": -r--r-----", + }) + }) + + It("should update labels on modification [Conformance] [Volume]", func() { + labels := map[string]string{} + labels["key1"] = "value1" + labels["key2"] = "value2" + + podName := "labelsupdate" + string(uuid.NewUUID()) + pod := projectedDownwardAPIVolumePodForUpdateTest(podName, labels, map[string]string{}, "/etc/labels") + containerName := "client-container" + By("Creating the pod") + podClient.CreateSync(pod) + + Eventually(func() (string, error) { + return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, containerName) + }, + podLogTimeout, framework.Poll).Should(ContainSubstring("key1=\"value1\"\n")) + + //modify labels + podClient.Update(podName, func(pod *v1.Pod) { + pod.Labels["key3"] = "value3" + }) + + Eventually(func() (string, error) { + return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName) + }, + podLogTimeout, framework.Poll).Should(ContainSubstring("key3=\"value3\"\n")) + }) + + It("should update annotations on modification [Conformance] [Volume]", func() { + annotations := map[string]string{} + annotations["builder"] = "bar" + podName := "annotationupdate" + string(uuid.NewUUID()) + pod := projectedDownwardAPIVolumePodForUpdateTest(podName, map[string]string{}, annotations, "/etc/annotations") + + 
containerName := "client-container" + By("Creating the pod") + podClient.CreateSync(pod) + + pod, err := podClient.Get(pod.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred(), "Failed to get pod %q", pod.Name) + + Eventually(func() (string, error) { + return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName) + }, + podLogTimeout, framework.Poll).Should(ContainSubstring("builder=\"bar\"\n")) + + //modify annotations + podClient.Update(podName, func(pod *v1.Pod) { + pod.Annotations["builder"] = "foo" + }) + + Eventually(func() (string, error) { + return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName) + }, + podLogTimeout, framework.Poll).Should(ContainSubstring("builder=\"foo\"\n")) + }) + + It("should provide container's cpu limit [Conformance] [Volume]", func() { + podName := "downwardapi-volume-" + string(uuid.NewUUID()) + pod := downwardAPIVolumeForContainerResources(podName, "/etc/cpu_limit") + + f.TestContainerOutput("downward API volume plugin", pod, 0, []string{ + fmt.Sprintf("2\n"), + }) + }) + + It("should provide container's memory limit [Conformance] [Volume]", func() { + podName := "downwardapi-volume-" + string(uuid.NewUUID()) + pod := downwardAPIVolumeForContainerResources(podName, "/etc/memory_limit") + + f.TestContainerOutput("downward API volume plugin", pod, 0, []string{ + fmt.Sprintf("67108864\n"), + }) + }) + + It("should provide container's cpu request [Conformance] [Volume]", func() { + podName := "downwardapi-volume-" + string(uuid.NewUUID()) + pod := downwardAPIVolumeForContainerResources(podName, "/etc/cpu_request") + + f.TestContainerOutput("downward API volume plugin", pod, 0, []string{ + fmt.Sprintf("1\n"), + }) + }) + + It("should provide container's memory request [Conformance] [Volume]", func() { + podName := "downwardapi-volume-" + string(uuid.NewUUID()) + pod := downwardAPIVolumeForContainerResources(podName, "/etc/memory_request") + + f.TestContainerOutput("downward 
API volume plugin", pod, 0, []string{ + fmt.Sprintf("33554432\n"), + }) + }) + + It("should provide node allocatable (cpu) as default cpu limit if the limit is not set [Conformance] [Volume]", func() { + podName := "downwardapi-volume-" + string(uuid.NewUUID()) + pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/cpu_limit") + + f.TestContainerOutputRegexp("downward API volume plugin", pod, 0, []string{"[1-9]"}) + }) + + It("should provide node allocatable (memory) as default memory limit if the limit is not set [Conformance] [Volume]", func() { + podName := "downwardapi-volume-" + string(uuid.NewUUID()) + pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/memory_limit") + + f.TestContainerOutputRegexp("downward API volume plugin", pod, 0, []string{"[1-9]"}) + }) + + // Test multiple projections + It("should project all components that make up the projection API [Conformance] [Volume] [Projection]", func() { + var err error + podName := "projected-volume-" + string(uuid.NewUUID()) + secretName := "secret-projected-all-test-volume-" + string(uuid.NewUUID()) + configMapName := "configmap-projected-all-test-volume-" + string(uuid.NewUUID()) + configMap := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: f.Namespace.Name, + Name: configMapName, + }, + Data: map[string]string{ + "configmap-data": "configmap-value-1", + }, + } + secret := &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: f.Namespace.Name, + Name: secretName, + }, + Data: map[string][]byte{ + "secret-data": []byte("secret-value-1"), + }, + } + + By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) + if configMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { + framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) + } + By(fmt.Sprintf("Creating secret with name %s", secret.Name)) + if secret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(secret); err != nil { + 
framework.Failf("unable to create test secret %s: %v", secret.Name, err) + } + + pod := projectedAllVolumeBasePod(podName, secretName, configMapName, nil, nil) + pod.Spec.Containers = []v1.Container{ + { + Name: "projected-all-volume-test", + Image: "gcr.io/google_containers/busybox:1.24", + Command: []string{"sh", "-c", "cat /all/podname && cat /all/secret-data && cat /all/configmap-data"}, + VolumeMounts: []v1.VolumeMount{ + { + Name: "podinfo", + MountPath: "/all", + ReadOnly: false, + }, + }, + }, + } + f.TestContainerOutput("Check all projections for projected volume plugin", pod, 0, []string{ + fmt.Sprintf("%s", podName), + "secret-value-1", + "configmap-value-1", + }) + }) +}) + +func doProjectedSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int32, secretName string, fsGroup *int64, uid *int64) { + var ( + volumeName = "projected-secret-volume" + volumeMountPath = "/etc/projected-secret-volume" + secret = secretForTest(f.Namespace.Name, secretName) + ) + + By(fmt.Sprintf("Creating projection with secret that has name %s", secret.Name)) + var err error + if secret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(secret); err != nil { + framework.Failf("unable to create test secret %s: %v", secret.Name, err) + } + + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-projected-secrets-" + string(uuid.NewUUID()), + Namespace: f.Namespace.Name, + }, + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ + { + Name: volumeName, + VolumeSource: v1.VolumeSource{ + Projected: &v1.ProjectedVolumeSource{ + Sources: []v1.VolumeProjection{ + { + Secret: &v1.SecretProjection{ + LocalObjectReference: v1.LocalObjectReference{ + Name: secretName, + }, + }, + }, + }, + }, + }, + }, + }, + Containers: []v1.Container{ + { + Name: "projected-secret-volume-test", + Image: "gcr.io/google_containers/mounttest:0.8", + Args: []string{ + "--file_content=/etc/projected-secret-volume/data-1", + "--file_mode=/etc/projected-secret-volume/data-1"}, + VolumeMounts: 
[]v1.VolumeMount{ + { + Name: volumeName, + MountPath: volumeMountPath, + }, + }, + }, + }, + RestartPolicy: v1.RestartPolicyNever, + }, + } + + if defaultMode != nil { + //pod.Spec.Volumes[0].VolumeSource.Projected.Sources[0].Secret.DefaultMode = defaultMode + pod.Spec.Volumes[0].VolumeSource.Projected.DefaultMode = defaultMode + } else { + mode := int32(0644) + defaultMode = &mode + } + + if fsGroup != nil || uid != nil { + pod.Spec.SecurityContext = &v1.PodSecurityContext{ + FSGroup: fsGroup, + RunAsUser: uid, + } + } + + modeString := fmt.Sprintf("%v", os.FileMode(*defaultMode)) + expectedOutput := []string{ + "content of file \"/etc/projected-secret-volume/data-1\": value-1", + "mode of file \"/etc/projected-secret-volume/data-1\": " + modeString, + } + + f.TestContainerOutput("consume secrets", pod, 0, expectedOutput) +} + +func doProjectedSecretE2EWithMapping(f *framework.Framework, mode *int32) { + var ( + name = "projected-secret-test-map-" + string(uuid.NewUUID()) + volumeName = "projected-secret-volume" + volumeMountPath = "/etc/projected-secret-volume" + secret = secretForTest(f.Namespace.Name, name) + ) + + By(fmt.Sprintf("Creating projection with secret that has name %s", secret.Name)) + var err error + if secret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(secret); err != nil { + framework.Failf("unable to create test secret %s: %v", secret.Name, err) + } + + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-projected-secrets-" + string(uuid.NewUUID()), + }, + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ + { + Name: volumeName, + VolumeSource: v1.VolumeSource{ + Projected: &v1.ProjectedVolumeSource{ + Sources: []v1.VolumeProjection{ + { + Secret: &v1.SecretProjection{ + LocalObjectReference: v1.LocalObjectReference{ + Name: name, + }, + Items: []v1.KeyToPath{ + { + Key: "data-1", + Path: "new-path-data-1", + }, + }, + }, + }, + }, + }, + }, + }, + }, + Containers: []v1.Container{ + { + Name: "projected-secret-volume-test", + 
Image: "gcr.io/google_containers/mounttest:0.8", + Args: []string{ + "--file_content=/etc/projected-secret-volume/new-path-data-1", + "--file_mode=/etc/projected-secret-volume/new-path-data-1"}, + VolumeMounts: []v1.VolumeMount{ + { + Name: volumeName, + MountPath: volumeMountPath, + }, + }, + }, + }, + RestartPolicy: v1.RestartPolicyNever, + }, + } + + if mode != nil { + //pod.Spec.Volumes[0].VolumeSource.Projected.Sources[0].Secret.Items[0].Mode = mode + pod.Spec.Volumes[0].VolumeSource.Projected.DefaultMode = mode + } else { + defaultItemMode := int32(0644) + mode = &defaultItemMode + } + + modeString := fmt.Sprintf("%v", os.FileMode(*mode)) + expectedOutput := []string{ + "content of file \"/etc/projected-secret-volume/new-path-data-1\": value-1", + "mode of file \"/etc/projected-secret-volume/new-path-data-1\": " + modeString, + } + + f.TestContainerOutput("consume secrets", pod, 0, expectedOutput) +} + +func doProjectedConfigMapE2EWithoutMappings(f *framework.Framework, uid, fsGroup int64, defaultMode *int32) { + var ( + name = "projected-configmap-test-volume-" + string(uuid.NewUUID()) + volumeName = "projected-configmap-volume" + volumeMountPath = "/etc/projected-configmap-volume" + configMap = newConfigMap(f, name) + ) + + By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) + var err error + if configMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { + framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) + } + + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-projected-configmaps-" + string(uuid.NewUUID()), + }, + Spec: v1.PodSpec{ + SecurityContext: &v1.PodSecurityContext{}, + Volumes: []v1.Volume{ + { + Name: volumeName, + VolumeSource: v1.VolumeSource{ + Projected: &v1.ProjectedVolumeSource{ + Sources: []v1.VolumeProjection{ + { + ConfigMap: &v1.ConfigMapProjection{ + LocalObjectReference: v1.LocalObjectReference{ + Name: name, + }, + }, + }, + }, + }, + }, + 
}, + }, + Containers: []v1.Container{ + { + Name: "projected-configmap-volume-test", + Image: "gcr.io/google_containers/mounttest:0.8", + Args: []string{ + "--file_content=/etc/projected-configmap-volume/data-1", + "--file_mode=/etc/projected-configmap-volume/data-1"}, + VolumeMounts: []v1.VolumeMount{ + { + Name: volumeName, + MountPath: volumeMountPath, + }, + }, + }, + }, + RestartPolicy: v1.RestartPolicyNever, + }, + } + + if uid != 0 { + pod.Spec.SecurityContext.RunAsUser = &uid + } + + if fsGroup != 0 { + pod.Spec.SecurityContext.FSGroup = &fsGroup + } + if defaultMode != nil { + //pod.Spec.Volumes[0].VolumeSource.Projected.Sources[0].ConfigMap.DefaultMode = defaultMode + pod.Spec.Volumes[0].VolumeSource.Projected.DefaultMode = defaultMode + } else { + mode := int32(0644) + defaultMode = &mode + } + + modeString := fmt.Sprintf("%v", os.FileMode(*defaultMode)) + output := []string{ + "content of file \"/etc/projected-configmap-volume/data-1\": value-1", + "mode of file \"/etc/projected-configmap-volume/data-1\": " + modeString, + } + f.TestContainerOutput("consume configMaps", pod, 0, output) +} + +func doProjectedConfigMapE2EWithMappings(f *framework.Framework, uid, fsGroup int64, itemMode *int32) { + var ( + name = "projected-configmap-test-volume-map-" + string(uuid.NewUUID()) + volumeName = "projected-configmap-volume" + volumeMountPath = "/etc/projected-configmap-volume" + configMap = newConfigMap(f, name) + ) + + By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) + + var err error + if configMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { + framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) + } + + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-projected-configmaps-" + string(uuid.NewUUID()), + }, + Spec: v1.PodSpec{ + SecurityContext: &v1.PodSecurityContext{}, + Volumes: []v1.Volume{ + { + Name: volumeName, + VolumeSource: v1.VolumeSource{ + Projected: 
&v1.ProjectedVolumeSource{ + Sources: []v1.VolumeProjection{ + { + ConfigMap: &v1.ConfigMapProjection{ + LocalObjectReference: v1.LocalObjectReference{ + Name: name, + }, + Items: []v1.KeyToPath{ + { + Key: "data-2", + Path: "path/to/data-2", + }, + }, + }, + }, + }, + }, + }, + }, + }, + Containers: []v1.Container{ + { + Name: "projected-configmap-volume-test", + Image: "gcr.io/google_containers/mounttest:0.8", + Args: []string{"--file_content=/etc/projected-configmap-volume/path/to/data-2", + "--file_mode=/etc/projected-configmap-volume/path/to/data-2"}, + VolumeMounts: []v1.VolumeMount{ + { + Name: volumeName, + MountPath: volumeMountPath, + ReadOnly: true, + }, + }, + }, + }, + RestartPolicy: v1.RestartPolicyNever, + }, + } + + if uid != 0 { + pod.Spec.SecurityContext.RunAsUser = &uid + } + + if fsGroup != 0 { + pod.Spec.SecurityContext.FSGroup = &fsGroup + } + if itemMode != nil { + //pod.Spec.Volumes[0].VolumeSource.ConfigMap.Items[0].Mode = itemMode + pod.Spec.Volumes[0].VolumeSource.Projected.DefaultMode = itemMode + } else { + mode := int32(0644) + itemMode = &mode + } + + // Just check file mode if fsGroup is not set. If fsGroup is set, the + // final mode is adjusted and we are not testing that case. 
+ output := []string{ + "content of file \"/etc/projected-configmap-volume/path/to/data-2\": value-2", + } + if fsGroup == 0 { + modeString := fmt.Sprintf("%v", os.FileMode(*itemMode)) + output = append(output, "mode of file \"/etc/projected-configmap-volume/path/to/data-2\": "+modeString) + } + f.TestContainerOutput("consume configMaps", pod, 0, output) +} + +func projectedDownwardAPIVolumePodForModeTest(name, filePath string, itemMode, defaultMode *int32) *v1.Pod { + pod := projectedDownwardAPIVolumeBasePod(name, nil, nil) + + pod.Spec.Containers = []v1.Container{ + { + Name: "client-container", + Image: "gcr.io/google_containers/mounttest:0.8", + Command: []string{"/mt", "--file_mode=" + filePath}, + VolumeMounts: []v1.VolumeMount{ + { + Name: "podinfo", + MountPath: "/etc", + }, + }, + }, + } + if itemMode != nil { + pod.Spec.Volumes[0].VolumeSource.Projected.Sources[0].DownwardAPI.Items[0].Mode = itemMode + } + if defaultMode != nil { + pod.Spec.Volumes[0].VolumeSource.Projected.DefaultMode = defaultMode + } + + return pod +} + +func projectedDownwardAPIVolumePodForUpdateTest(name string, labels, annotations map[string]string, filePath string) *v1.Pod { + pod := projectedDownwardAPIVolumeBasePod(name, labels, annotations) + + pod.Spec.Containers = []v1.Container{ + { + Name: "client-container", + Image: "gcr.io/google_containers/mounttest:0.8", + Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=" + filePath}, + VolumeMounts: []v1.VolumeMount{ + { + Name: "podinfo", + MountPath: "/etc", + ReadOnly: false, + }, + }, + }, + } + + applyLabelsAndAnnotationsToProjectedDownwardAPIPod(labels, annotations, pod) + return pod +} + +func projectedDownwardAPIVolumeBasePod(name string, labels, annotations map[string]string) *v1.Pod { + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Labels: labels, + Annotations: annotations, + }, + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ + { + Name: "podinfo", + 
VolumeSource: v1.VolumeSource{ + Projected: &v1.ProjectedVolumeSource{ + Sources: []v1.VolumeProjection{ + { + DownwardAPI: &v1.DownwardAPIProjection{ + Items: []v1.DownwardAPIVolumeFile{ + { + Path: "podname", + FieldRef: &v1.ObjectFieldSelector{ + APIVersion: "v1", + FieldPath: "metadata.name", + }, + }, + { + Path: "cpu_limit", + ResourceFieldRef: &v1.ResourceFieldSelector{ + ContainerName: "client-container", + Resource: "limits.cpu", + }, + }, + { + Path: "cpu_request", + ResourceFieldRef: &v1.ResourceFieldSelector{ + ContainerName: "client-container", + Resource: "requests.cpu", + }, + }, + { + Path: "memory_limit", + ResourceFieldRef: &v1.ResourceFieldSelector{ + ContainerName: "client-container", + Resource: "limits.memory", + }, + }, + { + Path: "memory_request", + ResourceFieldRef: &v1.ResourceFieldSelector{ + ContainerName: "client-container", + Resource: "requests.memory", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + RestartPolicy: v1.RestartPolicyNever, + }, + } + + return pod +} + +func applyLabelsAndAnnotationsToProjectedDownwardAPIPod(labels, annotations map[string]string, pod *v1.Pod) { + if len(labels) > 0 { + pod.Spec.Volumes[0].VolumeSource.Projected.Sources[0].DownwardAPI.Items = append(pod.Spec.Volumes[0].VolumeSource.Projected.Sources[0].DownwardAPI.Items, v1.DownwardAPIVolumeFile{ + Path: "labels", + FieldRef: &v1.ObjectFieldSelector{ + APIVersion: "v1", + FieldPath: "metadata.labels", + }, + }) + } + + if len(annotations) > 0 { + pod.Spec.Volumes[0].VolumeSource.Projected.Sources[0].DownwardAPI.Items = append(pod.Spec.Volumes[0].VolumeSource.Projected.Sources[0].DownwardAPI.Items, v1.DownwardAPIVolumeFile{ + Path: "annotations", + FieldRef: &v1.ObjectFieldSelector{ + APIVersion: "v1", + FieldPath: "metadata.annotations", + }, + }) + } +} + +func projectedAllVolumeBasePod(podName string, secretName string, configMapName string, labels, annotations map[string]string) *v1.Pod { + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: 
podName, + Labels: labels, + Annotations: annotations, + }, + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ + { + Name: "podinfo", + VolumeSource: v1.VolumeSource{ + Projected: &v1.ProjectedVolumeSource{ + Sources: []v1.VolumeProjection{ + { + DownwardAPI: &v1.DownwardAPIProjection{ + Items: []v1.DownwardAPIVolumeFile{ + { + Path: "podname", + FieldRef: &v1.ObjectFieldSelector{ + APIVersion: "v1", + FieldPath: "metadata.name", + }, + }, + }, + }, + }, + { + Secret: &v1.SecretProjection{ + LocalObjectReference: v1.LocalObjectReference{ + Name: secretName, + }, + }, + }, + { + ConfigMap: &v1.ConfigMapProjection{ + LocalObjectReference: v1.LocalObjectReference{ + Name: configMapName, + }, + }, + }, + }, + }, + }, + }, + }, + RestartPolicy: v1.RestartPolicyNever, + }, + } + + return pod +}