mirror of https://github.com/k3s-io/k3s
Merge pull request #49749 from sbezverk/e2e_selinux_local_starage_test
Automatic merge from submit-queue (batch tested with PRs 51377, 46580, 50998, 51466, 49749)

Adding e2e SELinux test for local storage

Adding e2e test for SELinux enabled local storage

/sig storage

Closes #45054
commit 01e961b380
@@ -838,6 +838,57 @@ func MakePod(ns string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool,
 	return podSpec
 }
 
+// Returns a pod definition based on the namespace. The pod references the PVC's
+// name. A slice of BASH commands can be supplied as args to be run by the pod.
+// SELinux testing requires passing HostIPC and HostPID as boolean arguments.
+func MakeSecPod(ns string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions) *v1.Pod {
+	if len(command) == 0 {
+		command = "while true; do sleep 1; done"
+	}
+	podName := "security-context-" + string(uuid.NewUUID())
+	fsGroup := int64(1000)
+	podSpec := &v1.Pod{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "Pod",
+			APIVersion: testapi.Groups[v1.GroupName].GroupVersion().String(),
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      podName,
+			Namespace: ns,
+		},
+		Spec: v1.PodSpec{
+			HostIPC: hostIPC,
+			HostPID: hostPID,
+			SecurityContext: &v1.PodSecurityContext{
+				FSGroup: &fsGroup,
+			},
+			Containers: []v1.Container{
+				{
+					Name:    "write-pod",
+					Image:   "gcr.io/google_containers/busybox:1.24",
+					Command: []string{"/bin/sh"},
+					Args:    []string{"-c", command},
+					SecurityContext: &v1.SecurityContext{
+						Privileged: &isPrivileged,
+					},
+				},
+			},
+			RestartPolicy: v1.RestartPolicyOnFailure,
+		},
+	}
+	var volumeMounts = make([]v1.VolumeMount, len(pvclaims))
+	var volumes = make([]v1.Volume, len(pvclaims))
+	for index, pvclaim := range pvclaims {
+		volumename := fmt.Sprintf("volume%v", index+1)
+		volumeMounts[index] = v1.VolumeMount{Name: volumename, MountPath: "/mnt/" + volumename}
+		volumes[index] = v1.Volume{Name: volumename, VolumeSource: v1.VolumeSource{PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvclaim.Name, ReadOnly: false}}}
+	}
+	podSpec.Spec.Containers[0].VolumeMounts = volumeMounts
+	podSpec.Spec.Volumes = volumes
+	podSpec.Spec.SecurityContext.SELinuxOptions = seLinuxLabel
+	return podSpec
+}
+
 // create pod with given claims
 func CreatePod(client clientset.Interface, namespace string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) (*v1.Pod, error) {
 	pod := MakePod(namespace, pvclaims, isPrivileged, command)
@@ -858,6 +909,26 @@ func CreatePod(client clientset.Interface, namespace string, pvclaims []*v1.Pers
 	return pod, nil
 }
 
+// create security pod with given claims
+func CreateSecPod(client clientset.Interface, namespace string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions) (*v1.Pod, error) {
+	pod := MakeSecPod(namespace, pvclaims, isPrivileged, command, hostIPC, hostPID, seLinuxLabel)
+	pod, err := client.CoreV1().Pods(namespace).Create(pod)
+	if err != nil {
+		return nil, fmt.Errorf("pod Create API error: %v", err)
+	}
+	// Waiting for pod to be running
+	err = WaitForPodNameRunningInNamespace(client, pod.Name, namespace)
+	if err != nil {
+		return pod, fmt.Errorf("pod %q is not Running: %v", pod.Name, err)
+	}
+	// get fresh pod info
+	pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{})
+	if err != nil {
+		return pod, fmt.Errorf("pod Get API error: %v", err)
+	}
+	return pod, nil
+}
+
 // Define and create a pod with a mounted PV. Pod runs infinite loop until killed.
 func CreateClientPod(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) (*v1.Pod, error) {
 	return CreatePod(c, ns, []*v1.PersistentVolumeClaim{pvc}, true, "")
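As a rough usage sketch, not part of this diff: a test that already has a bound PVC could drive the new helpers as below. The package name, namespace, claim, and SELinux level are placeholders, and the client calls follow the same pre-context-argument signatures as the code above.

package example // hypothetical helper package, for illustration only

import (
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
)

// runSELinuxPod creates a non-privileged pod that mounts the given claim with a
// custom SELinux MCS level, lets CreateSecPod wait for it to reach Running, and
// then deletes it again.
func runSELinuxPod(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) error {
	label := &v1.SELinuxOptions{Level: "s0:c0,c1"} // illustrative level
	// Empty command selects MakeSecPod's default sleep loop; host IPC/PID stay off.
	pod, err := framework.CreateSecPod(c, ns, []*v1.PersistentVolumeClaim{pvc}, false, "", false, false, label)
	if err != nil {
		return err
	}
	// ... exercise the mounted volume here ...
	return c.CoreV1().Pods(ns).Delete(pod.Name, &metav1.DeleteOptions{})
}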
@@ -48,6 +48,7 @@ go_library(
         "//vendor/github.com/vmware/govmomi/vim25/types:go_default_library",
         "//vendor/golang.org/x/net/context:go_default_library",
         "//vendor/google.golang.org/api/googleapi:go_default_library",
+        "//vendor/k8s.io/api/batch/v1:go_default_library",
         "//vendor/k8s.io/api/core/v1:go_default_library",
         "//vendor/k8s.io/api/rbac/v1beta1:go_default_library",
         "//vendor/k8s.io/api/storage/v1:go_default_library",
@@ -26,6 +26,7 @@ import (
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
 
+	batchv1 "k8s.io/api/batch/v1"
 	"k8s.io/api/core/v1"
 	rbacv1beta1 "k8s.io/api/rbac/v1beta1"
 	"k8s.io/apimachinery/pkg/api/resource"
@@ -92,8 +93,8 @@ const (
 	// volumeConfigName is the configmap passed to bootstrapper and provisioner
 	volumeConfigName = "local-volume-config"
 	// bootstrapper and provisioner images used for e2e tests
-	bootstrapperImageName = "quay.io/external_storage/local-volume-provisioner-bootstrap:v1.0.0"
-	provisionerImageName  = "quay.io/external_storage/local-volume-provisioner:v1.0.0"
+	bootstrapperImageName = "quay.io/external_storage/local-volume-provisioner-bootstrap:v1.0.1"
+	provisionerImageName  = "quay.io/external_storage/local-volume-provisioner:v1.0.1"
 	// provisioner daemonSetName name, must match the one defined in bootstrapper
 	daemonSetName = "local-volume-provisioner"
 	// provisioner node/pv cluster role binding, must match the one defined in bootstrapper
@@ -103,6 +104,10 @@ const (
 	testRequestSize = "10Mi"
 )
 
+// Common selinux labels
+var selinuxLabel = &v1.SELinuxOptions{
+	Level: "s0:c0,c1"}
+
 var _ = SIGDescribe("PersistentVolumes-local [Feature:LocalPersistentVolumes] [Serial]", func() {
 	f := framework.NewDefaultFramework("persistent-local-volumes-test")
 
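For context, and not part of the diff: v1.SELinuxOptions also carries User, Role, and Type fields alongside Level; the test above pins only the MCS level. A fuller label, with purely illustrative values and assuming the same v1 ("k8s.io/api/core/v1") import as this file, would look like:

// Illustrative only; the e2e test sets just Level.
var fullSELinuxLabel = &v1.SELinuxOptions{
	User:  "system_u",    // placeholder value
	Role:  "system_r",    // placeholder value
	Type:  "container_t", // placeholder value
	Level: "s0:c0,c1",
}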
@@ -264,7 +269,7 @@ var _ = SIGDescribe("PersistentVolumes-local [Feature:LocalPersistentVolumes] [S
 
 	It("should create and recreate local persistent volume", func() {
 		By("Creating bootstrapper pod to start provisioner daemonset")
-		createBootstrapperPod(config)
+		createBootstrapperJob(config)
 		kind := schema.GroupKind{Group: "extensions", Kind: "DaemonSet"}
 		framework.WaitForControlledPodsRunning(config.client, config.ns, daemonSetName, kind)
 
@@ -273,7 +278,6 @@ var _ = SIGDescribe("PersistentVolumes-local [Feature:LocalPersistentVolumes] [S
 		mkdirCmd := fmt.Sprintf("mkdir %v -m 777", volumePath)
 		err := framework.IssueSSHCommand(mkdirCmd, framework.TestContext.Provider, config.node0)
 		Expect(err).NotTo(HaveOccurred())
 
 		By("Waiting for a PersistentVolume to be created")
 		oldPV, err := waitForLocalPersistentVolume(config.client, volumePath)
 		Expect(err).NotTo(HaveOccurred())
@@ -490,7 +494,6 @@ func makeLocalPVConfig(config *localTestConfig, volume *localTestVolume) framewo
 func createLocalPVCPV(config *localTestConfig, volume *localTestVolume) {
 	pvcConfig := makeLocalPVCConfig(config)
 	pvConfig := makeLocalPVConfig(config, volume)
 
 	var err error
 	volume.pv, volume.pvc, err = framework.CreatePVPVC(config.client, pvConfig, pvcConfig, config.ns, true)
 	framework.ExpectNoError(err)
@@ -498,11 +501,21 @@ func createLocalPVCPV(config *localTestConfig, volume *localTestVolume) {
 }
 
 func makeLocalPod(config *localTestConfig, volume *localTestVolume, cmd string) *v1.Pod {
-	return framework.MakePod(config.ns, []*v1.PersistentVolumeClaim{volume.pvc}, false, cmd)
+	return framework.MakeSecPod(config.ns, []*v1.PersistentVolumeClaim{volume.pvc}, false, cmd, false, false, selinuxLabel)
 }
 
+// createSecPod should be used when the Pod requires non-default SELinux labels
+func createSecPod(config *localTestConfig, volume *localTestVolume, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions) (*v1.Pod, error) {
+	pod, err := framework.CreateSecPod(config.client, config.ns, []*v1.PersistentVolumeClaim{volume.pvc}, false, "", hostIPC, hostPID, seLinuxLabel)
+	podNodeName, podNodeNameErr := podNodeName(config, pod)
+	Expect(podNodeNameErr).NotTo(HaveOccurred())
+	framework.Logf("Security Context POD %q created on Node %q", pod.Name, podNodeName)
+	Expect(podNodeName).To(Equal(config.node0.Name))
+	return pod, err
+}
+
 func createLocalPod(config *localTestConfig, volume *localTestVolume) (*v1.Pod, error) {
-	return framework.CreatePod(config.client, config.ns, []*v1.PersistentVolumeClaim{volume.pvc}, false, "")
+	return framework.CreateSecPod(config.client, config.ns, []*v1.PersistentVolumeClaim{volume.pvc}, false, "", false, false, selinuxLabel)
 }
 
 func createAndMountTmpfsLocalVolume(config *localTestConfig, dir string) {
@@ -669,43 +682,47 @@ func createVolumeConfigMap(config *localTestConfig) {
 	Expect(err).NotTo(HaveOccurred())
 }
 
-func createBootstrapperPod(config *localTestConfig) {
-	pod := &v1.Pod{
+func createBootstrapperJob(config *localTestConfig) {
+	bootJob := &batchv1.Job{
 		TypeMeta: metav1.TypeMeta{
-			Kind:       "Pod",
-			APIVersion: "v1",
+			Kind:       "Job",
+			APIVersion: "batch/v1",
 		},
 		ObjectMeta: metav1.ObjectMeta{
 			GenerateName: "local-volume-tester-",
 		},
-		Spec: v1.PodSpec{
-			RestartPolicy:      v1.RestartPolicyNever,
-			ServiceAccountName: testServiceAccount,
-			Containers: []v1.Container{
-				{
-					Name:  "volume-tester",
-					Image: bootstrapperImageName,
-					Env: []v1.EnvVar{
-						{
-							Name: "MY_NAMESPACE",
-							ValueFrom: &v1.EnvVarSource{
-								FieldRef: &v1.ObjectFieldSelector{
-									FieldPath: "metadata.namespace",
-								},
-							},
-						},
-					},
-					Args: []string{
-						fmt.Sprintf("--image=%v", provisionerImageName),
-						fmt.Sprintf("--volume-config=%v", volumeConfigName),
-					},
-				},
-			},
+		Spec: batchv1.JobSpec{
+			Template: v1.PodTemplateSpec{
+				Spec: v1.PodSpec{
+					RestartPolicy:      v1.RestartPolicyNever,
+					ServiceAccountName: testServiceAccount,
+					Containers: []v1.Container{
+						{
+							Name:  "volume-tester",
+							Image: bootstrapperImageName,
+							Env: []v1.EnvVar{
+								{
+									Name: "MY_NAMESPACE",
+									ValueFrom: &v1.EnvVarSource{
+										FieldRef: &v1.ObjectFieldSelector{
+											FieldPath: "metadata.namespace",
+										},
+									},
+								},
+							},
+							Args: []string{
+								fmt.Sprintf("--image=%v", provisionerImageName),
+								fmt.Sprintf("--volume-config=%v", volumeConfigName),
+							},
+						},
+					},
+				},
+			},
 		},
 	}
-	pod, err := config.client.CoreV1().Pods(config.ns).Create(pod)
+	job, err := config.client.Batch().Jobs(config.ns).Create(bootJob)
 	Expect(err).NotTo(HaveOccurred())
-	err = framework.WaitForPodSuccessInNamespace(config.client, pod.Name, pod.Namespace)
+	err = framework.WaitForJobFinish(config.client, config.ns, job.Name, 1)
 	Expect(err).NotTo(HaveOccurred())
 }
 
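Not part of this change, but as a follow-up sketch: the bootstrapper Job is normally reclaimed when the e2e framework tears down the test namespace. A test that wanted to remove it explicitly after WaitForJobFinish could issue a foreground delete so the Job's pods are removed with it; job and config here refer to the names in the function above, and the Delete call uses the same pre-context-argument client signatures.

// Hypothetical explicit cleanup; ordinarily namespace deletion handles this.
propagation := metav1.DeletePropagationForeground
deleteErr := config.client.Batch().Jobs(config.ns).Delete(job.Name, &metav1.DeleteOptions{PropagationPolicy: &propagation})
Expect(deleteErr).NotTo(HaveOccurred())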
@@ -735,6 +752,7 @@ func newLocalClaim(config *localTestConfig) *v1.PersistentVolumeClaim {
 // waitForLocalPersistentVolume waits for a local persistent volume with 'volumePath' to be available.
 func waitForLocalPersistentVolume(c clientset.Interface, volumePath string) (*v1.PersistentVolume, error) {
 	var pv *v1.PersistentVolume
 
 	for start := time.Now(); time.Since(start) < 10*time.Minute && pv == nil; time.Sleep(5 * time.Second) {
 		pvs, err := c.Core().PersistentVolumes().List(metav1.ListOptions{})
 		if err != nil {
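A side note on the polling loop above, offered as an observation rather than a change in the PR: the same wait could be phrased with the vendored apimachinery wait helper. The sketch below assumes the usual "k8s.io/apimachinery/pkg/util/wait" and "time" imports, and findLocalPV is a hypothetical stand-in for whatever matching logic the real function applies to pick the PV for volumePath.

// Equivalent polling sketch using wait.Poll instead of a hand-rolled for loop.
var pv *v1.PersistentVolume
pollErr := wait.Poll(5*time.Second, 10*time.Minute, func() (bool, error) {
	pvs, err := c.Core().PersistentVolumes().List(metav1.ListOptions{})
	if err != nil {
		return false, err
	}
	pv = findLocalPV(pvs, volumePath) // hypothetical helper, not in the diff
	return pv != nil, nil
})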