Merge pull request #77852 from k-toyoda-pi/golint_test_e2e_common_p-r

Fix golint failures of test/e2e/common/[p-r]*
k3s-v1.15.3
Kubernetes Prow Robot 2019-05-28 22:21:00 -07:00 committed by GitHub
commit 65188f8690
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
7 changed files with 139 additions and 138 deletions
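All seven files below follow the same mechanical pattern: golint reports "should not use dot imports" for the `.` imports of Ginkgo and Gomega, so the commit switches them to plain named imports and qualifies every call site (Describe, Context, It, By, BeforeEach, Expect, Eventually) with its package name. A minimal self-contained sketch of the before and after, with an illustrative package, suite, and test name rather than anything taken from the diff:

package common_test

import (
	"testing"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

// Before the fix, the dot imports
//     . "github.com/onsi/ginkgo"
//     . "github.com/onsi/gomega"
// allowed bare calls such as It(...) and Expect(...), at the cost of a
// golint warning and an unclear origin for every identifier.
var _ = ginkgo.Describe("qualified imports", func() {
	ginkgo.It("should add correctly", func() {
		gomega.Expect(1 + 1).To(gomega.Equal(2))
	})
})

// TestQualified wires Gomega failures into Ginkgo and runs the suite.
func TestQualified(t *testing.T) {
	gomega.RegisterFailHandler(ginkgo.Fail)
	ginkgo.RunSpecs(t, "Qualified Imports Suite")
}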

test/e2e/common/privileged.go (View File)

@@ -19,14 +19,15 @@ package common
import (
"fmt"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
)
// PrivilegedPodTestConfig is a configuration struct for the privileged pod test
type PrivilegedPodTestConfig struct {
f *framework.Framework
@@ -45,15 +46,15 @@ var _ = framework.KubeDescribe("PrivilegedPod [NodeConformance]", func() {
notPrivilegedContainer: "not-privileged-container",
}
It("should enable privileged commands [LinuxOnly]", func() {
ginkgo.It("should enable privileged commands [LinuxOnly]", func() {
// Windows does not support privileged containers.
By("Creating a pod with a privileged container")
ginkgo.By("Creating a pod with a privileged container")
config.createPods()
By("Executing in the privileged container")
ginkgo.By("Executing in the privileged container")
config.run(config.privilegedContainer, true)
By("Executing in the non-privileged container")
ginkgo.By("Executing in the non-privileged container")
config.run(config.notPrivilegedContainer, false)
})
})
@@ -75,7 +76,7 @@ func (c *PrivilegedPodTestConfig) run(containerName string, expectSuccess bool)
framework.ExpectNoError(err,
fmt.Sprintf("could not remove dummy1 link: %v", err))
} else {
Expect(err).To(HaveOccurred(), msg)
gomega.Expect(err).To(gomega.HaveOccurred(), msg)
}
}
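Note that besides the import changes, this file also gains a doc comment on the exported PrivilegedPodTestConfig type. That addresses a second golint rule: a comment on an exported identifier must exist and must begin with the identifier's name. A tiny sketch with an illustrative type name:

package common

// Without the comment below, golint reports:
//   exported type PodTestConfig should have comment or be unexported

// PodTestConfig holds the settings one of these pod tests runs with.
// The comment starts with the type name, which satisfies golint.
type PodTestConfig struct {
	namespace string
}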

test/e2e/common/projected_combined.go (View File)

@@ -25,10 +25,10 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
)
var _ = Describe("[sig-storage] Projected combined", func() {
var _ = ginkgo.Describe("[sig-storage] Projected combined", func() {
f := framework.NewDefaultFramework("projected")
// Test multiple projections
@@ -61,11 +61,11 @@ var _ = Describe("[sig-storage] Projected combined", func() {
},
}
By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
By(fmt.Sprintf("Creating secret with name %s", secret.Name))
ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name))
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
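For context on what this test exercises: a single projected volume can combine several sources, here a configMap and a secret, behind one mount path. A hedged sketch of how such a volume is declared with the core/v1 types (the object names are illustrative, not taken from the test):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	// One volume, two projected sources: files from both the configMap and
	// the secret appear under the same mount path in the container.
	vol := v1.Volume{
		Name: "projected-volume",
		VolumeSource: v1.VolumeSource{
			Projected: &v1.ProjectedVolumeSource{
				Sources: []v1.VolumeProjection{
					{ConfigMap: &v1.ConfigMapProjection{
						LocalObjectReference: v1.LocalObjectReference{Name: "projected-configmap-test"},
					}},
					{Secret: &v1.SecretProjection{
						LocalObjectReference: v1.LocalObjectReference{Name: "projected-secret-test"},
					}},
				},
			},
		},
	}
	fmt.Println(vol.Name)
}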

test/e2e/common/projected_configmap.go (View File)

@@ -26,11 +26,11 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
var _ = Describe("[sig-storage] Projected configMap", func() {
var _ = ginkgo.Describe("[sig-storage] Projected configMap", func() {
f := framework.NewDefaultFramework("projected")
/*
@@ -53,7 +53,7 @@ var _ = Describe("[sig-storage] Projected configMap", func() {
doProjectedConfigMapE2EWithoutMappings(f, 0, 0, &defaultMode)
})
It("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [NodeFeature:FSGroup]", func() {
ginkgo.It("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [NodeFeature:FSGroup]", func() {
defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */
doProjectedConfigMapE2EWithoutMappings(f, 1000, 1001, &defaultMode)
})
@@ -68,7 +68,7 @@ var _ = Describe("[sig-storage] Projected configMap", func() {
doProjectedConfigMapE2EWithoutMappings(f, 1000, 0, nil)
})
It("should be consumable from pods in volume as non-root with FSGroup [NodeFeature:FSGroup]", func() {
ginkgo.It("should be consumable from pods in volume as non-root with FSGroup [NodeFeature:FSGroup]", func() {
doProjectedConfigMapE2EWithoutMappings(f, 1000, 1001, nil)
})
@@ -102,7 +102,7 @@ var _ = Describe("[sig-storage] Projected configMap", func() {
doProjectedConfigMapE2EWithMappings(f, 1000, 0, nil)
})
It("should be consumable from pods in volume with mappings as non-root with FSGroup [NodeFeature:FSGroup]", func() {
ginkgo.It("should be consumable from pods in volume with mappings as non-root with FSGroup [NodeFeature:FSGroup]", func() {
doProjectedConfigMapE2EWithMappings(f, 1000, 1001, nil)
})
@@ -129,7 +129,7 @@ var _ = Describe("[sig-storage] Projected configMap", func() {
},
}
By(fmt.Sprintf("Creating projection with configMap that has name %s", configMap.Name))
ginkgo.By(fmt.Sprintf("Creating projection with configMap that has name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
@@ -175,23 +175,23 @@ var _ = Describe("[sig-storage] Projected configMap", func() {
RestartPolicy: v1.RestartPolicyNever,
},
}
By("Creating the pod")
ginkgo.By("Creating the pod")
f.PodClient().CreateSync(pod)
pollLogs := func() (string, error) {
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
}
Eventually(pollLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1"))
gomega.Eventually(pollLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
By(fmt.Sprintf("Updating configmap %v", configMap.Name))
ginkgo.By(fmt.Sprintf("Updating configmap %v", configMap.Name))
configMap.ResourceVersion = "" // to force update
configMap.Data["data-1"] = "value-2"
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(configMap)
framework.ExpectNoError(err, "Failed to update configmap %q in namespace %q", configMap.Name, f.Namespace.Name)
By("waiting to observe update in volume")
Eventually(pollLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-2"))
ginkgo.By("waiting to observe update in volume")
gomega.Eventually(pollLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-2"))
})
/*
@@ -244,13 +244,13 @@ var _ = Describe("[sig-storage] Projected configMap", func() {
},
}
By(fmt.Sprintf("Creating configMap with name %s", deleteConfigMap.Name))
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", deleteConfigMap.Name))
var err error
if deleteConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(deleteConfigMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", deleteConfigMap.Name, err)
}
By(fmt.Sprintf("Creating configMap with name %s", updateConfigMap.Name))
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", updateConfigMap.Name))
if updateConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(updateConfigMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", updateConfigMap.Name, err)
}
@@ -354,45 +354,45 @@ var _ = Describe("[sig-storage] Projected configMap", func() {
RestartPolicy: v1.RestartPolicyNever,
},
}
By("Creating the pod")
ginkgo.By("Creating the pod")
f.PodClient().CreateSync(pod)
pollCreateLogs := func() (string, error) {
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, createContainerName)
}
Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("Error reading file /etc/projected-configmap-volumes/create/data-1"))
gomega.Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/projected-configmap-volumes/create/data-1"))
pollUpdateLogs := func() (string, error) {
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, updateContainerName)
}
Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("Error reading file /etc/projected-configmap-volumes/update/data-3"))
gomega.Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/projected-configmap-volumes/update/data-3"))
pollDeleteLogs := func() (string, error) {
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, deleteContainerName)
}
Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1"))
gomega.Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
By(fmt.Sprintf("Deleting configmap %v", deleteConfigMap.Name))
ginkgo.By(fmt.Sprintf("Deleting configmap %v", deleteConfigMap.Name))
err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(deleteConfigMap.Name, &metav1.DeleteOptions{})
framework.ExpectNoError(err, "Failed to delete configmap %q in namespace %q", deleteConfigMap.Name, f.Namespace.Name)
By(fmt.Sprintf("Updating configmap %v", updateConfigMap.Name))
ginkgo.By(fmt.Sprintf("Updating configmap %v", updateConfigMap.Name))
updateConfigMap.ResourceVersion = "" // to force update
delete(updateConfigMap.Data, "data-1")
updateConfigMap.Data["data-3"] = "value-3"
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(updateConfigMap)
framework.ExpectNoError(err, "Failed to update configmap %q in namespace %q", updateConfigMap.Name, f.Namespace.Name)
By(fmt.Sprintf("Creating configMap with name %s", createConfigMap.Name))
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", createConfigMap.Name))
if createConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(createConfigMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", createConfigMap.Name, err)
}
By("waiting to observe update in volume")
ginkgo.By("waiting to observe update in volume")
Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1"))
Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-3"))
Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("Error reading file /etc/projected-configmap-volumes/delete/data-1"))
gomega.Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
gomega.Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-3"))
gomega.Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/projected-configmap-volumes/delete/data-1"))
})
/*
@@ -410,7 +410,7 @@ var _ = Describe("[sig-storage] Projected configMap", func() {
configMap = newConfigMap(f, name)
)
By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
@@ -488,21 +488,21 @@ var _ = Describe("[sig-storage] Projected configMap", func() {
//The pod stays pending during volume creation until the configMap objects are available,
//or until mounting the configMap volume times out. There is no configMap object defined for the pod, so pod creation should time out unless the volume is marked optional.
//Slow (~5 mins)
It("Should fail non-optional pod creation due to configMap object does not exist [Slow]", func() {
ginkgo.It("Should fail non-optional pod creation due to configMap object does not exist [Slow]", func() {
volumeMountPath := "/etc/projected-configmap-volumes"
podName := "pod-projected-configmaps-" + string(uuid.NewUUID())
err := createNonOptionalConfigMapPod(f, volumeMountPath, podName)
Expect(err).To(HaveOccurred(), "created pod %q with non-optional configMap in namespace %q", podName, f.Namespace.Name)
gomega.Expect(err).To(gomega.HaveOccurred(), "created pod %q with non-optional configMap in namespace %q", podName, f.Namespace.Name)
})
//A ConfigMap object is defined for the pod. If a key is specified which is not present in the ConfigMap,
// the volume setup will error during pod creation unless it is marked optional.
//Slow (~5 mins)
It("Should fail non-optional pod creation due to the key in the configMap object does not exist [Slow]", func() {
ginkgo.It("Should fail non-optional pod creation due to the key in the configMap object does not exist [Slow]", func() {
volumeMountPath := "/etc/configmap-volumes"
podName := "pod-configmaps-" + string(uuid.NewUUID())
err := createNonOptionalConfigMapPodWithConfig(f, volumeMountPath, podName)
Expect(err).To(HaveOccurred(), "created pod %q with non-optional configMap in namespace %q", podName, f.Namespace.Name)
gomega.Expect(err).To(gomega.HaveOccurred(), "created pod %q with non-optional configMap in namespace %q", podName, f.Namespace.Name)
})
})
@@ -517,7 +517,7 @@ func doProjectedConfigMapE2EWithoutMappings(f *framework.Framework, uid, fsGroup
configMap = newConfigMap(f, name)
)
By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
@@ -598,7 +598,7 @@ func doProjectedConfigMapE2EWithMappings(f *framework.Framework, uid, fsGroup in
configMap = newConfigMap(f, name)
)
By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
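The assertion style that recurs throughout this file, distilled: a zero-argument poll function is handed to gomega.Eventually, which keeps calling it until the matcher passes or the timeout expires. A self-contained sketch of that pattern under Gomega v1, where the poll function is a stub standing in for framework.GetPodLogs:

package common_test

import (
	"sync/atomic"
	"testing"
	"time"

	"github.com/onsi/gomega"
)

func TestEventuallyPollsUntilMatch(t *testing.T) {
	g := gomega.NewGomegaWithT(t)

	var updated int32
	go func() {
		time.Sleep(200 * time.Millisecond)
		atomic.StoreInt32(&updated, 1) // simulates the configMap update landing
	}()

	// pollLogs stands in for framework.GetPodLogs: Eventually calls it
	// repeatedly until the returned string satisfies the matcher, or the
	// 2s timeout (polling every 10ms) expires and the test fails.
	pollLogs := func() (string, error) {
		if atomic.LoadInt32(&updated) == 1 {
			return "value-2", nil
		}
		return "value-1", nil
	}
	g.Eventually(pollLogs, 2*time.Second, 10*time.Millisecond).Should(gomega.ContainSubstring("value-2"))
}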

test/e2e/common/projected_downwardapi.go (View File)

@@ -26,17 +26,17 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
var _ = Describe("[sig-storage] Projected downwardAPI", func() {
var _ = ginkgo.Describe("[sig-storage] Projected downwardAPI", func() {
f := framework.NewDefaultFramework("projected")
// How long to wait for a log pod to be displayed
const podLogTimeout = 2 * time.Minute
var podClient *framework.PodClient
BeforeEach(func() {
ginkgo.BeforeEach(func() {
podClient = f.PodClient()
})
@@ -86,7 +86,7 @@ var _ = Describe("[sig-storage] Projected downwardAPI", func() {
})
})
It("should provide podname as non-root with fsgroup [NodeFeature:FSGroup]", func() {
ginkgo.It("should provide podname as non-root with fsgroup [NodeFeature:FSGroup]", func() {
podName := "metadata-volume-" + string(uuid.NewUUID())
uid := int64(1001)
gid := int64(1234)
@@ -100,7 +100,7 @@ var _ = Describe("[sig-storage] Projected downwardAPI", func() {
})
})
It("should provide podname as non-root with fsgroup and defaultMode [NodeFeature:FSGroup]", func() {
ginkgo.It("should provide podname as non-root with fsgroup and defaultMode [NodeFeature:FSGroup]", func() {
podName := "metadata-volume-" + string(uuid.NewUUID())
uid := int64(1001)
gid := int64(1234)
@@ -128,23 +128,23 @@ var _ = Describe("[sig-storage] Projected downwardAPI", func() {
podName := "labelsupdate" + string(uuid.NewUUID())
pod := projectedDownwardAPIVolumePodForUpdateTest(podName, labels, map[string]string{}, "/etc/podinfo/labels")
containerName := "client-container"
By("Creating the pod")
ginkgo.By("Creating the pod")
podClient.CreateSync(pod)
Eventually(func() (string, error) {
gomega.Eventually(func() (string, error) {
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, containerName)
},
podLogTimeout, framework.Poll).Should(ContainSubstring("key1=\"value1\"\n"))
podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("key1=\"value1\"\n"))
//modify labels
podClient.Update(podName, func(pod *v1.Pod) {
pod.Labels["key3"] = "value3"
})
Eventually(func() (string, error) {
gomega.Eventually(func() (string, error) {
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
},
podLogTimeout, framework.Poll).Should(ContainSubstring("key3=\"value3\"\n"))
podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("key3=\"value3\"\n"))
})
/*
@@ -159,26 +159,26 @@ var _ = Describe("[sig-storage] Projected downwardAPI", func() {
pod := projectedDownwardAPIVolumePodForUpdateTest(podName, map[string]string{}, annotations, "/etc/podinfo/annotations")
containerName := "client-container"
By("Creating the pod")
ginkgo.By("Creating the pod")
podClient.CreateSync(pod)
pod, err := podClient.Get(pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "Failed to get pod %q", pod.Name)
Eventually(func() (string, error) {
gomega.Eventually(func() (string, error) {
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
},
podLogTimeout, framework.Poll).Should(ContainSubstring("builder=\"bar\"\n"))
podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("builder=\"bar\"\n"))
//modify annotations
podClient.Update(podName, func(pod *v1.Pod) {
pod.Annotations["builder"] = "foo"
})
Eventually(func() (string, error) {
gomega.Eventually(func() (string, error) {
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
},
podLogTimeout, framework.Poll).Should(ContainSubstring("builder=\"foo\"\n"))
podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("builder=\"foo\"\n"))
})
/*

test/e2e/common/projected_secret.go (View File)

@@ -26,11 +26,11 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
var _ = Describe("[sig-storage] Projected secret", func() {
var _ = ginkgo.Describe("[sig-storage] Projected secret", func() {
f := framework.NewDefaultFramework("projected")
/*
@@ -86,7 +86,7 @@ var _ = Describe("[sig-storage] Projected secret", func() {
doProjectedSecretE2EWithMapping(f, &mode)
})
It("should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance]", func() {
ginkgo.It("should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance]", func() {
var (
namespace2 *v1.Namespace
err error
@@ -125,7 +125,7 @@ var _ = Describe("[sig-storage] Projected secret", func() {
secret = secretForTest(f.Namespace.Name, name)
)
By(fmt.Sprintf("Creating secret with name %s", secret.Name))
ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name))
var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
@@ -252,13 +252,13 @@ var _ = Describe("[sig-storage] Projected secret", func() {
},
}
By(fmt.Sprintf("Creating secret with name %s", deleteSecret.Name))
ginkgo.By(fmt.Sprintf("Creating secret with name %s", deleteSecret.Name))
var err error
if deleteSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(deleteSecret); err != nil {
framework.Failf("unable to create test secret %s: %v", deleteSecret.Name, err)
}
By(fmt.Sprintf("Creating secret with name %s", updateSecret.Name))
ginkgo.By(fmt.Sprintf("Creating secret with name %s", updateSecret.Name))
if updateSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(updateSecret); err != nil {
framework.Failf("unable to create test secret %s: %v", updateSecret.Name, err)
}
@@ -362,65 +362,65 @@ var _ = Describe("[sig-storage] Projected secret", func() {
RestartPolicy: v1.RestartPolicyNever,
},
}
By("Creating the pod")
ginkgo.By("Creating the pod")
f.PodClient().CreateSync(pod)
pollCreateLogs := func() (string, error) {
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, createContainerName)
}
Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("Error reading file /etc/projected-secret-volumes/create/data-1"))
gomega.Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/projected-secret-volumes/create/data-1"))
pollUpdateLogs := func() (string, error) {
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, updateContainerName)
}
Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("Error reading file /etc/projected-secret-volumes/update/data-3"))
gomega.Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/projected-secret-volumes/update/data-3"))
pollDeleteLogs := func() (string, error) {
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, deleteContainerName)
}
Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1"))
gomega.Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
By(fmt.Sprintf("Deleting secret %v", deleteSecret.Name))
ginkgo.By(fmt.Sprintf("Deleting secret %v", deleteSecret.Name))
err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(deleteSecret.Name, &metav1.DeleteOptions{})
framework.ExpectNoError(err, "Failed to delete secret %q in namespace %q", deleteSecret.Name, f.Namespace.Name)
By(fmt.Sprintf("Updating secret %v", updateSecret.Name))
ginkgo.By(fmt.Sprintf("Updating secret %v", updateSecret.Name))
updateSecret.ResourceVersion = "" // to force update
delete(updateSecret.Data, "data-1")
updateSecret.Data["data-3"] = []byte("value-3")
_, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(updateSecret)
framework.ExpectNoError(err, "Failed to update secret %q in namespace %q", updateSecret.Name, f.Namespace.Name)
By(fmt.Sprintf("Creating secret with name %s", createSecret.Name))
ginkgo.By(fmt.Sprintf("Creating secret with name %s", createSecret.Name))
if createSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(createSecret); err != nil {
framework.Failf("unable to create test secret %s: %v", createSecret.Name, err)
}
By("waiting to observe update in volume")
ginkgo.By("waiting to observe update in volume")
Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1"))
Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-3"))
Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("Error reading file /etc/projected-secret-volumes/delete/data-1"))
gomega.Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
gomega.Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-3"))
gomega.Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/projected-secret-volumes/delete/data-1"))
})
//The pod stays pending during volume creation until the secret objects are available,
//or until mounting the secret volume times out. There is no secret object defined for the pod, so pod creation should time out unless the volume is marked optional.
//Slow (~5 mins)
It("Should fail non-optional pod creation due to secret object does not exist [Slow]", func() {
ginkgo.It("Should fail non-optional pod creation due to secret object does not exist [Slow]", func() {
volumeMountPath := "/etc/projected-secret-volumes"
podName := "pod-secrets-" + string(uuid.NewUUID())
err := createNonOptionalSecretPod(f, volumeMountPath, podName)
Expect(err).To(HaveOccurred(), "created pod %q with non-optional secret in namespace %q", podName, f.Namespace.Name)
gomega.Expect(err).To(gomega.HaveOccurred(), "created pod %q with non-optional secret in namespace %q", podName, f.Namespace.Name)
})
//A secret object is defined for the pod. If a key is specified which is not present in the secret,
// the volume setup will error during pod creation unless it is marked optional.
//Slow (~5 mins)
It("Should fail non-optional pod creation due to the key in the secret object does not exist [Slow]", func() {
ginkgo.It("Should fail non-optional pod creation due to the key in the secret object does not exist [Slow]", func() {
volumeMountPath := "/etc/secret-volumes"
podName := "pod-secrets-" + string(uuid.NewUUID())
err := createNonOptionalSecretPodWithSecret(f, volumeMountPath, podName)
Expect(err).To(HaveOccurred(), "created pod %q with non-optional secret in namespace %q", podName, f.Namespace.Name)
gomega.Expect(err).To(gomega.HaveOccurred(), "created pod %q with non-optional secret in namespace %q", podName, f.Namespace.Name)
})
})
@@ -432,7 +432,7 @@ func doProjectedSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int
secret = secretForTest(f.Namespace.Name, secretName)
)
By(fmt.Sprintf("Creating projection with secret that has name %s", secret.Name))
ginkgo.By(fmt.Sprintf("Creating projection with secret that has name %s", secret.Name))
var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
@@ -510,7 +510,7 @@ func doProjectedSecretE2EWithMapping(f *framework.Framework, mode *int32) {
secret = secretForTest(f.Namespace.Name, name)
)
By(fmt.Sprintf("Creating projection with secret that has name %s", secret.Name))
ginkgo.By(fmt.Sprintf("Creating projection with secret that has name %s", secret.Name))
var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)

test/e2e/common/runtime.go (View File)

@@ -27,16 +27,16 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
gomegatypes "github.com/onsi/gomega/types"
)
var _ = framework.KubeDescribe("Container Runtime", func() {
f := framework.NewDefaultFramework("container-runtime")
Describe("blackbox test", func() {
Context("when starting a container that exits", func() {
ginkgo.Describe("blackbox test", func() {
ginkgo.Context("when starting a container that exits", func() {
/*
Release : v1.13
@@ -101,32 +101,32 @@ while true; do sleep 1; done
terminateContainer.Create()
defer terminateContainer.Delete()
By(fmt.Sprintf("Container '%s': should get the expected 'RestartCount'", testContainer.Name))
Eventually(func() (int32, error) {
ginkgo.By(fmt.Sprintf("Container '%s': should get the expected 'RestartCount'", testContainer.Name))
gomega.Eventually(func() (int32, error) {
status, err := terminateContainer.GetStatus()
return status.RestartCount, err
}, ContainerStatusRetryTimeout, ContainerStatusPollInterval).Should(Equal(testCase.RestartCount))
}, ContainerStatusRetryTimeout, ContainerStatusPollInterval).Should(gomega.Equal(testCase.RestartCount))
By(fmt.Sprintf("Container '%s': should get the expected 'Phase'", testContainer.Name))
Eventually(terminateContainer.GetPhase, ContainerStatusRetryTimeout, ContainerStatusPollInterval).Should(Equal(testCase.Phase))
ginkgo.By(fmt.Sprintf("Container '%s': should get the expected 'Phase'", testContainer.Name))
gomega.Eventually(terminateContainer.GetPhase, ContainerStatusRetryTimeout, ContainerStatusPollInterval).Should(gomega.Equal(testCase.Phase))
By(fmt.Sprintf("Container '%s': should get the expected 'Ready' condition", testContainer.Name))
Expect(terminateContainer.IsReady()).Should(Equal(testCase.Ready))
ginkgo.By(fmt.Sprintf("Container '%s': should get the expected 'Ready' condition", testContainer.Name))
gomega.Expect(terminateContainer.IsReady()).Should(gomega.Equal(testCase.Ready))
status, err := terminateContainer.GetStatus()
framework.ExpectNoError(err)
By(fmt.Sprintf("Container '%s': should get the expected 'State'", testContainer.Name))
Expect(GetContainerState(status.State)).To(Equal(testCase.State))
ginkgo.By(fmt.Sprintf("Container '%s': should get the expected 'State'", testContainer.Name))
gomega.Expect(GetContainerState(status.State)).To(gomega.Equal(testCase.State))
By(fmt.Sprintf("Container '%s': should be possible to delete [NodeConformance]", testContainer.Name))
Expect(terminateContainer.Delete()).To(Succeed())
Eventually(terminateContainer.Present, ContainerStatusRetryTimeout, ContainerStatusPollInterval).Should(BeFalse())
ginkgo.By(fmt.Sprintf("Container '%s': should be possible to delete [NodeConformance]", testContainer.Name))
gomega.Expect(terminateContainer.Delete()).To(gomega.Succeed())
gomega.Eventually(terminateContainer.Present, ContainerStatusRetryTimeout, ContainerStatusPollInterval).Should(gomega.BeFalse())
}
})
})
Context("on terminated container", func() {
ginkgo.Context("on terminated container", func() {
rootUser := int64(0)
nonRootUser := int64(10000)
@@ -139,29 +139,29 @@ while true; do sleep 1; done
RestartPolicy: v1.RestartPolicyNever,
}
By("create the container")
ginkgo.By("create the container")
c.Create()
defer c.Delete()
By(fmt.Sprintf("wait for the container to reach %s", expectedPhase))
Eventually(c.GetPhase, ContainerStatusRetryTimeout, ContainerStatusPollInterval).Should(Equal(expectedPhase))
ginkgo.By(fmt.Sprintf("wait for the container to reach %s", expectedPhase))
gomega.Eventually(c.GetPhase, ContainerStatusRetryTimeout, ContainerStatusPollInterval).Should(gomega.Equal(expectedPhase))
By("get the container status")
ginkgo.By("get the container status")
status, err := c.GetStatus()
framework.ExpectNoError(err)
By("the container should be terminated")
Expect(GetContainerState(status.State)).To(Equal(ContainerStateTerminated))
ginkgo.By("the container should be terminated")
gomega.Expect(GetContainerState(status.State)).To(gomega.Equal(ContainerStateTerminated))
By("the termination message should be set")
ginkgo.By("the termination message should be set")
e2elog.Logf("Expected: %v to match Container's Termination Message: %v --", expectedMsg, status.State.Terminated.Message)
Expect(status.State.Terminated.Message).Should(expectedMsg)
gomega.Expect(status.State.Terminated.Message).Should(expectedMsg)
By("delete the container")
Expect(c.Delete()).To(Succeed())
ginkgo.By("delete the container")
gomega.Expect(c.Delete()).To(gomega.Succeed())
}
It("should report termination message [LinuxOnly] if TerminationMessagePath is set [NodeConformance]", func() {
ginkgo.It("should report termination message [LinuxOnly] if TerminationMessagePath is set [NodeConformance]", func() {
// Cannot mount files in Windows Containers.
container := v1.Container{
Image: framework.BusyBoxImage,
@@ -172,7 +172,7 @@ while true; do sleep 1; done
RunAsUser: &rootUser,
},
}
matchTerminationMessage(container, v1.PodSucceeded, Equal("DONE"))
matchTerminationMessage(container, v1.PodSucceeded, gomega.Equal("DONE"))
})
/*
@@ -191,7 +191,7 @@ while true; do sleep 1; done
RunAsUser: &nonRootUser,
},
}
matchTerminationMessage(container, v1.PodSucceeded, Equal("DONE"))
matchTerminationMessage(container, v1.PodSucceeded, gomega.Equal("DONE"))
})
/*
@@ -208,7 +208,7 @@ while true; do sleep 1; done
TerminationMessagePath: "/dev/termination-log",
TerminationMessagePolicy: v1.TerminationMessageFallbackToLogsOnError,
}
matchTerminationMessage(container, v1.PodFailed, Equal("DONE"))
matchTerminationMessage(container, v1.PodFailed, gomega.Equal("DONE"))
})
/*
@@ -225,7 +225,7 @@ while true; do sleep 1; done
TerminationMessagePath: "/dev/termination-log",
TerminationMessagePolicy: v1.TerminationMessageFallbackToLogsOnError,
}
matchTerminationMessage(container, v1.PodSucceeded, Equal(""))
matchTerminationMessage(container, v1.PodSucceeded, gomega.Equal(""))
})
/*
@@ -242,11 +242,11 @@ while true; do sleep 1; done
TerminationMessagePath: "/dev/termination-log",
TerminationMessagePolicy: v1.TerminationMessageFallbackToLogsOnError,
}
matchTerminationMessage(container, v1.PodSucceeded, Equal("OK"))
matchTerminationMessage(container, v1.PodSucceeded, gomega.Equal("OK"))
})
})
Context("when running a container with a new image", func() {
ginkgo.Context("when running a container with a new image", func() {
// Images used for ConformanceContainer are not added into NodeImageWhiteList, because this test is
// testing image pulling; these images don't need to be prepulled. The ImagePullPolicy
@@ -284,7 +284,7 @@ while true; do sleep 1; done
Type: v1.SecretTypeDockerConfigJson,
}
secret.Name = "image-pull-secret-" + string(uuid.NewUUID())
By("create image pull secret")
ginkgo.By("create image pull secret")
_, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret)
framework.ExpectNoError(err)
defer f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(secret.Name, nil)
@@ -331,15 +331,15 @@ while true; do sleep 1; done
const flakeRetry = 3
for i := 1; i <= flakeRetry; i++ {
var err error
By("create the container")
ginkgo.By("create the container")
container.Create()
By("check the container status")
ginkgo.By("check the container status")
for start := time.Now(); time.Since(start) < ContainerStatusRetryTimeout; time.Sleep(ContainerStatusPollInterval) {
if err = checkContainerStatus(); err == nil {
break
}
}
By("delete the container")
ginkgo.By("delete the container")
container.Delete()
if err == nil {
break
@@ -352,50 +352,50 @@ while true; do sleep 1; done
}
}
It("should not be able to pull image from invalid registry [NodeConformance]", func() {
ginkgo.It("should not be able to pull image from invalid registry [NodeConformance]", func() {
image := "invalid.com/invalid/alpine:3.1"
imagePullTest(image, false, v1.PodPending, true, false)
})
It("should not be able to pull non-existing image from gcr.io [NodeConformance]", func() {
ginkgo.It("should not be able to pull non-existing image from gcr.io [NodeConformance]", func() {
image := "k8s.gcr.io/invalid-image:invalid-tag"
imagePullTest(image, false, v1.PodPending, true, false)
})
It("should be able to pull image from gcr.io [LinuxOnly] [NodeConformance]", func() {
ginkgo.It("should be able to pull image from gcr.io [LinuxOnly] [NodeConformance]", func() {
image := "gcr.io/google-containers/debian-base:0.4.1"
imagePullTest(image, false, v1.PodRunning, false, false)
})
It("should be able to pull image from gcr.io [NodeConformance]", func() {
ginkgo.It("should be able to pull image from gcr.io [NodeConformance]", func() {
framework.SkipUnlessNodeOSDistroIs("windows")
image := "gcr.io/kubernetes-e2e-test-images/windows-nanoserver:v1"
imagePullTest(image, false, v1.PodRunning, false, true)
})
It("should be able to pull image from docker hub [LinuxOnly] [NodeConformance]", func() {
ginkgo.It("should be able to pull image from docker hub [LinuxOnly] [NodeConformance]", func() {
image := "alpine:3.7"
imagePullTest(image, false, v1.PodRunning, false, false)
})
It("should be able to pull image from docker hub [NodeConformance]", func() {
ginkgo.It("should be able to pull image from docker hub [NodeConformance]", func() {
framework.SkipUnlessNodeOSDistroIs("windows")
// TODO(claudiub): Switch to nanoserver image manifest list.
image := "e2eteam/busybox:1.29"
imagePullTest(image, false, v1.PodRunning, false, true)
})
It("should not be able to pull from private registry without secret [NodeConformance]", func() {
ginkgo.It("should not be able to pull from private registry without secret [NodeConformance]", func() {
image := "gcr.io/authenticated-image-pulling/alpine:3.7"
imagePullTest(image, false, v1.PodPending, true, false)
})
It("should be able to pull from private registry with secret [LinuxOnly] [NodeConformance]", func() {
ginkgo.It("should be able to pull from private registry with secret [LinuxOnly] [NodeConformance]", func() {
image := "gcr.io/authenticated-image-pulling/alpine:3.7"
imagePullTest(image, true, v1.PodRunning, false, false)
})
It("should be able to pull from private registry with secret [NodeConformance]", func() {
ginkgo.It("should be able to pull from private registry with secret [NodeConformance]", func() {
framework.SkipUnlessNodeOSDistroIs("windows")
image := "gcr.io/authenticated-image-pulling/windows-nanoserver:v1"
imagePullTest(image, true, v1.PodRunning, false, true)

test/e2e/common/runtimeclass.go (View File)

@@ -31,7 +31,7 @@ import (
imageutils "k8s.io/kubernetes/test/utils/image"
utilpointer "k8s.io/utils/pointer"
. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo"
)
const (
@@ -43,16 +43,16 @@
DockerRuntimeHandler = "docker"
)
var _ = Describe("[sig-node] RuntimeClass", func() {
var _ = ginkgo.Describe("[sig-node] RuntimeClass", func() {
f := framework.NewDefaultFramework("runtimeclass")
It("should reject a Pod requesting a non-existent RuntimeClass", func() {
ginkgo.It("should reject a Pod requesting a non-existent RuntimeClass", func() {
rcName := f.Namespace.Name + "-nonexistent"
pod := createRuntimeClassPod(f, rcName)
expectSandboxFailureEvent(f, pod, fmt.Sprintf("\"%s\" not found", rcName))
})
It("should reject a Pod requesting a RuntimeClass with an unconfigured handler", func() {
ginkgo.It("should reject a Pod requesting a RuntimeClass with an unconfigured handler", func() {
handler := f.Namespace.Name + "-handler"
rcName := createRuntimeClass(f, "unconfigured-handler", handler)
pod := createRuntimeClassPod(f, rcName)
@@ -60,7 +60,7 @@ var _ = Describe("[sig-node] RuntimeClass", func() {
})
// This test requires that the PreconfiguredRuntimeHandler has already been set up on nodes.
It("should run a Pod requesting a RuntimeClass with a configured handler [NodeFeature:RuntimeHandler]", func() {
ginkgo.It("should run a Pod requesting a RuntimeClass with a configured handler [NodeFeature:RuntimeHandler]", func() {
// The built-in docker runtime does not support configuring runtime handlers.
handler := PreconfiguredRuntimeHandler
if framework.TestContext.ContainerRuntime == "docker" {
@@ -72,15 +72,15 @@ var _ = Describe("[sig-node] RuntimeClass", func() {
expectPodSuccess(f, pod)
})
It("should reject a Pod requesting a deleted RuntimeClass", func() {
ginkgo.It("should reject a Pod requesting a deleted RuntimeClass", func() {
rcName := createRuntimeClass(f, "delete-me", "runc")
rcClient := f.ClientSet.NodeV1beta1().RuntimeClasses()
By("Deleting RuntimeClass "+rcName, func() {
ginkgo.By("Deleting RuntimeClass "+rcName, func() {
err := rcClient.Delete(rcName, nil)
framework.ExpectNoError(err, "failed to delete RuntimeClass %s", rcName)
By("Waiting for the RuntimeClass to disappear")
ginkgo.By("Waiting for the RuntimeClass to disappear")
framework.ExpectNoError(wait.PollImmediate(framework.Poll, time.Minute, func() (bool, error) {
_, err := rcClient.Get(rcName, metav1.GetOptions{})
if errors.IsNotFound(err) {