From cee02e62b7cd23c498e9c528cdc82c793e5afa8f Mon Sep 17 00:00:00 2001
From: danielqsj
Date: Mon, 25 Feb 2019 11:41:27 +0800
Subject: [PATCH] Fix golint failures for e2e/windows

---
 hack/.golint_failures          |  1 -
 test/e2e/windows/density.go    | 34 +++++++++++++++++-----------------
 test/e2e/windows/framework.go  |  1 +
 test/e2e/windows/networking.go | 16 ++++++++--------
 4 files changed, 26 insertions(+), 26 deletions(-)

diff --git a/hack/.golint_failures b/hack/.golint_failures
index c028ddbbcc..013128aa14 100644
--- a/hack/.golint_failures
+++ b/hack/.golint_failures
@@ -661,7 +661,6 @@ test/e2e/storage/drivers
 test/e2e/storage/testsuites
 test/e2e/storage/utils
 test/e2e/storage/vsphere
-test/e2e/windows
 test/e2e_kubeadm
 test/e2e_node
 test/e2e_node/builder
diff --git a/test/e2e/windows/density.go b/test/e2e/windows/density.go
index 0bdc777b4d..d94a25712b 100644
--- a/test/e2e/windows/density.go
+++ b/test/e2e/windows/density.go
@@ -32,20 +32,20 @@ import (
 	"k8s.io/kubernetes/test/e2e/framework"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 
-	. "github.com/onsi/ginkgo"
-	. "github.com/onsi/gomega"
+	"github.com/onsi/ginkgo"
+	"github.com/onsi/gomega"
 )
 
 var _ = SIGDescribe("[Feature:Windows] Density [Serial] [Slow]", func() {
 	f := framework.NewDefaultFramework("density-test-windows")
 
-	BeforeEach(func() {
+	ginkgo.BeforeEach(func() {
 		// NOTE(vyta): these tests are Windows specific
 		framework.SkipUnlessNodeOSDistroIs("windows")
 	})
 
-	Context("create a batch of pods", func() {
+	ginkgo.Context("create a batch of pods", func() {
 		// TODO(coufon): the values are generous, set more precise limits with benchmark data
 		// and add more tests
 		dTests := []densityTest{
@@ -66,7 +66,7 @@ var _ = SIGDescribe("[Feature:Windows] Density [Serial] [Slow]", func() {
 		for _, testArg := range dTests {
 			itArg := testArg
 			desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %v interval", itArg.podsNr, itArg.interval)
-			It(desc, func() {
+			ginkgo.It(desc, func() {
 				itArg.createMethod = "batch"
 				runDensityBatchTest(f, itArg)
 
@@ -114,15 +114,15 @@ func runDensityBatchTest(f *framework.Framework, testArg densityTest) (time.Dura
 	go controller.Run(stopCh)
 	defer close(stopCh)
 
-	By("Creating a batch of pods")
+	ginkgo.By("Creating a batch of pods")
 	// It returns a map['pod name']'creation time' containing the creation timestamps
 	createTimes := createBatchPodWithRateControl(f, pods, testArg.interval)
 
-	By("Waiting for all Pods to be observed by the watch...")
+	ginkgo.By("Waiting for all Pods to be observed by the watch...")
 
-	Eventually(func() bool {
+	gomega.Eventually(func() bool {
 		return len(watchTimes) == testArg.podsNr
-	}, 10*time.Minute, 10*time.Second).Should(BeTrue())
+	}, 10*time.Minute, 10*time.Second).Should(gomega.BeTrue())
 
 	if len(watchTimes) < testArg.podsNr {
 		framework.Failf("Timeout reached waiting for all Pods to be observed by the watch.")
@@ -138,7 +138,7 @@ func runDensityBatchTest(f *framework.Framework, testArg densityTest) (time.Dura
 	for name, create := range createTimes {
 		watch, ok := watchTimes[name]
-		Expect(ok).To(Equal(true))
+		gomega.Expect(ok).To(gomega.Equal(true))
 
 		e2eLags = append(e2eLags,
 			framework.PodLatencyData{Name: name, Latency: watch.Time.Sub(create.Time)})
 
@@ -182,7 +182,7 @@ func newInformerWatchPod(f *framework.Framework, mutex *sync.Mutex, watchTimes m
 	checkPodRunning := func(p *v1.Pod) {
 		mutex.Lock()
 		defer mutex.Unlock()
-		defer GinkgoRecover()
+		defer ginkgo.GinkgoRecover()
 
 		if p.Status.Phase == v1.PodRunning {
 			if _, found := watchTimes[p.Name]; !found {
@@ -208,12 +208,12 @@ func newInformerWatchPod(f *framework.Framework, mutex *sync.Mutex, watchTimes m
 		cache.ResourceEventHandlerFuncs{
 			AddFunc: func(obj interface{}) {
 				p, ok := obj.(*v1.Pod)
-				Expect(ok).To(Equal(true))
+				gomega.Expect(ok).To(gomega.Equal(true))
 				go checkPodRunning(p)
 			},
 			UpdateFunc: func(oldObj, newObj interface{}) {
 				p, ok := newObj.(*v1.Pod)
-				Expect(ok).To(Equal(true))
+				gomega.Expect(ok).To(gomega.Equal(true))
 				go checkPodRunning(p)
 			},
 		},
@@ -288,14 +288,14 @@ func deletePodsSync(f *framework.Framework, pods []*v1.Pod) {
 	for _, pod := range pods {
 		wg.Add(1)
 		go func(pod *v1.Pod) {
-			defer GinkgoRecover()
+			defer ginkgo.GinkgoRecover()
 			defer wg.Done()
 
 			err := f.PodClient().Delete(pod.ObjectMeta.Name, metav1.NewDeleteOptions(30))
-			Expect(err).NotTo(HaveOccurred())
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
 
-			Expect(framework.WaitForPodToDisappear(f.ClientSet, f.Namespace.Name, pod.ObjectMeta.Name, labels.Everything(),
-				30*time.Second, 10*time.Minute)).NotTo(HaveOccurred())
+			gomega.Expect(framework.WaitForPodToDisappear(f.ClientSet, f.Namespace.Name, pod.ObjectMeta.Name, labels.Everything(),
+				30*time.Second, 10*time.Minute)).NotTo(gomega.HaveOccurred())
 		}(pod)
 	}
 	wg.Wait()
diff --git a/test/e2e/windows/framework.go b/test/e2e/windows/framework.go
index e6e03def3a..c7288734bc 100644
--- a/test/e2e/windows/framework.go
+++ b/test/e2e/windows/framework.go
@@ -18,6 +18,7 @@ package windows
 
 import "github.com/onsi/ginkgo"
 
+// SIGDescribe annotates the test with the SIG label.
 func SIGDescribe(text string, body func()) bool {
 	return ginkgo.Describe("[sig-windows] "+text, body)
 }
diff --git a/test/e2e/windows/networking.go b/test/e2e/windows/networking.go
index ba7e007478..54f5c4cbe6 100644
--- a/test/e2e/windows/networking.go
+++ b/test/e2e/windows/networking.go
@@ -20,21 +20,21 @@ import (
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/kubernetes/test/e2e/framework"
 
-	. "github.com/onsi/ginkgo"
+	"github.com/onsi/ginkgo"
 )
 
 // NOTE(claudiub): Spawning Pods With HostNetwork enabled is not currently supported by Windows Kubelet.
 // TODO(claudiub): Remove this test suite once this PR merges:
 // https://github.com/kubernetes/kubernetes/pull/69525
-var _ = Describe("[sig-network] [sig-windows] Networking", func() {
+var _ = ginkgo.Describe("[sig-network] [sig-windows] Networking", func() {
 	f := framework.NewDefaultFramework("pod-network-test")
 
-	BeforeEach(func() {
+	ginkgo.BeforeEach(func() {
 		// NOTE(claudiub): These tests are Windows specific.
 		framework.SkipUnlessNodeOSDistroIs("windows")
 	})
 
-	Describe("Granular Checks: Pods", func() {
+	ginkgo.Describe("Granular Checks: Pods", func() {
 
 		// Try to hit all endpoints through a test container, retry 5 times,
 		// expect exactly one unique hostname. Each of these endpoints reports
@@ -45,7 +45,7 @@ var _ = Describe("[sig-network] [sig-windows] Networking", func() {
 
 			Description: Create a hostexec pod that is capable of curl to netcat commands. Create a test Pod that will act as a webserver front end exposing ports 8080 for tcp and 8081 for udp. The netserver service proxies are created on specified number of nodes. The kubectl exec on the webserver container MUST reach a http port on the each of service proxy endpoints in the cluster and the request MUST be successful. Container will execute curl command to reach the service port within specified max retry limit and MUST result in reporting unique hostnames.
*/ - It("should function for intra-pod communication: http", func() { + ginkgo.It("should function for intra-pod communication: http", func() { config := framework.NewCoreNetworkingTestConfig(f, false) for _, endpointPod := range config.EndpointPods { config.DialFromTestContainer("http", endpointPod.Status.PodIP, framework.EndpointHttpPort, config.MaxTries, 0, sets.NewString(endpointPod.Name)) @@ -58,7 +58,7 @@ var _ = Describe("[sig-network] [sig-windows] Networking", func() { Description: Create a hostexec pod that is capable of curl to netcat commands. Create a test Pod that will act as a webserver front end exposing ports 8080 for tcp and 8081 for udp. The netserver service proxies are created on specified number of nodes. The kubectl exec on the webserver container MUST reach a udp port on the each of service proxy endpoints in the cluster and the request MUST be successful. Container will execute curl command to reach the service port within specified max retry limit and MUST result in reporting unique hostnames. */ - It("should function for intra-pod communication: udp", func() { + ginkgo.It("should function for intra-pod communication: udp", func() { config := framework.NewCoreNetworkingTestConfig(f, false) for _, endpointPod := range config.EndpointPods { config.DialFromTestContainer("udp", endpointPod.Status.PodIP, framework.EndpointUdpPort, config.MaxTries, 0, sets.NewString(endpointPod.Name)) @@ -71,7 +71,7 @@ var _ = Describe("[sig-network] [sig-windows] Networking", func() { Description: Create a hostexec pod that is capable of curl to netcat commands. Create a test Pod that will act as a webserver front end exposing ports 8080 for tcp and 8081 for udp. The netserver service proxies are created on specified number of nodes. The kubectl exec on the webserver container MUST reach a http port on the each of service proxy endpoints in the cluster using a http post(protocol=tcp) and the request MUST be successful. Container will execute curl command to reach the service port within specified max retry limit and MUST result in reporting unique hostnames. */ - It("should function for node-pod communication: http", func() { + ginkgo.It("should function for node-pod communication: http", func() { config := framework.NewCoreNetworkingTestConfig(f, false) for _, endpointPod := range config.EndpointPods { config.DialFromNode("http", endpointPod.Status.PodIP, framework.EndpointHttpPort, config.MaxTries, 0, sets.NewString(endpointPod.Name)) @@ -84,7 +84,7 @@ var _ = Describe("[sig-network] [sig-windows] Networking", func() { Description: Create a hostexec pod that is capable of curl to netcat commands. Create a test Pod that will act as a webserver front end exposing ports 8080 for tcp and 8081 for udp. The netserver service proxies are created on specified number of nodes. The kubectl exec on the webserver container MUST reach a http port on the each of service proxy endpoints in the cluster using a http post(protocol=udp) and the request MUST be successful. Container will execute curl command to reach the service port within specified max retry limit and MUST result in reporting unique hostnames. */ - It("should function for node-pod communication: udp", func() { + ginkgo.It("should function for node-pod communication: udp", func() { config := framework.NewCoreNetworkingTestConfig(f, false) for _, endpointPod := range config.EndpointPods { config.DialFromNode("udp", endpointPod.Status.PodIP, framework.EndpointUdpPort, config.MaxTries, 0, sets.NewString(endpointPod.Name))