Fix golint failures of e2e/framework/p*.go

This fixes golint failures of
- test/e2e/framework/perf_util.go
- test/e2e/framework/pods.go
- test/e2e/framework/profile_gatherer.go
- test/e2e/framework/provider.go
- test/e2e/framework/psp_util.go
k3s-v1.15.3
Kenichi Omichi 2019-03-29 17:55:19 +00:00
parent 733f2478d3
commit a81dd53e53
7 changed files with 63 additions and 34 deletions
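The changes below follow the usual golint conventions: initialisms such as "API" stay fully capitalized in identifiers, every exported name gets a doc comment that starts with that name, and dot imports of ginkgo/gomega are replaced with package-qualified calls. A minimal illustrative sketch of those rules (hypothetical names, not code from this commit):

// Illustrative only: a tiny file written the way golint wants it
// (hypothetical names, not code from this commit).
package main

import "fmt"

// currentAPICallMetricsVersion keeps the "API" initialism fully capitalized;
// golint flags mixed-case spellings such as currentApiCallMetricsVersion.
const currentAPICallMetricsVersion = "v1"

// PrintVersion is exported, so golint expects a doc comment that starts with
// the identifier's name, like the comments added throughout this commit.
func PrintVersion() {
	fmt.Println(currentAPICallMetricsVersion)
}

func main() {
	PrintVersion()
}

After this change, running golint on one of the listed files (for example, golint test/e2e/framework/pods.go) should no longer report findings for these issues.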

test/e2e/framework/framework.go

@@ -407,7 +407,7 @@ func (f *Framework) CreateNamespace(baseName string, labels map[string]string) (
 	f.AddNamespacesToDelete(ns)
 	if err == nil && !f.SkipPrivilegedPSPBinding {
-		CreatePrivilegedPSPBinding(f, ns.Name)
+		createPrivilegedPSPBinding(f, ns.Name)
 	}
 	return ns, err

test/e2e/framework/metrics_util.go

@@ -417,7 +417,7 @@ func (a *APIResponsiveness) PrintHumanReadable() string {
 }
 func (a *APIResponsiveness) PrintJSON() string {
-	return PrettyPrintJSON(ApiCallToPerfData(a))
+	return PrettyPrintJSON(APICallToPerfData(a))
 }
 func (a *APIResponsiveness) Len() int { return len(a.APICalls) }

test/e2e/framework/perf_util.go

@@ -25,13 +25,13 @@ import (
 // TODO(random-liu): Change the tests to actually use PerfData from the beginning instead of
 // translating one to the other here.
-// currentApiCallMetricsVersion is the current apicall performance metrics version. We should
+// currentAPICallMetricsVersion is the current apicall performance metrics version. We should
 // bump up the version each time we make incompatible change to the metrics.
-const currentApiCallMetricsVersion = "v1"
+const currentAPICallMetricsVersion = "v1"
-// ApiCallToPerfData transforms APIResponsiveness to PerfData.
-func ApiCallToPerfData(apicalls *APIResponsiveness) *perftype.PerfData {
-	perfData := &perftype.PerfData{Version: currentApiCallMetricsVersion}
+// APICallToPerfData transforms APIResponsiveness to PerfData.
+func APICallToPerfData(apicalls *APIResponsiveness) *perftype.PerfData {
+	perfData := &perftype.PerfData{Version: currentAPICallMetricsVersion}
 	for _, apicall := range apicalls.APICalls {
 		item := perftype.DataItem{
 			Data: map[string]float64{
@@ -70,7 +70,7 @@ func latencyToPerfData(l LatencyMetric, name string) perftype.DataItem {
 // PodStartupLatencyToPerfData transforms PodStartupLatency to PerfData.
 func PodStartupLatencyToPerfData(latency *PodStartupLatency) *perftype.PerfData {
-	perfData := &perftype.PerfData{Version: currentApiCallMetricsVersion}
+	perfData := &perftype.PerfData{Version: currentAPICallMetricsVersion}
 	perfData.DataItems = append(perfData.DataItems, latencyToPerfData(latency.CreateToScheduleLatency, "create_to_schedule"))
 	perfData.DataItems = append(perfData.DataItems, latencyToPerfData(latency.ScheduleToRunLatency, "schedule_to_run"))
 	perfData.DataItems = append(perfData.DataItems, latencyToPerfData(latency.RunToWatchLatency, "run_to_watch"))

test/e2e/framework/pods.go

@@ -34,8 +34,8 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/events"
 	"k8s.io/kubernetes/pkg/kubelet/sysctl"
-	. "github.com/onsi/ginkgo"
-	. "github.com/onsi/gomega"
+	"github.com/onsi/ginkgo"
+	"github.com/onsi/gomega"
 )
 const DefaultPodDeletionTimeout = 3 * time.Minute
@@ -45,7 +45,7 @@ const DefaultPodDeletionTimeout = 3 * time.Minute
 // node e2e test.
 var ImageWhiteList sets.String
-// Convenience method for getting a pod client interface in the framework's namespace,
+// PodClient is a convenience method for getting a pod client interface in the framework's namespace,
 // possibly applying test-suite specific transformations to the pod spec, e.g. for
 // node e2e pod scheduling.
 func (f *Framework) PodClient() *PodClient {
@ -55,7 +55,7 @@ func (f *Framework) PodClient() *PodClient {
}
}
// Convenience method for getting a pod client interface in an alternative namespace,
// PodClientNS is a convenience method for getting a pod client interface in an alternative namespace,
// possibly applying test-suite specific transformations to the pod spec, e.g. for
// node e2e pod scheduling.
func (f *Framework) PodClientNS(namespace string) *PodClient {
@@ -65,6 +65,7 @@ func (f *Framework) PodClientNS(namespace string) *PodClient {
 	}
 }
+// PodClient is a struct for pod client.
 type PodClient struct {
 	f *Framework
 	v1core.PodInterface
@@ -96,15 +97,15 @@ func (c *PodClient) Create(pod *v1.Pod) *v1.Pod {
 func (c *PodClient) CreateEventually(pod *v1.Pod, opts ...interface{}) *v1.Pod {
 	c.mungeSpec(pod)
 	var ret *v1.Pod
-	Eventually(func() error {
+	gomega.Eventually(func() error {
 		p, err := c.PodInterface.Create(pod)
 		ret = p
 		return err
-	}, opts...).ShouldNot(HaveOccurred(), "Failed to create %q pod", pod.GetName())
+	}, opts...).ShouldNot(gomega.HaveOccurred(), "Failed to create %q pod", pod.GetName())
 	return ret
 }
-// CreateSync creates a new pod according to the framework specifications in the given namespace, and waits for it to start.
+// CreateSyncInNamespace creates a new pod according to the framework specifications in the given namespace, and waits for it to start.
 func (c *PodClient) CreateSyncInNamespace(pod *v1.Pod, namespace string) *v1.Pod {
 	p := c.Create(pod)
 	ExpectNoError(WaitForPodNameRunningInNamespace(c.f.ClientSet, p.Name, namespace))
@@ -127,7 +128,7 @@ func (c *PodClient) CreateBatch(pods []*v1.Pod) []*v1.Pod {
 		wg.Add(1)
 		go func(i int, pod *v1.Pod) {
 			defer wg.Done()
-			defer GinkgoRecover()
+			defer ginkgo.GinkgoRecover()
 			ps[i] = c.CreateSync(pod)
 		}(i, pod)
 	}
@@ -171,8 +172,8 @@ func (c *PodClient) DeleteSyncInNamespace(name string, namespace string, options
 	if err != nil && !errors.IsNotFound(err) {
 		Failf("Failed to delete pod %q: %v", name, err)
 	}
-	Expect(WaitForPodToDisappear(c.f.ClientSet, namespace, name, labels.Everything(),
-		2*time.Second, timeout)).To(Succeed(), "wait for pod %q to disappear", name)
+	gomega.Expect(WaitForPodToDisappear(c.f.ClientSet, namespace, name, labels.Everything(),
+		2*time.Second, timeout)).To(gomega.Succeed(), "wait for pod %q to disappear", name)
 }
 // mungeSpec apply test-suite specific transformations to the pod spec.
@@ -181,7 +182,7 @@ func (c *PodClient) mungeSpec(pod *v1.Pod) {
 		return
 	}
-	Expect(pod.Spec.NodeName).To(Or(BeZero(), Equal(TestContext.NodeName)), "Test misconfigured")
+	gomega.Expect(pod.Spec.NodeName).To(gomega.Or(gomega.BeZero(), gomega.Equal(TestContext.NodeName)), "Test misconfigured")
 	pod.Spec.NodeName = TestContext.NodeName
 	// Node e2e does not support the default DNSClusterFirst policy. Set
 	// the policy to DNSDefault, which is configured per node.
@@ -204,18 +205,18 @@ func (c *PodClient) mungeSpec(pod *v1.Pod) {
 		}
 		// If the image policy is not PullAlways, the image must be in the white list and
 		// pre-pulled.
-		Expect(ImageWhiteList.Has(c.Image)).To(BeTrue(), "Image %q is not in the white list, consider adding it to CommonImageWhiteList in test/e2e/common/util.go or NodeImageWhiteList in test/e2e_node/image_list.go", c.Image)
+		gomega.Expect(ImageWhiteList.Has(c.Image)).To(gomega.BeTrue(), "Image %q is not in the white list, consider adding it to CommonImageWhiteList in test/e2e/common/util.go or NodeImageWhiteList in test/e2e_node/image_list.go", c.Image)
 		// Do not pull images during the tests because the images in white list should have
 		// been prepulled.
 		c.ImagePullPolicy = v1.PullNever
 	}
 }
-// TODO(random-liu): Move pod wait function into this file
+// WaitForSuccess waits for pod to succeed.
+// TODO(random-liu): Move pod wait function into this file
 func (c *PodClient) WaitForSuccess(name string, timeout time.Duration) {
 	f := c.f
-	Expect(WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, "success or failure", timeout,
+	gomega.Expect(WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, "success or failure", timeout,
 		func(pod *v1.Pod) (bool, error) {
 			switch pod.Status.Phase {
 			case v1.PodFailed:
@@ -226,13 +227,13 @@ func (c *PodClient) WaitForSuccess(name string, timeout time.Duration) {
 				return false, nil
 			}
 		},
-	)).To(Succeed(), "wait for pod %q to success", name)
+	)).To(gomega.Succeed(), "wait for pod %q to success", name)
 }
 // WaitForFailure waits for pod to fail.
 func (c *PodClient) WaitForFailure(name string, timeout time.Duration) {
 	f := c.f
-	Expect(WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, "success or failure", timeout,
+	gomega.Expect(WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, "success or failure", timeout,
 		func(pod *v1.Pod) (bool, error) {
 			switch pod.Status.Phase {
 			case v1.PodFailed:
@@ -243,10 +244,10 @@ func (c *PodClient) WaitForFailure(name string, timeout time.Duration) {
 				return false, nil
 			}
 		},
-	)).To(Succeed(), "wait for pod %q to fail", name)
+	)).To(gomega.Succeed(), "wait for pod %q to fail", name)
 }
-// WaitForSuccess waits for pod to succeed or an error event for that pod.
+// WaitForErrorEventOrSuccess waits for pod to succeed or an error event for that pod.
 func (c *PodClient) WaitForErrorEventOrSuccess(pod *v1.Pod) (*v1.Event, error) {
 	var ev *v1.Event
 	err := wait.Poll(Poll, PodStartTimeout, func() (bool, error) {
@@ -287,6 +288,7 @@ func (c *PodClient) MatchContainerOutput(name string, containerName string, expe
 	return nil
 }
+// PodIsReady returns true if the specified pod is ready. Otherwise false.
 func (c *PodClient) PodIsReady(name string) bool {
 	pod, err := c.Get(name, metav1.GetOptions{})
 	ExpectNoError(err)
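With the dot imports gone, every ginkgo and gomega call in this file becomes package-qualified, as the hunks above show. A self-contained example of a test written in that qualified style (sample names only, not part of the framework):

package sample_test

import (
	"testing"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

// TestSample wires gomega's fail handler into ginkgo and runs the specs.
func TestSample(t *testing.T) {
	gomega.RegisterFailHandler(ginkgo.Fail)
	ginkgo.RunSpecs(t, "Sample Suite")
}

var _ = ginkgo.Describe("addition", func() {
	ginkgo.It("adds small integers", func() {
		gomega.Expect(1 + 1).To(gomega.Equal(2))
	})
})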

test/e2e/framework/profile_gatherer.go

@@ -28,7 +28,7 @@ import (
 )
 const (
-	// Default value for how long the CPU profile is gathered for.
+	// DefaultCPUProfileSeconds is default value for how long the CPU profile is gathered for.
 	DefaultCPUProfileSeconds = 30
 )
@@ -168,10 +168,12 @@ func gatherProfile(componentName, profileBaseName, profileKind string) error {
 // that the function finishes. There's also a polling-based gatherer utility for
 // CPU profiles available below.
+// GatherCPUProfile gathers CPU profile.
 func GatherCPUProfile(componentName string, profileBaseName string, wg *sync.WaitGroup) {
 	GatherCPUProfileForSeconds(componentName, profileBaseName, DefaultCPUProfileSeconds, wg)
 }
+// GatherCPUProfileForSeconds gathers CPU profile for specified seconds.
 func GatherCPUProfileForSeconds(componentName string, profileBaseName string, seconds int, wg *sync.WaitGroup) {
 	if wg != nil {
 		defer wg.Done()
@@ -181,6 +183,7 @@ func GatherCPUProfileForSeconds(componentName string, profileBaseName string, se
 	}
 }
+// GatherMemoryProfile gathers memory profile.
 func GatherMemoryProfile(componentName string, profileBaseName string, wg *sync.WaitGroup) {
 	if wg != nil {
 		defer wg.Done()
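The gatherers above treat the *sync.WaitGroup argument as optional: they call wg.Done() only when a non-nil group is passed, so callers can run them in a goroutine and wait for completion. A small stand-alone sketch of that calling pattern (the gather function here is a local stand-in, not the framework's real one):

package main

import (
	"fmt"
	"sync"
)

// gatherCPUProfile mimics the framework's wg contract: signal the WaitGroup,
// if one was provided, once the profile collection finishes.
func gatherCPUProfile(componentName, profileBaseName string, wg *sync.WaitGroup) {
	if wg != nil {
		defer wg.Done()
	}
	fmt.Printf("gathered CPU profile for %s as %s\n", componentName, profileBaseName)
}

func main() {
	var wg sync.WaitGroup
	wg.Add(1)
	go gatherCPUProfile("kube-apiserver", "apiserver_profile", &wg)
	wg.Wait()
}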

test/e2e/framework/provider.go

@@ -27,6 +27,7 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 )
+// Factory is a func which operates provider specific behavior.
 type Factory func() (ProviderInterface, error)
 var (
@@ -112,45 +113,67 @@ type ProviderInterface interface {
 // which doesn't do anything.
 type NullProvider struct{}
+// FrameworkBeforeEach is a base implementation which does BeforeEach.
 func (n NullProvider) FrameworkBeforeEach(f *Framework) {}
-func (n NullProvider) FrameworkAfterEach(f *Framework) {}
+// FrameworkAfterEach is a base implementation which does AfterEach.
+func (n NullProvider) FrameworkAfterEach(f *Framework) {}
+// ResizeGroup is a base implementation which resizes group.
 func (n NullProvider) ResizeGroup(string, int32) error {
 	return fmt.Errorf("Provider does not support InstanceGroups")
 }
+// GetGroupNodes is a base implementation which returns group nodes.
 func (n NullProvider) GetGroupNodes(group string) ([]string, error) {
 	return nil, fmt.Errorf("provider does not support InstanceGroups")
 }
+// GroupSize is a base implementation which returns group size.
 func (n NullProvider) GroupSize(group string) (int, error) {
 	return -1, fmt.Errorf("provider does not support InstanceGroups")
 }
+// DeleteNode is a base implementation which deletes a node.
 func (n NullProvider) DeleteNode(node *v1.Node) error {
 	return fmt.Errorf("provider does not support DeleteNode")
 }
+// CreatePD is a base implementation which creates PD.
 func (n NullProvider) CreatePD(zone string) (string, error) {
 	return "", fmt.Errorf("provider does not support volume creation")
 }
+// DeletePD is a base implementation which deletes PD.
 func (n NullProvider) DeletePD(pdName string) error {
 	return fmt.Errorf("provider does not support volume deletion")
 }
+// CreatePVSource is a base implementation which creates PV source.
 func (n NullProvider) CreatePVSource(zone, diskName string) (*v1.PersistentVolumeSource, error) {
 	return nil, fmt.Errorf("Provider not supported")
 }
+// DeletePVSource is a base implementation which deletes PV source.
 func (n NullProvider) DeletePVSource(pvSource *v1.PersistentVolumeSource) error {
 	return fmt.Errorf("Provider not supported")
 }
+// CleanupServiceResources is a base implementation which cleans up service resources.
 func (n NullProvider) CleanupServiceResources(c clientset.Interface, loadBalancerName, region, zone string) {
 }
+// EnsureLoadBalancerResourcesDeleted is a base implementation which ensures load balancer is deleted.
 func (n NullProvider) EnsureLoadBalancerResourcesDeleted(ip, portRange string) error {
 	return nil
 }
+// LoadBalancerSrcRanges is a base implementation which returns the ranges of ips used by load balancers.
 func (n NullProvider) LoadBalancerSrcRanges() []string {
 	return nil
 }
+// EnableAndDisableInternalLB is a base implementation which returns functions for enabling/disabling an internal LB.
 func (n NullProvider) EnableAndDisableInternalLB() (enable, disable func(svc *v1.Service)) {
 	nop := func(svc *v1.Service) {}
 	return nop, nop
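NullProvider is the no-op base for ProviderInterface: a concrete provider can embed it and override only the hooks it actually supports, keeping the "not supported" defaults for everything else. A self-contained sketch of that embedding pattern with generic names (not the framework's actual types or registration code):

package main

import "fmt"

// Provider is a trimmed-down stand-in for ProviderInterface.
type Provider interface {
	ResizeGroup(group string, size int32) error
	DeleteNode(name string) error
}

// NullBase plays the role of NullProvider: every method is a safe default.
type NullBase struct{}

func (NullBase) ResizeGroup(string, int32) error {
	return fmt.Errorf("provider does not support InstanceGroups")
}

func (NullBase) DeleteNode(string) error {
	return fmt.Errorf("provider does not support DeleteNode")
}

// fakeProvider embeds NullBase and overrides only DeleteNode.
type fakeProvider struct {
	NullBase
}

func (fakeProvider) DeleteNode(name string) error {
	fmt.Printf("deleting node %s\n", name)
	return nil
}

func main() {
	var p Provider = fakeProvider{}
	fmt.Println(p.ResizeGroup("group-a", 3)) // falls back to the NullBase default
	fmt.Println(p.DeleteNode("node-1"))      // uses the override
}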

test/e2e/framework/psp_util.go

@@ -29,7 +29,7 @@ import (
 	"k8s.io/apiserver/pkg/authentication/serviceaccount"
 	"k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp"
-	. "github.com/onsi/ginkgo"
+	"github.com/onsi/ginkgo"
 )
 const (
@@ -41,8 +41,8 @@ var (
 	isPSPEnabled bool
 )
-// Creates a PodSecurityPolicy that allows everything.
-func PrivilegedPSP(name string) *policy.PodSecurityPolicy {
+// privilegedPSP creates a PodSecurityPolicy that allows everything.
+func privilegedPSP(name string) *policy.PodSecurityPolicy {
 	allowPrivilegeEscalation := true
 	return &policy.PodSecurityPolicy{
 		ObjectMeta: metav1.ObjectMeta{
@@ -76,6 +76,7 @@ func PrivilegedPSP(name string) *policy.PodSecurityPolicy {
 	}
 }
+// IsPodSecurityPolicyEnabled returns true if PodSecurityPolicy is enabled. Otherwise false.
 func IsPodSecurityPolicyEnabled(f *Framework) bool {
 	isPSPEnabledOnce.Do(func() {
 		psps, err := f.ClientSet.PolicyV1beta1().PodSecurityPolicies().List(metav1.ListOptions{})
@@ -97,7 +98,7 @@ var (
 	privilegedPSPOnce sync.Once
 )
-func CreatePrivilegedPSPBinding(f *Framework, namespace string) {
+func createPrivilegedPSPBinding(f *Framework, namespace string) {
 	if !IsPodSecurityPolicyEnabled(f) {
 		return
 	}
@@ -111,7 +112,7 @@ func CreatePrivilegedPSPBinding(f *Framework, namespace string) {
 		return
 	}
-	psp := PrivilegedPSP(podSecurityPolicyPrivileged)
+	psp := privilegedPSP(podSecurityPolicyPrivileged)
 	psp, err = f.ClientSet.PolicyV1beta1().PodSecurityPolicies().Create(psp)
 	if !apierrs.IsAlreadyExists(err) {
 		ExpectNoError(err, "Failed to create PSP %s", podSecurityPolicyPrivileged)
@@ -135,7 +136,7 @@ func CreatePrivilegedPSPBinding(f *Framework, namespace string) {
 	})
 	if IsRBACEnabled(f) {
-		By(fmt.Sprintf("Binding the %s PodSecurityPolicy to the default service account in %s",
+		ginkgo.By(fmt.Sprintf("Binding the %s PodSecurityPolicy to the default service account in %s",
 			podSecurityPolicyPrivileged, namespace))
 		BindClusterRoleInNamespace(f.ClientSet.RbacV1beta1(),
 			podSecurityPolicyPrivileged,