From 126a0667c514e5e0f0aae7b4216c334fa1c983d2 Mon Sep 17 00:00:00 2001
From: hangaoshuai
Date: Thu, 1 Mar 2018 09:48:00 +0800
Subject: [PATCH] fix todo: Get rid of this duplicate function
 IsRetryableAPIError in favour of the one in test/utils

---
 test/e2e/framework/ingress_utils.go      |  2 +-
 test/e2e/framework/util.go               | 90 +++++++++++-------
 test/e2e/scalability/load.go             |  2 +-
 test/integration/framework/perf_utils.go |  2 +-
 4 files changed, 45 insertions(+), 51 deletions(-)

diff --git a/test/e2e/framework/ingress_utils.go b/test/e2e/framework/ingress_utils.go
index b04693c62a..5d461d980a 100644
--- a/test/e2e/framework/ingress_utils.go
+++ b/test/e2e/framework/ingress_utils.go
@@ -1305,7 +1305,7 @@ func (j *IngressTestJig) WaitForIngressAddress(c clientset.Interface, ns, ingNam
 		ipOrNameList, err := getIngressAddress(c, ns, ingName, j.Class)
 		if err != nil || len(ipOrNameList) == 0 {
 			j.Logger.Errorf("Waiting for Ingress %v to acquire IP, error %v", ingName, err)
-			if IsRetryableAPIError(err) {
+			if testutils.IsRetryableAPIError(err) {
 				return false, nil
 			}
 			return false, err
diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go
index 3d7b067421..456133a9f7 100644
--- a/test/e2e/framework/util.go
+++ b/test/e2e/framework/util.go
@@ -62,7 +62,6 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/types"
-	utilnet "k8s.io/apimachinery/pkg/util/net"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/uuid"
 	"k8s.io/apimachinery/pkg/util/wait"
@@ -102,7 +101,7 @@ import (
 	taintutils "k8s.io/kubernetes/pkg/util/taints"
 	utilversion "k8s.io/kubernetes/pkg/util/version"
 	"k8s.io/kubernetes/test/e2e/framework/ginkgowrapper"
-	testutil "k8s.io/kubernetes/test/utils"
+	testutils "k8s.io/kubernetes/test/utils"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 	uexec "k8s.io/utils/exec"
 )
@@ -560,7 +559,7 @@ func WaitForPodsSuccess(c clientset.Interface, ns string, successPodLabels map[s
 		podList, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: successPodSelector.String()})
 		if err != nil {
 			Logf("Error getting pods in namespace %q: %v", ns, err)
-			if IsRetryableAPIError(err) {
+			if testutils.IsRetryableAPIError(err) {
 				return false, nil
 			}
 			return false, err
@@ -623,7 +622,7 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN
 		rcList, err := c.CoreV1().ReplicationControllers(ns).List(metav1.ListOptions{})
 		if err != nil {
 			Logf("Error getting replication controllers in namespace '%s': %v", ns, err)
-			if IsRetryableAPIError(err) {
+			if testutils.IsRetryableAPIError(err) {
 				return false, nil
 			}
 			return false, err
@@ -636,7 +635,7 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN
 		rsList, err := c.ExtensionsV1beta1().ReplicaSets(ns).List(metav1.ListOptions{})
 		if err != nil {
 			Logf("Error getting replication sets in namespace %q: %v", ns, err)
-			if IsRetryableAPIError(err) {
+			if testutils.IsRetryableAPIError(err) {
 				return false, nil
 			}
 			return false, err
@@ -649,7 +648,7 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN
 		podList, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{})
 		if err != nil {
 			Logf("Error getting pods in namespace '%s': %v", ns, err)
-			if IsRetryableAPIError(err) {
+			if testutils.IsRetryableAPIError(err) {
 				return false, nil
 			}
 			return false, err
@@ -662,7 +661,7 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN
 			if len(ignoreLabels) != 0 && ignoreSelector.Matches(labels.Set(pod.Labels)) {
 				continue
 			}
-			res, err := testutil.PodRunningReady(&pod)
+			res, err := testutils.PodRunningReady(&pod)
 			switch {
 			case res && err == nil:
 				nOk++
@@ -728,7 +727,7 @@ func LogFailedContainers(c clientset.Interface, ns string, logFunc func(ftm stri
 	}
 	logFunc("Running kubectl logs on non-ready containers in %v", ns)
 	for _, pod := range podList.Items {
-		if res, err := testutil.PodRunningReady(&pod); !res || err != nil {
+		if res, err := testutils.PodRunningReady(&pod); !res || err != nil {
 			kubectlLogPod(c, pod, "", Logf)
 		}
 	}
@@ -1572,7 +1571,7 @@ func WaitForPodToDisappear(c clientset.Interface, ns, podName string, label labe
 		options := metav1.ListOptions{LabelSelector: label.String()}
 		pods, err := c.CoreV1().Pods(ns).List(options)
 		if err != nil {
-			if IsRetryableAPIError(err) {
+			if testutils.IsRetryableAPIError(err) {
 				return false, nil
 			}
 			return false, err
@@ -1626,7 +1625,7 @@ func WaitForService(c clientset.Interface, namespace, name string, exist bool, i
 		case apierrs.IsNotFound(err):
 			Logf("Service %s in namespace %s disappeared.", name, namespace)
 			return !exist, nil
-		case !IsRetryableAPIError(err):
+		case !testutils.IsRetryableAPIError(err):
 			Logf("Non-retryable failure while getting service.")
 			return false, err
 		default:
@@ -1653,7 +1652,7 @@ func WaitForServiceWithSelector(c clientset.Interface, namespace string, selecto
 		case len(services.Items) == 0:
 			Logf("Service with %s in namespace %s disappeared.", selector.String(), namespace)
 			return !exist, nil
-		case !IsRetryableAPIError(err):
+		case !testutils.IsRetryableAPIError(err):
 			Logf("Non-retryable failure while listing service.")
 			return false, err
 		default:
@@ -2489,7 +2488,7 @@ func waitListSchedulableNodesOrDie(c clientset.Interface) *v1.NodeList {
 			"spec.unschedulable": "false",
 		}.AsSelector().String()})
 		if err != nil {
-			if IsRetryableAPIError(err) {
+			if testutils.IsRetryableAPIError(err) {
 				return false, nil
 			}
 			return false, err
@@ -2571,7 +2570,7 @@ func WaitForAllNodesSchedulable(c clientset.Interface, timeout time.Duration) er
 		nodes, err := c.CoreV1().Nodes().List(opts)
 		if err != nil {
 			Logf("Unexpected error listing nodes: %v", err)
-			if IsRetryableAPIError(err) {
+			if testutils.IsRetryableAPIError(err) {
 				return false, nil
 			}
 			return false, err
@@ -2643,7 +2642,7 @@ func GetNodeTTLAnnotationValue(c clientset.Interface) (time.Duration, error) {
 }
 
 func AddOrUpdateLabelOnNode(c clientset.Interface, nodeName string, labelKey, labelValue string) {
-	ExpectNoError(testutil.AddLabelsToNode(c, nodeName, map[string]string{labelKey: labelValue}))
+	ExpectNoError(testutils.AddLabelsToNode(c, nodeName, map[string]string{labelKey: labelValue}))
 }
 
 func AddOrUpdateLabelOnNodeAndReturnOldValue(c clientset.Interface, nodeName string, labelKey, labelValue string) string {
@@ -2651,7 +2650,7 @@ func AddOrUpdateLabelOnNodeAndReturnOldValue(c clientset.Interface, nodeName str
 	node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
 	ExpectNoError(err)
 	oldValue = node.Labels[labelKey]
-	ExpectNoError(testutil.AddLabelsToNode(c, nodeName, map[string]string{labelKey: labelValue}))
+	ExpectNoError(testutils.AddLabelsToNode(c, nodeName, map[string]string{labelKey: labelValue}))
 	return oldValue
 }
 
@@ -2675,10 +2674,10 @@ func AddOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taint v1.Tai
 // won't fail if target label doesn't exist or has been removed.
 func RemoveLabelOffNode(c clientset.Interface, nodeName string, labelKey string) {
 	By("removing the label " + labelKey + " off the node " + nodeName)
-	ExpectNoError(testutil.RemoveLabelOffNode(c, nodeName, []string{labelKey}))
+	ExpectNoError(testutils.RemoveLabelOffNode(c, nodeName, []string{labelKey}))
 
 	By("verifying the node doesn't have the label " + labelKey)
-	ExpectNoError(testutil.VerifyLabelsRemoved(c, nodeName, []string{labelKey}))
+	ExpectNoError(testutils.VerifyLabelsRemoved(c, nodeName, []string{labelKey}))
 }
 
 func VerifyThatTaintIsGone(c clientset.Interface, nodeName string, taint *v1.Taint) {
@@ -2717,7 +2716,7 @@ func AddOrUpdateAvoidPodOnNode(c clientset.Interface, nodeName string, avoidPods
 	err := wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) {
 		node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
 		if err != nil {
-			if IsRetryableAPIError(err) {
+			if testutils.IsRetryableAPIError(err) {
 				return false, nil
 			}
 			return false, err
@@ -2748,7 +2747,7 @@ func RemoveAvoidPodsOffNode(c clientset.Interface, nodeName string) {
 	err := wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) {
 		node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
 		if err != nil {
-			if IsRetryableAPIError(err) {
+			if testutils.IsRetryableAPIError(err) {
 				return false, nil
 			}
 			return false, err
@@ -2783,7 +2782,7 @@ func ScaleResource(
 ) error {
 	By(fmt.Sprintf("Scaling %v %s in namespace %s to %d", kind, name, ns, size))
 	scaler := kubectl.ScalerFor(kind, internalClientset.Batch(), scalesGetter, gr)
-	if err := testutil.ScaleResourceWithRetries(scaler, ns, name, size); err != nil {
+	if err := testutils.ScaleResourceWithRetries(scaler, ns, name, size); err != nil {
 		return fmt.Errorf("error while scaling RC %s to %d replicas: %v", name, size, err)
 	}
 	if !wait {
@@ -2802,7 +2801,7 @@ func WaitForControlledPodsRunning(c clientset.Interface, ns, name string, kind s
 	if err != nil {
 		return err
 	}
-	err = testutil.WaitForPodsWithLabelRunning(c, ns, selector)
+	err = testutils.WaitForPodsWithLabelRunning(c, ns, selector)
 	if err != nil {
 		return fmt.Errorf("Error while waiting for replication controller %s pods to be running: %v", name, err)
 	}
@@ -2824,7 +2823,7 @@ func WaitForControlledPods(c clientset.Interface, ns, name string, kind schema.G
 
 // Returns true if all the specified pods are scheduled, else returns false.
 func podsWithLabelScheduled(c clientset.Interface, ns string, label labels.Selector) (bool, error) {
-	PodStore := testutil.NewPodStore(c, ns, label, fields.Everything())
+	PodStore := testutils.NewPodStore(c, ns, label, fields.Everything())
 	defer PodStore.Stop()
 	pods := PodStore.List()
 	if len(pods) == 0 {
@@ -2863,7 +2862,7 @@ func WaitForPodsWithLabel(c clientset.Interface, ns string, label labels.Selecto
 		options := metav1.ListOptions{LabelSelector: label.String()}
 		pods, err = c.CoreV1().Pods(ns).List(options)
 		if err != nil {
-			if IsRetryableAPIError(err) {
+			if testutils.IsRetryableAPIError(err) {
 				continue
 			}
 			return
@@ -2887,14 +2886,14 @@ func WaitForPodsWithLabelRunningReady(c clientset.Interface, ns string, label la
 		pods, err := WaitForPodsWithLabel(c, ns, label)
 		if err != nil {
 			Logf("Failed to list pods: %v", err)
-			if IsRetryableAPIError(err) {
+			if testutils.IsRetryableAPIError(err) {
 				return false, nil
 			}
 			return false, err
 		}
 		current = 0
 		for _, pod := range pods.Items {
-			if flag, err := testutil.PodRunningReady(&pod); err == nil && flag == true {
+			if flag, err := testutils.PodRunningReady(&pod); err == nil && flag == true {
 				current++
 			}
 		}
@@ -3119,8 +3118,8 @@ func DeleteResourceAndWaitForGC(c clientset.Interface, kind schema.GroupKind, ns
 
 // podStoreForSelector creates a PodStore that monitors pods from given namespace matching given selector.
 // It waits until the reflector does a List() before returning.
-func podStoreForSelector(c clientset.Interface, ns string, selector labels.Selector) (*testutil.PodStore, error) {
-	ps := testutil.NewPodStore(c, ns, selector, fields.Everything())
+func podStoreForSelector(c clientset.Interface, ns string, selector labels.Selector) (*testutils.PodStore, error) {
+	ps := testutils.NewPodStore(c, ns, selector, fields.Everything())
 	err := wait.Poll(100*time.Millisecond, 2*time.Minute, func() (bool, error) {
 		if len(ps.Reflector.LastSyncResourceVersion()) != 0 {
 			return true, nil
@@ -3134,7 +3133,7 @@ func podStoreForSelector(c clientset.Interface, ns string, selector labels.Selec
 // This is to make a fair comparison of deletion time between DeleteRCAndPods
 // and DeleteRCAndWaitForGC, because the RC controller decreases status.replicas
 // when the pod is inactvie.
-func waitForPodsInactive(ps *testutil.PodStore, interval, timeout time.Duration) error {
+func waitForPodsInactive(ps *testutils.PodStore, interval, timeout time.Duration) error {
 	return wait.PollImmediate(interval, timeout, func() (bool, error) {
 		pods := ps.List()
 		for _, pod := range pods {
@@ -3147,7 +3146,7 @@ func waitForPodsInactive(ps *testutil.PodStore, interval, timeout time.Duration)
 }
 
 // waitForPodsGone waits until there are no pods left in the PodStore.
-func waitForPodsGone(ps *testutil.PodStore, interval, timeout time.Duration) error {
+func waitForPodsGone(ps *testutils.PodStore, interval, timeout time.Duration) error {
 	return wait.PollImmediate(interval, timeout, func() (bool, error) {
 		if pods := ps.List(); len(pods) == 0 {
 			return true, nil
@@ -3214,7 +3213,7 @@ func UpdateDaemonSetWithRetries(c clientset.Interface, namespace, name string, a
 	var updateErr error
 	pollErr := wait.PollImmediate(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
 		if ds, err = daemonsets.Get(name, metav1.GetOptions{}); err != nil {
-			if IsRetryableAPIError(err) {
+			if testutils.IsRetryableAPIError(err) {
 				return false, nil
 			}
 			return false, err
@@ -3473,7 +3472,7 @@ func CreateExecPodOrFail(client clientset.Interface, ns, generateName string, tw
 	err = wait.PollImmediate(Poll, 5*time.Minute, func() (bool, error) {
 		retrievedPod, err := client.CoreV1().Pods(execPod.Namespace).Get(created.Name, metav1.GetOptions{})
 		if err != nil {
-			if IsRetryableAPIError(err) {
+			if testutils.IsRetryableAPIError(err) {
 				return false, nil
 			}
 			return false, err
@@ -3561,14 +3560,14 @@ func GetSigner(provider string) (ssh.Signer, error) {
 // podNames in namespace ns are running and ready, using c and waiting at most
 // timeout.
 func CheckPodsRunningReady(c clientset.Interface, ns string, podNames []string, timeout time.Duration) bool {
-	return CheckPodsCondition(c, ns, podNames, timeout, testutil.PodRunningReady, "running and ready")
+	return CheckPodsCondition(c, ns, podNames, timeout, testutils.PodRunningReady, "running and ready")
 }
 
 // CheckPodsRunningReadyOrSucceeded returns whether all pods whose names are
 // listed in podNames in namespace ns are running and ready, or succeeded; use
 // c and waiting at most timeout.
 func CheckPodsRunningReadyOrSucceeded(c clientset.Interface, ns string, podNames []string, timeout time.Duration) bool {
-	return CheckPodsCondition(c, ns, podNames, timeout, testutil.PodRunningReadyOrSucceeded, "running and ready, or succeeded")
+	return CheckPodsCondition(c, ns, podNames, timeout, testutils.PodRunningReadyOrSucceeded, "running and ready, or succeeded")
 }
 
 // CheckPodsCondition returns whether all pods whose names are listed in podNames
@@ -3727,7 +3726,7 @@ func AllNodesReady(c clientset.Interface, timeout time.Duration) error {
 		// It should be OK to list unschedulable Nodes here.
 		nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
 		if err != nil {
-			if IsRetryableAPIError(err) {
+			if testutils.IsRetryableAPIError(err) {
 				return false, nil
 			}
 			return false, err
@@ -3771,7 +3770,7 @@ func WaitForAllNodesHealthy(c clientset.Interface, timeout time.Duration) error
 		// It should be OK to list unschedulable Nodes here.
 		nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{ResourceVersion: "0"})
 		if err != nil {
-			if IsRetryableAPIError(err) {
+			if testutils.IsRetryableAPIError(err) {
 				return false, nil
 			}
 			return false, err
@@ -4292,7 +4291,7 @@ func GetNodePortURL(client clientset.Interface, ns, name string, svcPort int) (s
 			"spec.unschedulable": "false",
 		}.AsSelector().String()})
 		if err != nil {
-			if IsRetryableAPIError(err) {
+			if testutils.IsRetryableAPIError(err) {
 				return false, nil
 			}
 			return false, err
@@ -4783,22 +4782,22 @@ func ListNamespaceEvents(c clientset.Interface, ns string) error {
 	return nil
 }
 
-// E2ETestNodePreparer implements testutil.TestNodePreparer interface, which is used
+// E2ETestNodePreparer implements testutils.TestNodePreparer interface, which is used
 // to create/modify Nodes before running a test.
 type E2ETestNodePreparer struct {
 	client clientset.Interface
 	// Specifies how many nodes should be modified using the given strategy.
 	// Only one strategy can be applied to a single Node, so there needs to
 	// be at least <sum_of_counts> Nodes in the cluster.
-	countToStrategy       []testutil.CountToStrategy
-	nodeToAppliedStrategy map[string]testutil.PrepareNodeStrategy
+	countToStrategy       []testutils.CountToStrategy
+	nodeToAppliedStrategy map[string]testutils.PrepareNodeStrategy
 }
 
-func NewE2ETestNodePreparer(client clientset.Interface, countToStrategy []testutil.CountToStrategy) testutil.TestNodePreparer {
+func NewE2ETestNodePreparer(client clientset.Interface, countToStrategy []testutils.CountToStrategy) testutils.TestNodePreparer {
 	return &E2ETestNodePreparer{
 		client:                client,
 		countToStrategy:       countToStrategy,
-		nodeToAppliedStrategy: make(map[string]testutil.PrepareNodeStrategy),
+		nodeToAppliedStrategy: make(map[string]testutils.PrepareNodeStrategy),
 	}
 }
 
@@ -4816,7 +4815,7 @@ func (p *E2ETestNodePreparer) PrepareNodes() error {
 	for _, v := range p.countToStrategy {
 		sum += v.Count
 		for ; index < sum; index++ {
-			if err := testutil.DoPrepareNode(p.client, &nodes.Items[index], v.Strategy); err != nil {
+			if err := testutils.DoPrepareNode(p.client, &nodes.Items[index], v.Strategy); err != nil {
 				glog.Errorf("Aborting node preparation: %v", err)
 				return err
 			}
@@ -4834,7 +4833,7 @@ func (p *E2ETestNodePreparer) CleanupNodes() error {
 		name := nodes.Items[i].Name
 		strategy, found := p.nodeToAppliedStrategy[name]
 		if found {
-			if err = testutil.DoCleanupNode(p.client, name, strategy); err != nil {
+			if err = testutils.DoCleanupNode(p.client, name, strategy); err != nil {
 				glog.Errorf("Skipping cleanup of Node: failed update of %v: %v", name, err)
 				encounteredError = err
 			}
@@ -5109,11 +5108,6 @@ func DumpDebugInfo(c clientset.Interface, ns string) {
 	}
 }
 
-// TODO: Get rid of this duplicate function in favour of the one in test/utils.
-func IsRetryableAPIError(err error) bool {
-	return apierrs.IsTimeout(err) || apierrs.IsServerTimeout(err) || apierrs.IsTooManyRequests(err) || utilnet.IsProbableEOF(err)
-}
-
 // DsFromManifest reads a .json/yaml file and returns the daemonset in it.
 func DsFromManifest(url string) (*extensions.DaemonSet, error) {
 	var controller extensions.DaemonSet
diff --git a/test/e2e/scalability/load.go b/test/e2e/scalability/load.go
index 897cb24a41..38bf5970c5 100644
--- a/test/e2e/scalability/load.go
+++ b/test/e2e/scalability/load.go
@@ -670,7 +670,7 @@ func scaleResource(wg *sync.WaitGroup, config testutils.RunObjectConfig, scaling
 			return true, nil
 		}
 		framework.Logf("Failed to list pods from %v %v due to: %v", config.GetKind(), config.GetName(), err)
-		if framework.IsRetryableAPIError(err) {
+		if testutils.IsRetryableAPIError(err) {
 			return false, nil
 		}
 		return false, fmt.Errorf("Failed to list pods from %v %v with non-retriable error: %v", config.GetKind(), config.GetName(), err)
diff --git a/test/integration/framework/perf_utils.go b/test/integration/framework/perf_utils.go
index 2336b2a5a8..8a270bcc9d 100644
--- a/test/integration/framework/perf_utils.go
+++ b/test/integration/framework/perf_utils.go
@@ -76,7 +76,7 @@ func (p *IntegrationTestNodePreparer) PrepareNodes() error {
 	var err error
 	for retry := 0; retry < retries; retry++ {
 		_, err = p.client.CoreV1().Nodes().Create(baseNode)
-		if err == nil || !testutils.IsRetryableAPIError(err) {
+		if err == nil || !testutils.IsRetryableAPIError(err) {
 			break
 		}
 	}
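
For readers skimming the consolidation, the retry idiom shared by every call site this patch touches is sketched below in Go. The IsRetryableAPIError body is copied verbatim from the framework function deleted above (the test/utils version the call sites now import performs the same check); the package name, pollUntilSuccess, and its op parameter are hypothetical names introduced only to illustrate the recurring wait.PollImmediate pattern: a retryable API error keeps the poll alive with (false, nil), while any other error aborts the wait with (false, err).

package retryexample

import (
	"time"

	apierrs "k8s.io/apimachinery/pkg/api/errors"
	utilnet "k8s.io/apimachinery/pkg/util/net"
	"k8s.io/apimachinery/pkg/util/wait"
)

// IsRetryableAPIError mirrors the deleted framework copy: client timeouts,
// server timeouts, API throttling, and probable-EOF errors are transient,
// so callers should keep retrying rather than fail the test outright.
func IsRetryableAPIError(err error) bool {
	return apierrs.IsTimeout(err) || apierrs.IsServerTimeout(err) ||
		apierrs.IsTooManyRequests(err) || utilnet.IsProbableEOF(err)
}

// pollUntilSuccess retries op until it succeeds or the timeout expires,
// treating retryable API errors as "not done yet" instead of as failures.
func pollUntilSuccess(interval, timeout time.Duration, op func() error) error {
	return wait.PollImmediate(interval, timeout, func() (bool, error) {
		if err := op(); err != nil {
			if IsRetryableAPIError(err) {
				return false, nil // transient; poll again on the next tick
			}
			return false, err // non-retryable; abort the wait with the error
		}
		return true, nil // op succeeded; stop polling
	})
}

Keeping a single predicate in test/utils means the e2e framework, the scalability tests, and the integration framework all draw the transient-versus-fatal line the same way, which is exactly what the deleted TODO asked for.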