Merge pull request #54990 from shyamjvs/retry-pod-list-in-load-test
Automatic merge from submit-queue (batch tested with PRs 55169, 54990). If you want to cherry-pick this change to another branch, please follow the instructions here: https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md

Retry pod listing call in load test if possible instead of failing

The latest run of the 5k-node performance test failed due to this (https://k8s-gubernator.appspot.com/build/kubernetes-jenkins/logs/ci-kubernetes-e2e-gce-scale-performance/57):

```
listing pods from rc load-small-10363
Expected error:
    ...
    Get https://35.196.185.248/api/v1/namespaces/e2e-tests-load-30-nodepods-14-f9gcv/pods?labelSelector=name%3Dload-small-10363&resourceVersion=0: read tcp 172.17.0.5:40524->35.196.185.248:443: read: connection reset by peer
not to have occurred
```

/cc @wojtek-t @porridge
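The fix hinges on classifying which API errors are worth retrying. `framework.IsRetryableAPIError` is referenced in the diff below but not defined in these hunks; a minimal sketch of such a predicate, assuming it combines the standard `apierrors` helpers with `utilnet.IsConnectionReset` (which would also explain the new `util/net` dependency added in the BUILD hunk), might look like:

```go
// Sketch only: a plausible retryable-error predicate for API calls made by the
// load test. The actual definition of framework.IsRetryableAPIError is not part
// of the hunks shown on this page.
package framework

import (
	apierrs "k8s.io/apimachinery/pkg/api/errors"
	utilnet "k8s.io/apimachinery/pkg/util/net"
)

// IsRetryableAPIError returns true for transient API failures such as timeouts,
// server-side throttling, and TCP connection resets ("read: connection reset by peer").
func IsRetryableAPIError(err error) bool {
	return apierrs.IsTimeout(err) ||
		apierrs.IsServerTimeout(err) ||
		apierrs.IsTooManyRequests(err) ||
		utilnet.IsConnectionReset(err)
}
```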
commit a8fc7f691f
```diff
@@ -35,6 +35,7 @@ go_library(
+        "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
         "//vendor/k8s.io/client-go/kubernetes:go_default_library",
         "//vendor/k8s.io/client-go/rest:go_default_library",
```
```diff
@@ -33,6 +33,7 @@ import (
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/util/intstr"
+	utilnet "k8s.io/apimachinery/pkg/util/net"
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
 	restclient "k8s.io/client-go/rest"
 	"k8s.io/client-go/transport"
```
```diff
@@ -523,6 +524,16 @@ func sleepUpTo(d time.Duration) {
 	}
 }
 
+func retryWithExponentialBackOff(initialDuration time.Duration, fn wait.ConditionFunc) error {
+	backoff := wait.Backoff{
+		Duration: initialDuration,
+		Factor:   3,
+		Jitter:   0,
+		Steps:    6,
+	}
+	return wait.ExponentialBackoff(backoff, fn)
+}
+
 func createAllResources(configs []testutils.RunObjectConfig, creatingTime time.Duration) {
 	var wg sync.WaitGroup
 	wg.Add(len(configs))
```
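With `Duration: 100ms`, `Factor: 3`, `Jitter: 0`, and `Steps: 6`, the helper allows up to six attempts with delays growing from 100ms to roughly 8s, i.e. about 12s of waiting in the worst case. A small standalone sketch of that schedule, assuming the `wait.ExponentialBackoff` semantics of sleeping before each retry (but not before the first attempt) and multiplying the delay by `Factor`:

```go
// Standalone sketch (not part of the change): prints the delays produced by the
// Backoff parameters used above, assuming no jitter and a sleep before each retry.
package main

import (
	"fmt"
	"time"
)

func main() {
	delay := 100 * time.Millisecond // the initialDuration the caller passes in
	const factor, steps = 3, 6

	var total time.Duration
	for attempt := 2; attempt <= steps; attempt++ { // no sleep before the first attempt
		fmt.Printf("sleep before attempt %d: %v\n", attempt, delay)
		total += delay
		delay *= factor
	}
	fmt.Printf("worst-case total wait over %d attempts: %v\n", steps, total) // 12.1s
}
```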
```diff
@@ -559,15 +570,26 @@ func scaleResource(wg *sync.WaitGroup, config testutils.RunObjectConfig, scaling
 	newSize := uint(rand.Intn(config.GetReplicas()) + config.GetReplicas()/2)
 	framework.ExpectNoError(framework.ScaleResource(
 		config.GetClient(), config.GetInternalClient(), config.GetNamespace(), config.GetName(), newSize, true, config.GetKind()),
-		fmt.Sprintf("scaling rc %s for the first time", config.GetName()))
+		fmt.Sprintf("scaling %v %v", config.GetKind(), config.GetName()))
 
 	selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": config.GetName()}))
 	options := metav1.ListOptions{
 		LabelSelector:   selector.String(),
 		ResourceVersion: "0",
 	}
-	_, err := config.GetClient().CoreV1().Pods(config.GetNamespace()).List(options)
-	framework.ExpectNoError(err, fmt.Sprintf("listing pods from rc %v", config.GetName()))
+	listResourcePodsFunc := func() (bool, error) {
+		_, err := config.GetClient().CoreV1().Pods(config.GetNamespace()).List(options)
+		if err == nil {
+			return true, nil
+		}
+		framework.Logf("Failed to list pods from %v %v due to: %v", config.GetKind(), config.GetName(), err)
+		if framework.IsRetryableAPIError(err) {
+			return false, nil
+		}
+		return false, fmt.Errorf("Failed to list pods from %v %v with non-retriable error: %v", config.GetKind(), config.GetName(), err)
+	}
+	err := retryWithExponentialBackOff(100*time.Millisecond, listResourcePodsFunc)
+	framework.ExpectNoError(err)
 }
 
 func deleteAllResources(configs []testutils.RunObjectConfig, deletingTime time.Duration) {
```
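The `listResourcePodsFunc` closure follows the `wait.ConditionFunc` contract: `true, nil` stops with success, `false, nil` asks for another attempt, and `false, err` aborts immediately with that error, so only retryable failures lead to further attempts while anything else fails the test right away via `ExpectNoError`. A self-contained sketch of that behaviour (illustration only, using a made-up flaky condition rather than the test's pod list call):

```go
package main

import (
	"errors"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	backoff := wait.Backoff{Duration: 10 * time.Millisecond, Factor: 3, Steps: 6}

	// Succeeds on the third attempt: the first two return (false, nil), i.e. "retry".
	attempts := 0
	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		attempts++
		return attempts >= 3, nil
	})
	fmt.Println(attempts, err) // 3 <nil>

	// A non-nil error aborts immediately without consuming the remaining steps.
	err = wait.ExponentialBackoff(backoff, func() (bool, error) {
		return false, errors.New("non-retriable")
	})
	fmt.Println(err) // non-retriable
}
```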