Merge pull request #55798 from shyamjvs/exclude-for-scale-tests-tag

Automatic merge from submit-queue (batch tested with PRs 55798, 49579, 54862, 55188, 51990). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Add special tag for disabling ESIPP and HPA-related tests on large clusters

As discussed offline, this should help improve accountability for tests that need some love from a scalability perspective.
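For context, the tag works purely through Ginkgo's test-name matching: the bracketed string becomes part of the full test name, which large-cluster test jobs can then exclude with a skip regex (something like `--ginkgo.skip=\[DisabledForLargeClusters\]`). A minimal sketch of the matching; the full test name below is illustrative, not taken from the diff:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Ginkgo builds a test's full name by concatenating its Describe/It
	// strings, so a tag added in SIGDescribe/It becomes part of the name
	// that test-selection regexes operate on.
	name := "[sig-autoscaling] [HPA] Horizontal pod autoscaling (scale resource: CPU) " +
		"[DisabledForLargeClusters] ReplicationController light Should scale from 1 pod to 2 pods"

	// A large-cluster job excludes these tests by skipping on the tag.
	skip := regexp.MustCompile(`\[DisabledForLargeClusters\]`)
	fmt.Println(skip.MatchString(name)) // true => the runner skips this test
}
```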

/cc @porridge 
fyi - @MrHohn @MaciekPytel @mwielgus @crassirostris 

@kubernetes/sig-scalability-misc
Kubernetes Submit Queue 2017-11-16 03:09:07 -08:00 committed by GitHub
commit f9ce9d9da6
2 changed files with 8 additions and 16 deletions


@@ -66,7 +66,8 @@ var _ = SIGDescribe("[HPA] Horizontal pod autoscaling (scale resource: CPU)", func() {
 		})
 	})
-	SIGDescribe("ReplicationController light", func() {
+	// TODO: Get rid of [DisabledForLargeClusters] tag when issue #54637 is fixed.
+	SIGDescribe("[DisabledForLargeClusters] ReplicationController light", func() {
 		It("Should scale from 1 pod to 2 pods", func() {
 			scaleTest := &HPAScaleTest{
 				initPods:    1,

@@ -481,14 +481,10 @@ var _ = SIGDescribe("Services", func() {
 		}
 	})
-	It("should be able to change the type and ports of a service [Slow]", func() {
+	// TODO: Get rid of [DisabledForLargeClusters] tag when issue #52495 is fixed.
+	It("should be able to change the type and ports of a service [Slow] [DisabledForLargeClusters]", func() {
 		// requires cloud load-balancer support
 		framework.SkipUnlessProviderIs("gce", "gke", "aws")
-		if framework.ProviderIs("gke", "gce") {
-			// Skipping this test for too large clusters due to issue #52495.
-			// TODO(MrHohn): Get rid of this when gce-side load-balancer improvements are done.
-			framework.SkipUnlessNodeCountIsAtMost(framework.GCPMaxInstancesInInstanceGroup)
-		}
 		loadBalancerSupportsUDP := !framework.ProviderIs("aws")
@@ -1403,11 +1399,9 @@ var _ = SIGDescribe("Services", func() {
 		framework.CheckReachabilityFromPod(true, normalReachabilityTimeout, namespace, dropPodName, svcIP)
 	})
-	It("should be able to create an internal type load balancer [Slow]", func() {
+	// TODO: Get rid of [DisabledForLargeClusters] tag when issue #52495 is fixed.
+	It("should be able to create an internal type load balancer [Slow] [DisabledForLargeClusters]", func() {
 		framework.SkipUnlessProviderIs("azure", "gke", "gce")
-		if framework.ProviderIs("gke", "gce") {
-			framework.SkipUnlessNodeCountIsAtMost(framework.GCPMaxInstancesInInstanceGroup)
-		}
 		createTimeout := framework.LoadBalancerCreateTimeoutDefault
 		if nodes := framework.GetReadySchedulableNodesOrDie(cs); len(nodes.Items) > framework.LargeClusterMinNodesNumber {
@@ -1491,7 +1485,8 @@ var _ = SIGDescribe("Services", func() {
 	})
 })
-var _ = SIGDescribe("ESIPP [Slow]", func() {
+// TODO: Get rid of [DisabledForLargeClusters] tag when issue #52495 is fixed.
+var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
 	f := framework.NewDefaultFramework("esipp")
 	loadBalancerCreateTimeout := framework.LoadBalancerCreateTimeoutDefault
@@ -1502,10 +1497,6 @@ var _ = SIGDescribe("ESIPP [Slow]", func() {
 		// requires cloud load-balancer support - this feature currently supported only on GCE/GKE
 		framework.SkipUnlessProviderIs("gce", "gke")
-		// Skipping this test for too large clusters due to issue #52495.
-		// TODO(MrHohn): Get rid of this when gce-side load-balancer improvements are done.
-		framework.SkipUnlessNodeCountIsAtMost(framework.GCPMaxInstancesInInstanceGroup)
 		cs = f.ClientSet
 		if nodes := framework.GetReadySchedulableNodesOrDie(cs); len(nodes.Items) > framework.LargeClusterMinNodesNumber {
 			loadBalancerCreateTimeout = framework.LoadBalancerCreateTimeoutLarge
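For contrast with the new name tag: the removed `framework.SkipUnlessNodeCountIsAtMost` guard only skipped once the test was already running, so the exclusion was invisible to test-selection tooling, which is what the accountability argument above is about. A minimal sketch of that runtime-skip pattern, with hypothetical stand-ins (`countReadySchedulableNodes`, `Skipf`, the node counts), not the actual framework source:

```go
package main

import "fmt"

// Hypothetical stand-ins for the e2e framework's helpers, for illustration only.
func countReadySchedulableNodes() int { return 5000 }

func Skipf(format string, args ...interface{}) {
	fmt.Printf("SKIP: "+format+"\n", args...)
}

// Sketch of the runtime guard this PR removes in favor of the name tag:
// it decides to skip only inside the running test, so a skipped test still
// looks selected to anything filtering on test names.
func SkipUnlessNodeCountIsAtMost(maxNodes int) {
	if n := countReadySchedulableNodes(); n > maxNodes {
		Skipf("requires at most %d nodes, found %d", maxNodes, n)
	}
}

func main() {
	SkipUnlessNodeCountIsAtMost(3000)
}
```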