mirror of https://github.com/k3s-io/k3s
Split kubelet e2e resource usage tracking tests
Some tests in this suite expect --max-pods (i.e. the maximum pod capacity on the kubelet) to be greater than the default, which holds only in the GCE test environment. Split the tests into two sets so that we can better categorize them in the Jenkins setup, without making the tests themselves aware of the environment.
parent a512111c9d
commit 15cdc45b09
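For context on the --max-pods expectation: a hypothetical guard, not part of this commit, showing how a test could check each node's pod capacity (which reflects the kubelet's --max-pods flag) rather than the job config knowing the environment. The package and import path follow the contemporary Kubernetes tree and are assumptions here:

package e2e

import "k8s.io/kubernetes/pkg/api"

// nodesAllowPodsPerNode is a hypothetical helper: --max-pods surfaces on each
// node as the "pods" entry of its capacity, so a test could skip itself when
// the cluster cannot actually hold the requested pod density.
func nodesAllowPodsPerNode(nodes []api.Node, podsPerNode int) bool {
	for _, node := range nodes {
		capacity := node.Status.Capacity[api.ResourcePods]
		if capacity.Value() < int64(podsPerNode) {
			return false
		}
	}
	return true
}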
--- a/hack/jenkins/e2e.sh
+++ b/hack/jenkins/e2e.sh
@@ -112,6 +112,12 @@ GKE_REQUIRED_SKIP_TESTS=(
     "Shell"
     "Daemon\sset"
     "Deployment"
+    "experimental\sresource\susage\stracking" # Expect --max-pods=100
 )
 
+# Tests which cannot be run on AWS.
+AWS_REQUIRED_SKIP_TESTS=(
+    "experimental\sresource\susage\stracking" # Expect --max-pods=100
+)
+
 # The following tests are known to be flaky, and are thus run only in their own
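The entries above are regular expressions matched against full Ginkgo test names; \s stands in for spaces so each entry stays a single shell word when the arrays are expanded into a --ginkgo.skip argument. A minimal, runnable sketch of that match (the test name is illustrative, assembled from the Describe/It strings added later in this commit):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Illustrative full test name: "Kubelet" (outer Describe) + "experimental
	// resource usage tracking" (inner Describe) + the It name.
	name := "Kubelet experimental resource usage tracking over 20m0s with 50 pods per node."

	// Skip-list entry exactly as written in the array above; \s matches the spaces.
	skip := regexp.MustCompile(`experimental\sresource\susage\stracking`)

	fmt.Println(skip.MatchString(name)) // true: jobs carrying this entry skip the test
}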
@@ -301,6 +307,7 @@ case ${JOB_NAME} in
           ${GCE_PARALLEL_SKIP_TESTS[@]:+${GCE_PARALLEL_SKIP_TESTS[@]}} \
           ${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \
           ${GCE_PARALLEL_FLAKY_TESTS[@]:+${GCE_PARALLEL_FLAKY_TESTS[@]}} \
+          ${AWS_REQUIRED_SKIP_TESTS[@]:+${AWS_REQUIRED_SKIP_TESTS[@]}} \
           )"}
     : ${ENABLE_DEPLOYMENTS:=true}
     # Override AWS defaults.
@@ -439,6 +446,7 @@ case ${JOB_NAME} in
           ${GKE_REQUIRED_SKIP_TESTS[@]:+${GKE_REQUIRED_SKIP_TESTS[@]}} \
           ${GCE_DEFAULT_SKIP_TESTS[@]:+${GCE_DEFAULT_SKIP_TESTS[@]}} \
           ${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \
+          ${GCE_SLOW_TESTS[@]:+${GCE_SLOW_TESTS[@]}} \
           )"}
     ;;
@@ -455,6 +463,7 @@ case ${JOB_NAME} in
           ${REBOOT_SKIP_TESTS[@]:+${REBOOT_SKIP_TESTS[@]}} \
           ${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \
           ${GCE_PARALLEL_SKIP_TESTS[@]:+${GCE_PARALLEL_SKIP_TESTS[@]}} \
+          ${GCE_SLOW_TESTS[@]:+${GCE_SLOW_TESTS[@]}} \
           )"}
     ;;
@@ -499,6 +508,7 @@ case ${JOB_NAME} in
           ${GKE_REQUIRED_SKIP_TESTS[@]:+${GKE_REQUIRED_SKIP_TESTS[@]}} \
           ${GCE_DEFAULT_SKIP_TESTS[@]:+${GCE_DEFAULT_SKIP_TESTS[@]}} \
           ${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \
+          ${GCE_SLOW_TESTS[@]:+${GCE_SLOW_TESTS[@]}} \
           )"}
     ;;
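In the job hunks above, ${ARR[@]:+${ARR[@]}} expands to the array only when it is set and non-empty, which keeps an empty skip list from tripping set -u. The elided )"} lines close assignments that join all entries into one skip pattern; a sketch of that joining, assuming a plain |-alternation, which also shows why the split lets the regular set keep running where the experimental set is skipped:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	// Assumption: the script joins every skip entry into a single alternation
	// passed as --ginkgo.skip. Entries mirror GKE_REQUIRED_SKIP_TESTS above.
	entries := []string{
		`Shell`,
		`Daemon\sset`,
		`Deployment`,
		`experimental\sresource\susage\stracking`,
	}
	skip := regexp.MustCompile(strings.Join(entries, "|"))

	regular := "Kubelet regular resource usage tracking over 20m0s with 35 pods per node."
	experimental := "Kubelet experimental resource usage tracking over 20m0s with 50 pods per node."

	fmt.Println(skip.MatchString(regular))      // false: the regular set still runs
	fmt.Println(skip.MatchString(experimental)) // true: skipped on GKE and AWS
}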
--- a/test/e2e/kubelet_perf.go
+++ b/test/e2e/kubelet_perf.go
@@ -50,39 +50,8 @@ func logPodsOnNodes(c *client.Client, nodeNames []string) {
 	}
 }
 
-var _ = Describe("Kubelet", func() {
-	var numNodes int
-	var nodeNames sets.String
-	framework := NewFramework("kubelet-perf")
-	var resourceMonitor *resourceMonitor
-
-	BeforeEach(func() {
-		nodes, err := framework.Client.Nodes().List(labels.Everything(), fields.Everything())
-		expectNoError(err)
-		numNodes = len(nodes.Items)
-		nodeNames = sets.NewString()
-		for _, node := range nodes.Items {
-			nodeNames.Insert(node.Name)
-		}
-		resourceMonitor = newResourceMonitor(framework.Client, targetContainers(), containerStatsPollingPeriod)
-		resourceMonitor.Start()
-	})
-
-	AfterEach(func() {
-		resourceMonitor.Stop()
-	})
-
-	Describe("resource usage tracking", func() {
-		density := []int{0, 50}
-		for i := range density {
-			podsPerNode := density[i]
-			name := fmt.Sprintf(
-				"over %v with %d pods per node.", monitoringTime, podsPerNode)
-			It(name, func() {
-				// Skip this test for GKE.
-				// TODO: Re-activate this for GKE
-				SkipIfProviderIs("gke")
+func runResourceTrackingTest(framework *Framework, podsPerNode int, nodeNames sets.String, resourceMonitor *resourceMonitor) {
+	numNodes := nodeNames.Len()
+	totalPods := podsPerNode * numNodes
+	By(fmt.Sprintf("Creating a RC of %d pods and wait until all pods of this RC are running", totalPods))
+	rcName := fmt.Sprintf("resource%d-%s", totalPods, string(util.NewUUID()))
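One Go detail worth noting in both the removed and added loops: podsPerNode := density[i] copies the value into a fresh variable on every iteration, because It only registers the closure and Ginkgo runs it much later. A self-contained sketch of why the copy matters (under the pre-1.22 Go loop semantics this code predates):

package main

import "fmt"

func main() {
	density := []int{0, 35}

	// Register closures now, run them later, the way Ginkgo's It does.
	var registered []func()
	for i := range density {
		podsPerNode := density[i] // fresh variable; each closure captures its own copy
		registered = append(registered, func() {
			fmt.Println("pods per node:", podsPerNode)
		})
	}

	// Without the per-iteration copy (e.g. capturing a shared loop variable),
	// every closure would have observed the final value under pre-1.22 Go.
	for _, run := range registered {
		run() // prints 0, then 35
	}
}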
@@ -127,6 +96,47 @@ var _ = Describe("Kubelet", func() {
 
 	By("Deleting the RC")
 	DeleteRC(framework.Client, framework.Namespace.Name, rcName)
 }
+
+var _ = Describe("Kubelet", func() {
+	var nodeNames sets.String
+	framework := NewFramework("kubelet-perf")
+	var resourceMonitor *resourceMonitor
+
+	BeforeEach(func() {
+		nodes, err := framework.Client.Nodes().List(labels.Everything(), fields.Everything())
+		expectNoError(err)
+		nodeNames = sets.NewString()
+		for _, node := range nodes.Items {
+			nodeNames.Insert(node.Name)
+		}
+		resourceMonitor = newResourceMonitor(framework.Client, targetContainers(), containerStatsPollingPeriod)
+		resourceMonitor.Start()
+	})
+
+	AfterEach(func() {
+		resourceMonitor.Stop()
+	})
+
+	Describe("regular resource usage tracking", func() {
+		density := []int{0, 35}
+		for i := range density {
+			podsPerNode := density[i]
+			name := fmt.Sprintf(
+				"over %v with %d pods per node.", monitoringTime, podsPerNode)
+			It(name, func() {
+				runResourceTrackingTest(framework, podsPerNode, nodeNames, resourceMonitor)
+			})
+		}
+	})
+	Describe("experimental resource usage tracking", func() {
+		density := []int{50}
+		for i := range density {
+			podsPerNode := density[i]
+			name := fmt.Sprintf(
+				"over %v with %d pods per node.", monitoringTime, podsPerNode)
+			It(name, func() {
+				runResourceTrackingTest(framework, podsPerNode, nodeNames, resourceMonitor)
+			})
+		}
+	})
+})
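The BeforeEach/AfterEach pair wraps every spec with resourceMonitor.Start and Stop. The real monitor polls kubelet container stats elsewhere in the e2e package; the following is a minimal, runnable sketch of just that start/stop contract, with all names illustrative:

package main

import (
	"fmt"
	"time"
)

// monitor sketches the lifecycle the test relies on: Start launches a
// background poller, Stop shuts it down so it never leaks across specs.
type monitor struct {
	period time.Duration
	stop   chan struct{}
	done   chan struct{}
}

func (m *monitor) Start() {
	m.stop = make(chan struct{})
	m.done = make(chan struct{})
	go func() {
		defer close(m.done)
		ticker := time.NewTicker(m.period)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				fmt.Println("poll container stats") // real code scrapes each target container
			case <-m.stop:
				return
			}
		}
	}()
}

func (m *monitor) Stop() {
	close(m.stop)
	<-m.done // AfterEach must not leave the poller running
}

func main() {
	m := &monitor{period: 50 * time.Millisecond}
	m.Start()
	time.Sleep(120 * time.Millisecond)
	m.Stop()
}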