mirror of https://github.com/k3s-io/k3s
Merge pull request #61959 from satyasm/ipam-perf-cloud-mock
Automatic merge from submit-queue (batch tested with PRs 61959, 62037). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Support custom test configurations

**What this PR does / why we need it**:

**Which issue(s) this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close the issue(s) when PR gets merged)*:
Fixes #

**Special notes for your reviewer**:

**Release note**:
```release-note
Support custom test configuration for IPAM performance integration tests
```
commit b6c721d7ac
test/integration/ipamperf/BUILD

```diff
@@ -60,6 +60,7 @@ go_library(
         "//vendor/google.golang.org/api/compute/v0.beta:go_default_library",
         "//vendor/google.golang.org/api/compute/v1:go_default_library",
         "//vendor/k8s.io/api/core/v1:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//vendor/k8s.io/client-go/informers:go_default_library",
```
test/integration/ipamperf/README.md

````diff
@@ -29,10 +29,18 @@ The runner scripts support a few different options:
 ```shell
 ./test-performance.sh -h
-usage: ./test-performance.sh [-h] [-d] [-r <pattern>] [-o <filename>]
+usage: ./test-performance.sh <options>
  -h display this help message
  -d enable debug logs in tests
  -r <pattern> regex pattern to match for tests
  -o <filename> file to write JSON formatted results to
+ -p <id> enable cpu and memory profiles, output written to mem-<id>.out and cpu-<id>.out
+ -c enable custom test configuration
+ -a <name> allocator name, one of RangeAllocator, CloudAllocator, IPAMFromCluster, IPAMFromCloud
+ -k <num> api server qps for allocator
+ -n <num> number of nodes to simulate
+ -m <num> api server qps for node creation
+ -l <num> gce cloud endpoint qps
 ```
 
 The tests follow the pattern TestPerformance/{AllocatorType}-KubeQPS{X}-Nodes{Y}, where AllocatorType
````
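For example, a custom run that exercises only the CloudAllocator could look like this (a hypothetical invocation; the values shown are illustrative, not defaults):

```shell
# Illustrative only: one custom configuration instead of the default matrix,
# simulating 100 nodes with explicit qps limits for allocation, creation, and cloud calls.
./test-performance.sh -c -a CloudAllocator -n 100 -k 30 -m 100 -l 30
```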
test/integration/ipamperf/README.md

````diff
@@ -56,6 +64,19 @@ So to run the test for CloudAllocator with 10 nodes, one can run
 At the end of the test, a JSON format of the results for all the tests run is printed. Passing the -o option
 allows for also saving this JSON to a named file.
 
+### Profiling the code
+It's possible to get the CPU and memory profiles of code during test execution by using the ```-p``` option.
+The CPU and memory profiles are generated in the same directory with the file names set to ```cpu-<id>.out```
+and ```mem-<id>.out```, where ```<id>``` is the argument value. The typical pattern is to use the number
+of nodes being simulated as the id, or 'all' when running the full suite.
+
+### Custom Test Configuration
+It's also possible to run a custom test configuration by passing the -c option. With this option, it is then
+possible to specify the number of nodes to simulate and the API server qps values for creation,
+IPAM allocation and cloud endpoint, along with the allocator name to run. The default values for the
+qps parameters are 30 for IPAM allocation, 100 for node creation and 30 for the cloud endpoint, and the
+default allocator is the RangeAllocator.
+
 Code Organization
 -----
 The core of the tests are defined in [ipam_test.go](ipam_test.go), using the t.Run() helper to control parallelism
````
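To make the profiling workflow above concrete, a possible session (the id and run pattern are hypothetical) might be:

```shell
# Illustrative only: profile the 100-node tests, then inspect the CPU profile.
./test-performance.sh -p 100 -r 'TestPerformance/.*-Nodes100'
go tool pprof cpu-100.out
```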
test/integration/ipamperf/ipam_test.go

```diff
@@ -126,9 +126,13 @@ func TestPerformance(t *testing.T) {
 		tests []*Config
 	)
 
-	for _, numNodes := range []int{10, 100} {
-		for _, alloc := range []ipam.CIDRAllocatorType{ipam.RangeAllocatorType, ipam.CloudAllocatorType, ipam.IPAMFromClusterAllocatorType, ipam.IPAMFromCloudAllocatorType} {
-			tests = append(tests, &Config{AllocatorType: alloc, NumNodes: numNodes, CreateQPS: numNodes, KubeQPS: 10, CloudQPS: 10})
+	if isCustom {
+		tests = append(tests, customConfig)
+	} else {
+		for _, numNodes := range []int{10, 100} {
+			for _, alloc := range []ipam.CIDRAllocatorType{ipam.RangeAllocatorType, ipam.CloudAllocatorType, ipam.IPAMFromClusterAllocatorType, ipam.IPAMFromCloudAllocatorType} {
+				tests = append(tests, &Config{AllocatorType: alloc, NumNodes: numNodes, CreateQPS: numNodes, KubeQPS: 10, CloudQPS: 10})
+			}
 		}
 	}
 
```
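The change above turns the test into an either/or: a single user-supplied config, or the default matrix. A minimal standalone sketch of that selection logic (the Config struct and function here are simplified stand-ins, not the file's actual code):

```go
package main

import "fmt"

// Config is a simplified stand-in mirroring the fields used in the diff above.
type Config struct {
	AllocatorType string
	NumNodes      int
	CreateQPS     int
	KubeQPS       int
	CloudQPS      int
}

func buildConfigs(isCustom bool, customConfig *Config) []*Config {
	var tests []*Config
	if isCustom {
		// A custom run replaces the whole matrix with the one user-supplied config.
		tests = append(tests, customConfig)
	} else {
		// Default matrix: every allocator type at each simulated cluster size.
		for _, numNodes := range []int{10, 100} {
			for _, alloc := range []string{"RangeAllocator", "CloudAllocator", "IPAMFromCluster", "IPAMFromCloud"} {
				tests = append(tests, &Config{AllocatorType: alloc, NumNodes: numNodes, CreateQPS: numNodes, KubeQPS: 10, CloudQPS: 10})
			}
		}
	}
	return tests
}

func main() {
	for _, cfg := range buildConfigs(false, nil) {
		// Names follow the README's TestPerformance/{AllocatorType}-KubeQPS{X}-Nodes{Y} pattern.
		fmt.Printf("%s-KubeQPS%d-Nodes%d\n", cfg.AllocatorType, cfg.KubeQPS, cfg.NumNodes)
	}
}
```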
test/integration/ipamperf/main_test.go

```diff
@@ -20,13 +20,47 @@ import (
 	"flag"
 	"testing"
 
+	"github.com/golang/glog"
+	"k8s.io/kubernetes/pkg/controller/nodeipam/ipam"
 	"k8s.io/kubernetes/test/integration/framework"
 )
 
-var resultsLogFile string
+var (
+	resultsLogFile string
+	isCustom       bool
+	customConfig   = &Config{
+		NumNodes:      10,
+		KubeQPS:       30,
+		CloudQPS:      30,
+		CreateQPS:     100,
+		AllocatorType: ipam.RangeAllocatorType,
+	}
+)
 
 func TestMain(m *testing.M) {
+	allocator := string(ipam.RangeAllocatorType)
+
 	flag.StringVar(&resultsLogFile, "log", "", "log file to write JSON results to")
+	flag.BoolVar(&isCustom, "custom", false, "enable custom test configuration")
+	flag.StringVar(&allocator, "allocator", allocator, "allocator to use")
+	flag.IntVar(&customConfig.KubeQPS, "kube-qps", customConfig.KubeQPS, "API server qps for allocations")
+	flag.IntVar(&customConfig.NumNodes, "num-nodes", 10, "number of nodes to simulate")
+	flag.IntVar(&customConfig.CreateQPS, "create-qps", customConfig.CreateQPS, "API server qps for node creation")
+	flag.IntVar(&customConfig.CloudQPS, "cloud-qps", customConfig.CloudQPS, "GCE Cloud qps limit")
 	flag.Parse()
+
+	switch allocator {
+	case string(ipam.RangeAllocatorType):
+		customConfig.AllocatorType = ipam.RangeAllocatorType
+	case string(ipam.CloudAllocatorType):
+		customConfig.AllocatorType = ipam.CloudAllocatorType
+	case string(ipam.IPAMFromCloudAllocatorType):
+		customConfig.AllocatorType = ipam.IPAMFromCloudAllocatorType
+	case string(ipam.IPAMFromClusterAllocatorType):
+		customConfig.AllocatorType = ipam.IPAMFromClusterAllocatorType
+	default:
+		glog.Fatalf("Unknown allocator type: %s", allocator)
+	}
+
 	framework.EtcdMain(m.Run)
 }
```
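Because these flags are parsed in TestMain, a custom configuration can also be driven through go test directly, without the wrapper script (a hypothetical invocation):

```shell
# Illustrative only: everything after -args is handed to TestMain's flag.Parse.
go test -test.run=TestPerformance -test.timeout=60m -v \
  -args -custom -allocator IPAMFromCloud -num-nodes 10 -kube-qps 30
```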
test/integration/ipamperf/results.go

```diff
@@ -95,7 +95,7 @@ func NewObserver(clientSet *clientset.Clientset, numNodes int) *Observer {
 // StartObserving starts an asynchronous loop to monitor for node changes.
 // Call Results() to get the test results after starting observer.
 func (o *Observer) StartObserving() error {
-	go o.monitor()
+	o.monitor()
 	glog.Infof("Test observer started")
 	return nil
 }
```
test/integration/ipamperf/results.go

```diff
@@ -173,6 +173,9 @@ func (o *Observer) monitor() {
 				nTime.allocated = time.Now()
 				nTime.podCIDR = newNode.Spec.PodCIDR
 				o.numAllocated++
+				if o.numAllocated%10 == 0 {
+					glog.Infof("progress: %d/%d - %.2d%%", o.numAllocated, o.numNodes, (o.numAllocated * 100.0 / o.numNodes))
+				}
 				// do following check only if numAllocated is modified, as otherwise, redundant updates
 				// can cause wg.Done() to be called multiple times, causing a panic
 				if o.numAdded == o.numNodes && o.numAllocated == o.numNodes {
```
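One detail worth noting in the new log line: for integers, a precision such as %.2d sets a minimum digit count, zero-padding single-digit percentages. A quick standalone check:

```go
package main

import "fmt"

func main() {
	// Same expression shape as the diff above: integer math, zero-padded percent.
	numAllocated, numNodes := 5, 100
	fmt.Printf("progress: %d/%d - %.2d%%\n", numAllocated, numNodes, numAllocated*100/numNodes)
	// Prints: progress: 5/100 - 05%
}
```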
test/integration/ipamperf/test-performance.sh

```diff
@@ -20,17 +20,25 @@ set -o pipefail
 
 TEST_ARGS=""
 RUN_PATTERN=".*"
+PROFILE_OPTS=""
 
 function usage() {
-  echo "usage: $0 [-h] [-d] [-r <pattern>] [-o <filename>]"
+  echo "usage: $0 <options>"
   echo " -h display this help message"
   echo " -d enable debug logs in tests"
   echo " -r <pattern> regex pattern to match for tests"
   echo " -o <filename> file to write JSON formatted results to"
+  echo " -p <id> enable cpu and memory profiles, output written to mem-<id>.out and cpu-<id>.out"
+  echo " -c enable custom test configuration"
+  echo " -a <name> allocator name, one of RangeAllocator, CloudAllocator, IPAMFromCluster, IPAMFromCloud"
+  echo " -k <num> api server qps for allocator"
+  echo " -n <num> number of nodes to simulate"
+  echo " -m <num> api server qps for node creation"
+  echo " -l <num> gce cloud endpoint qps"
   exit 1
 }
 
-while getopts ":hdr:o:" opt; do
+while getopts ":hdr:o:p:ca:k:n:m:l:" opt; do
   case ${opt} in
     d) TEST_ARGS="${TEST_ARGS} -v=6"
       ;;
```
test/integration/ipamperf/test-performance.sh

```diff
@@ -38,9 +46,23 @@ while getopts ":hdr:o:" opt; do
       ;;
     o) TEST_ARGS="${TEST_ARGS} -log ${OPTARG}"
       ;;
-    h) usage
-      ;;
-    \?) usage
-      ;;
+    p) PROFILE_OPTS="-memprofile mem-${OPTARG}.out -cpuprofile cpu-${OPTARG}.out"
+      ;;
+    c) TEST_ARGS="${TEST_ARGS} -custom"
+      ;;
+    a) TEST_ARGS="${TEST_ARGS} -allocator ${OPTARG}"
+      ;;
+    k) TEST_ARGS="${TEST_ARGS} -kube-qps ${OPTARG}"
+      ;;
+    n) TEST_ARGS="${TEST_ARGS} -num-nodes ${OPTARG}"
+      ;;
+    m) TEST_ARGS="${TEST_ARGS} -create-qps ${OPTARG}"
+      ;;
+    l) TEST_ARGS="${TEST_ARGS} -cloud-qps ${OPTARG}"
+      ;;
+    h) usage
+      ;;
+    \?) usage
+      ;;
   esac
 done
```
test/integration/ipamperf/test-performance.sh

```diff
@@ -65,5 +87,5 @@ kube::etcd::start
 
 # Running IPAM tests. It might take a long time.
 kube::log::status "performance test (IPAM) start"
-go test -test.run=${RUN_PATTERN} -test.timeout=60m -test.short=false -v -args ${TEST_ARGS}
+go test ${PROFILE_OPTS} -test.run=${RUN_PATTERN} -test.timeout=60m -test.short=false -v -args ${TEST_ARGS}
 kube::log::status "... IPAM tests finished."
```
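Tracing the script end to end: an invocation such as ./test-performance.sh -c -n 50 -p 50 would expand, roughly, to the command below (a hypothetical expansion shown for clarity):

```shell
# Illustrative only: PROFILE_OPTS comes from -p, TEST_ARGS accumulates from -c and -n.
go test -memprofile mem-50.out -cpuprofile cpu-50.out \
  -test.run=.* -test.timeout=60m -test.short=false -v \
  -args -custom -num-nodes 50
```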
test/integration/ipamperf/util.go

```diff
@@ -17,8 +17,11 @@ limitations under the License.
 package ipamperf
 
 import (
+	"time"
+
 	"github.com/golang/glog"
 	"k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	clientset "k8s.io/client-go/kubernetes"
```
test/integration/ipamperf/util.go

```diff
@@ -26,6 +29,11 @@ import (
 	"k8s.io/kubernetes/pkg/api/testapi"
 )
 
+const (
+	maxCreateRetries = 10
+	retryDelay       = 10 * time.Second
+)
+
 var (
 	baseNodeTemplate = &v1.Node{
 		ObjectMeta: metav1.ObjectMeta{
```
test/integration/ipamperf/util.go

```diff
@@ -71,7 +79,17 @@ func createNodes(apiURL string, config *Config) error {
 	})
 	glog.Infof("Creating %d nodes", config.NumNodes)
 	for i := 0; i < config.NumNodes; i++ {
-		if _, err := clientSet.CoreV1().Nodes().Create(baseNodeTemplate); err != nil {
+		var err error
+		for j := 0; j < maxCreateRetries; j++ {
+			if _, err = clientSet.CoreV1().Nodes().Create(baseNodeTemplate); err != nil && errors.IsServerTimeout(err) {
+				glog.Infof("Server timeout creating nodes, retrying after %v", retryDelay)
+				time.Sleep(retryDelay)
+				continue
+			}
+			break
+		}
+		if err != nil {
 			glog.Errorf("Error creating nodes: %v", err)
 			return err
 		}
 	}
```
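The createNodes change applies a generic retry-on-server-timeout pattern. A self-contained sketch of the same idea (the helper name and wiring are hypothetical, not code from this PR):

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/api/errors"
)

// retryOnServerTimeout retries op up to maxRetries times, sleeping between
// attempts, but only while the API server reports a timeout. Success or any
// other error breaks out immediately, mirroring the loop in the diff above.
func retryOnServerTimeout(maxRetries int, delay time.Duration, op func() error) error {
	var err error
	for i := 0; i < maxRetries; i++ {
		if err = op(); err != nil && errors.IsServerTimeout(err) {
			time.Sleep(delay)
			continue
		}
		break
	}
	return err
}

func main() {
	// A real caller would issue clientSet.CoreV1().Nodes().Create(...) inside op.
	err := retryOnServerTimeout(10, 10*time.Second, func() error { return nil })
	fmt.Println("create result:", err)
}
```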