mirror of https://github.com/k3s-io/k3s
commit 70049d10d8
@@ -63,22 +63,22 @@ type ResourceConsumer struct {
 	requestSizeInMegabytes int
 }
 
-func NewDynamicResourceConsumer(name string, replicas, initCPU, initMemory int, cpuLimit, memLimit int64, framework *Framework) *ResourceConsumer {
-	return newResourceConsumer(name, replicas, initCPU, initMemory, dynamicConsumptionTimeInSeconds, dynamicRequestSizeInMillicores, dynamicRequestSizeInMegabytes, cpuLimit, memLimit, framework)
+func NewDynamicResourceConsumer(name string, replicas, initCPUTotal, initMemoryTotal int, cpuLimit, memLimit int64, framework *Framework) *ResourceConsumer {
+	return newResourceConsumer(name, replicas, initCPUTotal, initMemoryTotal, dynamicConsumptionTimeInSeconds, dynamicRequestSizeInMillicores, dynamicRequestSizeInMegabytes, cpuLimit, memLimit, framework)
 }
 
-func NewStaticResourceConsumer(name string, replicas, initCPU, initMemory int, cpuLimit, memLimit int64, framework *Framework) *ResourceConsumer {
-	return newResourceConsumer(name, replicas, initCPU, initMemory, staticConsumptionTimeInSeconds, initCPU/replicas, initMemory/replicas, cpuLimit, memLimit, framework)
+func NewStaticResourceConsumer(name string, replicas, initCPUTotal, initMemoryTotal int, cpuLimit, memLimit int64, framework *Framework) *ResourceConsumer {
+	return newResourceConsumer(name, replicas, initCPUTotal, initMemoryTotal, staticConsumptionTimeInSeconds, initCPUTotal/replicas, initMemoryTotal/replicas, cpuLimit, memLimit, framework)
 }
 
 /*
 NewResourceConsumer creates new ResourceConsumer
-initCPU argument is in millicores
-initMemory argument is in megabytes
+initCPUTotal argument is in millicores
+initMemoryTotal argument is in megabytes
 memLimit argument is in megabytes, memLimit is a maximum amount of memory that can be consumed by a single pod
 cpuLimit argument is in millicores, cpuLimit is a maximum amount of cpu that can be consumed by a single pod
 */
-func newResourceConsumer(name string, replicas, initCPU, initMemory, consumptionTimeInSeconds, requestSizeInMillicores, requestSizeInMegabytes int, cpuLimit, memLimit int64, framework *Framework) *ResourceConsumer {
+func newResourceConsumer(name string, replicas, initCPUTotal, initMemoryTotal, consumptionTimeInSeconds, requestSizeInMillicores, requestSizeInMegabytes int, cpuLimit, memLimit int64, framework *Framework) *ResourceConsumer {
 	runServiceAndRCForResourceConsumer(framework.Client, framework.Namespace.Name, name, replicas, cpuLimit, memLimit)
 	rc := &ResourceConsumer{
 		name: name,
@@ -93,9 +93,9 @@ func newResourceConsumer(name string, replicas, initCPU, initMemory, consumption
 		requestSizeInMegabytes: requestSizeInMegabytes,
 	}
 	go rc.makeConsumeCPURequests()
-	rc.ConsumeCPU(initCPU)
+	rc.ConsumeCPU(initCPUTotal)
 	go rc.makeConsumeMemRequests()
-	rc.ConsumeMem(initMemory)
+	rc.ConsumeMem(initMemoryTotal)
 	return rc
 }
 
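For context, a hedged sketch (not part of this commit) of how a test might drive the renamed totals-based constructors. The function name, the concrete numbers, and the assumption that an e2e *Framework value f is in scope are all illustrative; every call it makes does appear in the diff above.

// exampleConsumerUsage is a hypothetical helper, shown only to illustrate
// the renamed parameters; the values are assumptions, not from this commit.
func exampleConsumerUsage(f *Framework) {
	// 2 replicas consuming 500 millicores and 200 MB in total across all
	// replicas, capped at 800 millicores and 300 MB per pod.
	rc := NewDynamicResourceConsumer("hpa-demo", 2, 500, 200, 800, 300, f)
	defer rc.CleanUp()

	// The "Total" suffix signals that consumption targets are cluster-wide
	// totals spread over the replicas, not per-replica amounts.
	rc.ConsumeCPU(700) // raise total CPU consumption to 700 millicores
	rc.ConsumeMem(300) // raise total memory consumption to 300 MB
}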
@@ -61,11 +61,13 @@ var _ = Describe("Autoscaling", func() {
 		setUpAutoscaler("cpu/node_utilization", 0.4, nodeCount, nodeCount+1)
 
 		// Consume 50% CPU
-		millicoresPerReplica := 500
-		rc := NewStaticResourceConsumer("cpu-utilization", nodeCount*coresPerNode, millicoresPerReplica*nodeCount*coresPerNode, 0, int64(millicoresPerReplica), 100, f)
-		expectNoError(waitForClusterSize(f.Client, nodeCount+1, scaleUpTimeout))
+		rcs := createConsumingRCs(f, "cpu-utilization", nodeCount*coresPerNode, 500, 0)
+		err := waitForClusterSize(f.Client, nodeCount+1, scaleUpTimeout)
+		for _, rc := range rcs {
+			rc.CleanUp()
+		}
+		expectNoError(err)
 
-		rc.CleanUp()
 		expectNoError(waitForClusterSize(f.Client, nodeCount, scaleDownTimeout))
 	})
 
@@ -84,10 +86,13 @@ var _ = Describe("Autoscaling", func() {
 
 		// Consume 60% of total memory capacity
 		megabytesPerReplica := int(memCapacityMb * 6 / 10 / coresPerNode)
-		rc := NewStaticResourceConsumer("mem-utilization", nodeCount*coresPerNode, 0, megabytesPerReplica*nodeCount*coresPerNode, 100, int64(megabytesPerReplica+100), f)
-		expectNoError(waitForClusterSize(f.Client, nodeCount+1, scaleUpTimeout))
+		rcs := createConsumingRCs(f, "mem-utilization", nodeCount*coresPerNode, 0, megabytesPerReplica)
+		err := waitForClusterSize(f.Client, nodeCount+1, scaleUpTimeout)
+		for _, rc := range rcs {
+			rc.CleanUp()
+		}
+		expectNoError(err)
 
-		rc.CleanUp()
 		expectNoError(waitForClusterSize(f.Client, nodeCount, scaleDownTimeout))
 	})
 
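As a sanity check of the sizing arithmetic: nodeCount*coresPerNode single-replica consumers each take megabytesPerReplica MB, which works out to 60% of total cluster memory. A runnable sketch with assumed example values (memCapacityMb and coresPerNode are cluster-dependent constants; the numbers below are illustrative only):

package main

import "fmt"

func main() {
	// Assumed example values, not from this commit.
	memCapacityMb := 7500 // per-node memory capacity in MB
	coresPerNode := 2
	nodeCount := 3

	megabytesPerReplica := memCapacityMb * 6 / 10 / coresPerNode // 2250 MB
	consumers := nodeCount * coresPerNode                        // 6 consumers
	fmt.Println(consumers * megabytesPerReplica)                 // 13500 MB = 60% of 3*7500
}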
@@ -116,6 +121,15 @@ func setUpAutoscaler(metric string, target float64, min, max int) {
 	expectNoError(err, "Output: "+string(out))
 }
 
+func createConsumingRCs(f *Framework, name string, count, cpuPerReplica, memPerReplica int) []*ResourceConsumer {
+	var res []*ResourceConsumer
+	for i := 1; i <= count; i++ {
+		name := fmt.Sprintf("%s-%d", name, i)
+		res = append(res, NewStaticResourceConsumer(name, 1, cpuPerReplica, memPerReplica, int64(cpuPerReplica), int64(memPerReplica+100), f))
+	}
+	return res
+}
+
 func cleanUpAutoscaler() {
 	By("Removing autoscaler")
 	out, err := exec.Command("gcloud", "compute", "instance-groups", "managed", "stop-autoscaling",
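A hedged sketch of the call pattern the tests above adopt (the helper name and concrete counts are assumptions for illustration): createConsumingRCs starts count single-replica consumers named name-1 through name-count, and callers collect the wait error first so that CleanUp runs on every consumer even when scale-up times out.

// exampleScaleUpCheck is hypothetical; it restates the pattern from the
// diff above with illustrative parameters.
func exampleScaleUpCheck(f *Framework, nodeCount, coresPerNode int) {
	// One consumer per core, each holding 500 millicores and no memory.
	rcs := createConsumingRCs(f, "cpu-utilization", nodeCount*coresPerNode, 500, 0)

	// Capture the error instead of asserting immediately...
	err := waitForClusterSize(f.Client, nodeCount+1, scaleUpTimeout)

	// ...so cleanup happens even if the cluster never reached the target.
	for _, rc := range rcs {
		rc.CleanUp()
	}
	expectNoError(err)
}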