Merge pull request #26631 from gmarek/constraints

Automatic merge from submit-queue

Revert revert of adding resource constraints for master components in density tests

The problem was the point at which the resource constraints were generated: the provider is not yet set at that time. This version, which generates the constraints during test setup (BeforeEach) instead, should work.
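
For illustration, here is a minimal, self-contained Go sketch of that ordering problem (the names provider, providerIs, and buildConstraints are illustrative stand-ins, not the e2e framework's API): constraints computed while the suite is being constructed see an empty provider, whereas deferring the call until setup, as this change does by moving it into BeforeEach, picks up the real value.

    package main

    import "fmt"

    // provider stands in for the e2e framework's TestContext.Provider, which is
    // only populated from the --provider flag after the suite has been constructed.
    var provider string

    func providerIs(p string) bool { return provider == p }

    // Computed at package-initialization time: provider is still "" here, so the
    // kubemark branch is never taken and the generic limit is used instead.
    var eagerConstraints = buildConstraints()

    // buildConstraints mirrors the shape of density30AddonResourceVerifier:
    // pick a CPU limit based on the provider (values are illustrative only).
    func buildConstraints() map[string]float64 {
        c := map[string]float64{}
        if providerIs("kubemark") {
            c["kube-apiserver"] = 0.15
        } else {
            c["kube-apiserver"] = 1.5
        }
        return c
    }

    func main() {
        provider = "kubemark" // flag parsing / context setup happens here

        // Deferring the call until after setup (what this PR does by moving it
        // into the test's BeforeEach) sees the real provider value.
        lazyConstraints := buildConstraints()

        fmt.Println("eager:", eagerConstraints["kube-apiserver"]) // 1.5 -- wrong branch
        fmt.Println("lazy: ", lazyConstraints["kube-apiserver"])  // 0.15 -- correct
    }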

cc @roberthbailey @alex-mohr
k8s-merge-robot 2016-06-02 05:15:55 -07:00
commit 61fb527801
1 changed file with 66 additions and 2 deletions

@@ -50,7 +50,57 @@ const (
// Maximum container failures this test tolerates before failing.
var MaxContainerFailures = 0
func density30AddonResourceVerifier() map[string]framework.ResourceConstraint {
func density30AddonResourceVerifier(numNodes int) map[string]framework.ResourceConstraint {
    var apiserverMem uint64
    var controllerMem uint64
    var schedulerMem uint64
    apiserverCPU := math.MaxFloat32
    apiserverMem = math.MaxUint64
    controllerCPU := math.MaxFloat32
    controllerMem = math.MaxUint64
    schedulerCPU := math.MaxFloat32
    schedulerMem = math.MaxUint64
    if framework.ProviderIs("kubemark") {
        if numNodes <= 5 {
            apiserverCPU = 0.15
            apiserverMem = 150 * (1024 * 1024)
            controllerCPU = 0.1
            controllerMem = 100 * (1024 * 1024)
            schedulerCPU = 0.05
            schedulerMem = 50 * (1024 * 1024)
        } else if numNodes <= 100 {
            apiserverCPU = 1.5
            apiserverMem = 1500 * (1024 * 1024)
            controllerCPU = 0.75
            controllerMem = 750 * (1024 * 1024)
            schedulerCPU = 0.75
            schedulerMem = 500 * (1024 * 1024)
        } else if numNodes <= 500 {
            apiserverCPU = 2.25
            apiserverMem = 2500 * (1024 * 1024)
            controllerCPU = 1.0
            controllerMem = 1100 * (1024 * 1024)
            schedulerCPU = 0.8
            schedulerMem = 500 * (1024 * 1024)
        } else if numNodes <= 1000 {
            apiserverCPU = 4
            apiserverMem = 4000 * (1024 * 1024)
            controllerCPU = 3
            controllerMem = 2000 * (1024 * 1024)
            schedulerCPU = 1.5
            schedulerMem = 750 * (1024 * 1024)
        }
    } else {
        if numNodes <= 100 {
            apiserverCPU = 1.5
            apiserverMem = 1300 * (1024 * 1024)
            controllerCPU = 0.5
            controllerMem = 300 * (1024 * 1024)
            schedulerCPU = 0.4
            schedulerMem = 150 * (1024 * 1024)
        }
    }
    constraints := make(map[string]framework.ResourceConstraint)
    constraints["fluentd-elasticsearch"] = framework.ResourceConstraint{
        CPUConstraint: 0.2,
@@ -81,6 +131,18 @@ func density30AddonResourceVerifier() map[string]framework.ResourceConstraint {
        CPUConstraint: 2,
        MemoryConstraint: 500 * (1024 * 1024),
    }
    constraints["kube-apiserver"] = framework.ResourceConstraint{
        CPUConstraint: apiserverCPU,
        MemoryConstraint: apiserverMem,
    }
    constraints["kube-controller-manager"] = framework.ResourceConstraint{
        CPUConstraint: controllerCPU,
        MemoryConstraint: controllerMem,
    }
    constraints["kube-scheduler"] = framework.ResourceConstraint{
        CPUConstraint: schedulerCPU,
        MemoryConstraint: schedulerMem,
    }
    return constraints
}
@@ -166,6 +228,9 @@ var _ = framework.KubeDescribe("Density", func() {
        nodes := framework.GetReadySchedulableNodesOrDie(c)
        nodeCount = len(nodes.Items)
        Expect(nodeCount).NotTo(BeZero())
        if nodeCount == 30 {
            f.AddonResourceConstraints = func() map[string]framework.ResourceConstraint { return density30AddonResourceVerifier(nodeCount) }()
        }
        nodeCpuCapacity = nodes.Items[0].Status.Allocatable.Cpu().MilliValue()
        nodeMemCapacity = nodes.Items[0].Status.Allocatable.Memory().Value()
@@ -218,7 +283,6 @@ var _ = framework.KubeDescribe("Density", func() {
        switch testArg.podsPerNode {
        case 30:
            name = "[Feature:Performance] " + name
            f.AddonResourceConstraints = density30AddonResourceVerifier()
        case 95:
            name = "[Feature:HighDensityPerformance]" + name
        default: