mirror of https://github.com/k3s-io/k3s
Revert "[hpa] Parameterize tolerance, downscale, and upscale into HPAController, and add corresponding unit test for backsolved tolerance."
parent 4f67b0b211
commit 342eee680c
@@ -365,11 +365,7 @@ func (s *CMServer) Run(_ []string) error {
 			metrics.DefaultHeapsterService,
 			metrics.DefaultHeapsterPort,
 		)
-		tolerance := 1.0
-		downScale := time.Duration(5) * time.Second
-		upScale := time.Duration(3) * time.Second
-		podautoscaler.NewHorizontalController(kubeClient, metricsClient, tolerance, downScale, upScale).
+		// TODO parameterize tolerance/downscale/upscale options.
+		podautoscaler.NewHorizontalController(hpaClient, metricsClient).
 			Run(s.HorizontalPodAutoscalerSyncPeriod)
 	}
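For orientation, the wiring above constructs the controller and then blocks in Run on the controller manager's sync period. A minimal standalone sketch of that loop pattern follows; the helper name, the reconcile body, and the 30-second period are our assumptions, not code from the podautoscaler package:

package main

import (
	"fmt"
	"time"
)

// run loops forever, invoking reconcile once per sync-period tick.
// Sketch only: the real Run lives in the podautoscaler package.
func run(syncPeriod time.Duration, reconcile func() error) {
	for range time.Tick(syncPeriod) {
		if err := reconcile(); err != nil {
			fmt.Println("reconcile failed:", err)
		}
	}
}

func main() {
	run(30*time.Second, func() error {
		fmt.Println("reconciling all horizontal pod autoscalers")
		return nil
	})
}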
@@ -31,40 +31,30 @@ import (
 	"k8s.io/kubernetes/pkg/util"
 )
 
+const (
+	// Usage should exceed the tolerance before we start to downscale or upscale the pods.
+	// TODO: make it a flag or HPA spec element.
+	tolerance = 0.1
+)
+
 type HorizontalController struct {
 	client        client.Interface
 	metricsClient metrics.MetricsClient
 	eventRecorder record.EventRecorder
-	tolerance                float64
-	downscaleForbiddenWindow time.Duration
-	upscaleForbiddenWindow   time.Duration
 }
 
-func NewHorizontalController(client client.Interface, metricsClient metrics.MetricsClient, tol float64, dScale, uScale time.Duration) *HorizontalController {
+var downscaleForbiddenWindow = 5 * time.Minute
+var upscaleForbiddenWindow = 3 * time.Minute
+
+func NewHorizontalController(client client.Interface, metricsClient metrics.MetricsClient) *HorizontalController {
 	broadcaster := record.NewBroadcaster()
 	broadcaster.StartRecordingToSink(client.Events(""))
 	recorder := broadcaster.NewRecorder(api.EventSource{Component: "horizontal-pod-autoscaler"})
-
-	if tol < 0 || tol > 1 {
-		glog.Warningf("Invalid tolerance provided %v, using default.", tol)
-		tol = .1
-	}
-	if uScale == 0*time.Second {
-		glog.Warningf("Invalid upscale value provided %v, using default.", uScale)
-		uScale = 3 * time.Minute
-	}
-	if dScale == 0*time.Second {
-		glog.Warningf("Invalid downscale value provided %v, using default.", dScale)
-		dScale = 5 * time.Minute
-	}
-	glog.V(2).Infof("Created Horizontal Controller with downscale %v, upscale %v, and tolerance %v", dScale, uScale, tol)
 	return &HorizontalController{
 		client:        client,
 		metricsClient: metricsClient,
 		eventRecorder: recorder,
-		tolerance:                tol,
-		downscaleForbiddenWindow: dScale,
-		upscaleForbiddenWindow:   uScale,
 	}
 }
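The removed constructor clamped bad inputs back to the same defaults this revert turns into package-level values. A self-contained sketch of just that defaulting logic; the helper name normalizeHPAParams is ours, not from the diff:

package main

import (
	"fmt"
	"time"
)

// normalizeHPAParams mirrors the validation in the removed constructor:
// a tolerance outside [0, 1] falls back to 0.1, and zero-valued windows
// fall back to the 5m downscale / 3m upscale defaults.
func normalizeHPAParams(tol float64, dScale, uScale time.Duration) (float64, time.Duration, time.Duration) {
	if tol < 0 || tol > 1 {
		tol = 0.1
	}
	if dScale == 0 {
		dScale = 5 * time.Minute
	}
	if uScale == 0 {
		uScale = 3 * time.Minute
	}
	return tol, dScale, uScale
}

func main() {
	tol, dScale, uScale := normalizeHPAParams(1.5, 0, 0)
	fmt.Println(tol, dScale, uScale) // 0.1 5m0s 3m0s
}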
@@ -93,7 +83,7 @@ func (a *HorizontalController) computeReplicasForCPUUtilization(hpa extensions.H
 	}
 
 	usageRatio := float64(*currentUtilization) / float64(hpa.Spec.CPUUtilization.TargetPercentage)
-	if math.Abs(1.0-usageRatio) > a.tolerance {
+	if math.Abs(1.0-usageRatio) > tolerance {
 		return int(math.Ceil(usageRatio * float64(currentReplicas))), currentUtilization, timestamp, nil
 	} else {
 		return currentReplicas, currentUtilization, timestamp, nil
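A worked instance of this gate, assuming the restored 0.1 constant: at 50% utilization against a 100% target the usage ratio is 0.5, the deviation |1 - 0.5| = 0.5 breaches tolerance, and 10 replicas scale down to ceil(0.5 * 10) = 5; at 95% utilization the deviation is 0.05, inside tolerance, so the count is left alone. A standalone sketch (the function name desiredReplicas is ours):

package main

import (
	"fmt"
	"math"
)

const tolerance = 0.1 // mirrors the package constant restored by this revert

// desiredReplicas reproduces the tolerance gate from
// computeReplicasForCPUUtilization (sketch, not the full function).
func desiredReplicas(currentReplicas int, currentUtilization, targetPercentage float64) int {
	usageRatio := currentUtilization / targetPercentage
	if math.Abs(1.0-usageRatio) > tolerance {
		return int(math.Ceil(usageRatio * float64(currentReplicas)))
	}
	return currentReplicas
}

func main() {
	fmt.Println(desiredReplicas(10, 50, 100)) // 5: deviation 0.5 breaches tolerance
	fmt.Println(desiredReplicas(10, 95, 100)) // 10: deviation 0.05 is within tolerance
}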
@@ -135,7 +125,7 @@ func (a *HorizontalController) reconcileAutoscaler(hpa extensions.HorizontalPodA
 	// and there was no rescaling in the last downscaleForbiddenWindow.
 	if desiredReplicas < currentReplicas &&
 		(hpa.Status.LastScaleTime == nil ||
-			hpa.Status.LastScaleTime.Add(a.downscaleForbiddenWindow).Before(timestamp)) {
+			hpa.Status.LastScaleTime.Add(downscaleForbiddenWindow).Before(timestamp)) {
 		rescale = true
 	}
 
@@ -143,7 +133,7 @@ func (a *HorizontalController) reconcileAutoscaler(hpa extensions.HorizontalPodA
 	// and there was no rescaling in the last upscaleForbiddenWindow.
 	if desiredReplicas > currentReplicas &&
 		(hpa.Status.LastScaleTime == nil ||
-			hpa.Status.LastScaleTime.Add(a.upscaleForbiddenWindow).Before(timestamp)) {
+			hpa.Status.LastScaleTime.Add(upscaleForbiddenWindow).Before(timestamp)) {
 		rescale = true
 	}
 }
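These checks debounce scaling: a scale-down is allowed only when the last scale event lies more than downscaleForbiddenWindow in the past, and likewise for scale-ups against the shorter upscale window. A minimal sketch of the gate using the restored default windows; the helper name canRescale is ours:

package main

import (
	"fmt"
	"time"
)

var (
	downscaleForbiddenWindow = 5 * time.Minute
	upscaleForbiddenWindow   = 3 * time.Minute
)

// canRescale reproduces the window check: rescaling is permitted if there
// was no previous scale event, or the relevant window has fully elapsed.
func canRescale(lastScaleTime *time.Time, now time.Time, scalingDown bool) bool {
	window := upscaleForbiddenWindow
	if scalingDown {
		window = downscaleForbiddenWindow
	}
	return lastScaleTime == nil || lastScaleTime.Add(window).Before(now)
}

func main() {
	now := time.Now()
	last := now.Add(-4 * time.Minute)
	fmt.Println(canRescale(&last, now, true))  // false: 4m is still inside the 5m downscale window
	fmt.Println(canRescale(&last, now, false)) // true: 4m is past the 3m upscale window
}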
@@ -20,7 +20,6 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
-	"math"
 	"testing"
 	"time"
 
@@ -33,14 +32,9 @@ import (
 	"k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
 	"k8s.io/kubernetes/pkg/runtime"
 
-	glog "github.com/golang/glog"
-	"github.com/stretchr/testify/assert"
 	heapster "k8s.io/heapster/api/v1/types"
-)
-
-// unit tests need tolerance awareness to calibrate.
-const (
-	tolerance = .1
+	"github.com/stretchr/testify/assert"
 )
 
 func (w fakeResponseWrapper) DoRaw() ([]byte, error) {
@@ -212,7 +206,7 @@ func (tc *testCase) verifyResults(t *testing.T) {
 func (tc *testCase) runTest(t *testing.T) {
 	testClient := tc.prepareTestClient(t)
 	metricsClient := metrics.NewHeapsterMetricsClient(testClient, metrics.DefaultHeapsterNamespace, metrics.DefaultHeapsterScheme, metrics.DefaultHeapsterService, metrics.DefaultHeapsterPort)
-	hpaController := NewHorizontalController(testClient, metricsClient, tolerance, time.Second, time.Second)
+	hpaController := NewHorizontalController(testClient, metricsClient)
 	err := hpaController.reconcileAutoscalers()
 	assert.Equal(t, nil, err)
 	if tc.verifyEvents {
@@ -366,64 +360,4 @@ func TestEventNotCreated(t *testing.T) {
 	tc.runTest(t)
 }
 
-// TestComputedToleranceAlgImplementation is a regression test which
-// back-calculates a minimal percentage for downscaling based on a small percentage
-// increase in pod utilization which is calibrated against the tolerance value.
-func TestComputedToleranceAlgImplementation(t *testing.T) {
-
-	startPods := 10
-	// 150 mCPU per pod.
-	totalUsedCPUOfAllPods := uint64(startPods * 150)
-	// Each pod starts out asking for 2x what is really needed.
-	// This means we will have a 50% ratio of used/requested.
-	totalRequestedCPUOfAllPods := 2 * totalUsedCPUOfAllPods
-	requestedToUsed := float64(totalRequestedCPUOfAllPods / totalUsedCPUOfAllPods)
-	// Spread the amount we ask for over 10 pods. We can add some jitter later in reportedLevels.
-	perPodRequested := int(totalRequestedCPUOfAllPods) / startPods
-
-	// Force a minimal scaling event by satisfying (tolerance < 1 - resourcesUsedRatio).
-	target := math.Abs(1/(requestedToUsed*(1-tolerance))) + .01
-	finalCpuPercentTarget := int(target * 100)
-	resourcesUsedRatio := float64(totalUsedCPUOfAllPods) / float64(float64(totalRequestedCPUOfAllPods)*target)
-	// The autoscaler will compare this against the tolerance. Let's calculate the usageRatio,
-	// which will be compared with the tolerance.
-	usageRatioToleranceValue := float64(1 - resourcesUsedRatio)
-	// i.e. .60 * 20 -> scaled-down expectation.
-	finalPods := math.Ceil(resourcesUsedRatio * float64(startPods))
-
-	glog.Infof("To breach tolerance %f we will create a utilization ratio difference of %f", tolerance, usageRatioToleranceValue)
-	tc := testCase{
-		minReplicas:     0,
-		maxReplicas:     1000,
-		initialReplicas: startPods,
-		desiredReplicas: int(finalPods),
-		CPUTarget:       finalCpuPercentTarget,
-		reportedLevels: []uint64{
-			totalUsedCPUOfAllPods / 10,
-			totalUsedCPUOfAllPods / 10,
-			totalUsedCPUOfAllPods / 10,
-			totalUsedCPUOfAllPods / 10,
-			totalUsedCPUOfAllPods / 10,
-			totalUsedCPUOfAllPods / 10,
-			totalUsedCPUOfAllPods / 10,
-			totalUsedCPUOfAllPods / 10,
-			totalUsedCPUOfAllPods / 10,
-			totalUsedCPUOfAllPods / 10,
-		},
-		reportedCPURequests: []resource.Quantity{
-			resource.MustParse(fmt.Sprint(perPodRequested+100) + "m"),
-			resource.MustParse(fmt.Sprint(perPodRequested-100) + "m"),
-			resource.MustParse(fmt.Sprint(perPodRequested+10) + "m"),
-			resource.MustParse(fmt.Sprint(perPodRequested-10) + "m"),
-			resource.MustParse(fmt.Sprint(perPodRequested+2) + "m"),
-			resource.MustParse(fmt.Sprint(perPodRequested-2) + "m"),
-			resource.MustParse(fmt.Sprint(perPodRequested+1) + "m"),
-			resource.MustParse(fmt.Sprint(perPodRequested-1) + "m"),
-			resource.MustParse(fmt.Sprint(perPodRequested) + "m"),
-			resource.MustParse(fmt.Sprint(perPodRequested) + "m"),
-		},
-	}
-	tc.runTest(t)
-}
-
-// TODO: add more tests, e.g., enforcement of upscale/downscale window.
+// TODO: add more tests
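Plugging the removed test's numbers through that back-solve, as a sketch with simplified variable names: 10 pods use 150m each (1500m total) while requesting 300m each (3000m total), so requested/used = 2. The smallest target that still trips the 0.1 tolerance is 1/(2 * 0.9) + 0.01 ≈ 0.566, i.e. a 56% CPU target; the resulting used/target ratio 1500/(3000 * 0.566) ≈ 0.884 sits 0.116 outside tolerance, so the expected outcome is ceil(0.884 * 10) = 9 pods:

package main

import (
	"fmt"
	"math"
)

const tolerance = 0.1 // the package constant this revert restores

func main() {
	startPods := 10.0
	totalUsed := startPods * 150                  // 1500 mCPU across all pods
	totalRequested := 2 * totalUsed               // 3000 mCPU: each pod requests 2x its usage
	requestedToUsed := totalRequested / totalUsed // 2.0

	// Smallest utilization target that still breaches tolerance, plus a 1% margin.
	target := 1/(requestedToUsed*(1-tolerance)) + .01 // ≈ 0.5656
	finalCPUPercentTarget := int(target * 100)        // 56

	resourcesUsedRatio := totalUsed / (totalRequested * target) // ≈ 0.884
	// 1 - 0.884 ≈ 0.116 > 0.1, so the tolerance gate trips and the
	// controller is expected to scale down to ceil(0.884 * 10) pods.
	finalPods := math.Ceil(resourcesUsedRatio * startPods) // 9

	fmt.Println(finalCPUPercentTarget, finalPods) // 56 9
}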