/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package podautoscaler

import (
	"encoding/json"
	"fmt"
	"math"
	"sync"
	"testing"
	"time"

	autoscalingv1 "k8s.io/api/autoscaling/v1"
	autoscalingv2 "k8s.io/api/autoscaling/v2beta1"
	"k8s.io/api/core/v1"
	extensions "k8s.io/api/extensions/v1beta1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
	clientfake "k8s.io/client-go/kubernetes/fake"
	core "k8s.io/client-go/testing"
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/apis/autoscaling"
	"k8s.io/kubernetes/pkg/controller"
	"k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
	metricsfake "k8s.io/metrics/pkg/client/clientset_generated/clientset/fake"
	cmfake "k8s.io/metrics/pkg/client/custom_metrics/fake"

	cmapi "k8s.io/metrics/pkg/apis/custom_metrics/v1beta1"
	metricsapi "k8s.io/metrics/pkg/apis/metrics/v1beta1"

	"github.com/stretchr/testify/assert"

	_ "k8s.io/kubernetes/pkg/apis/autoscaling/install"
	_ "k8s.io/kubernetes/pkg/apis/extensions/install"
)

var statusOk = []autoscalingv2.HorizontalPodAutoscalerCondition{
	{Type: autoscalingv2.AbleToScale, Status: v1.ConditionTrue, Reason: "SucceededRescale"},
	{Type: autoscalingv2.ScalingActive, Status: v1.ConditionTrue, Reason: "ValidMetricFound"},
	{Type: autoscalingv2.ScalingLimited, Status: v1.ConditionFalse, Reason: "DesiredWithinRange"},
}

// statusOkWithOverrides returns the "ok" status with the given conditions as overridden
func statusOkWithOverrides(overrides ...autoscalingv2.HorizontalPodAutoscalerCondition) []autoscalingv1.HorizontalPodAutoscalerCondition {
	resv2 := make([]autoscalingv2.HorizontalPodAutoscalerCondition, len(statusOk))
	copy(resv2, statusOk)
	for _, override := range overrides {
		resv2 = setConditionInList(resv2, override.Type, override.Status, override.Reason, override.Message)
	}

	// copy to a v1 slice
	resv1 := make([]autoscalingv1.HorizontalPodAutoscalerCondition, len(resv2))
	for i, cond := range resv2 {
		resv1[i] = autoscalingv1.HorizontalPodAutoscalerCondition{
			Type:   autoscalingv1.HorizontalPodAutoscalerConditionType(cond.Type),
			Status: cond.Status,
			Reason: cond.Reason,
		}
	}

	return resv1
}

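// Illustrative sketch (not part of the original suite): a test that expects the
// ScalingLimited condition to flip can build its expected conditions like this,
// keeping the AbleToScale and ScalingActive entries from statusOk untouched.
// The "TooManyReplicas" reason is only a placeholder for the example.
func sketchScalingLimitedOverride() []autoscalingv1.HorizontalPodAutoscalerCondition {
	return statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
		Type:   autoscalingv2.ScalingLimited,
		Status: v1.ConditionTrue,
		Reason: "TooManyReplicas",
	})
}
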
func alwaysReady() bool { return true }

type fakeResource struct {
	name       string
	apiVersion string
	kind       string
}

type testCase struct {
	sync.Mutex
	minReplicas     int32
	maxReplicas     int32
	initialReplicas int32
	desiredReplicas int32

	// CPU target utilization as a percentage of the requested resources.
	CPUTarget            int32
	CPUCurrent           int32
	verifyCPUCurrent     bool
	reportedLevels       []uint64
	reportedCPURequests  []resource.Quantity
	reportedPodReadiness []v1.ConditionStatus
	scaleUpdated         bool
	statusUpdated        bool
	eventCreated         bool
	verifyEvents         bool
	useMetricsAPI        bool
	metricsTarget        []autoscalingv2.MetricSpec
	expectedConditions   []autoscalingv1.HorizontalPodAutoscalerCondition

	// Channel with names of HPA objects which we have reconciled.
	processed chan string

	// Target resource information.
	resource *fakeResource

	// Last scale time
	lastScaleTime *metav1.Time

	// override the test clients
	testClient        *fake.Clientset
	testMetricsClient *metricsfake.Clientset
	testCMClient      *cmfake.FakeCustomMetricsClient
}

// Needs to be called under a lock.
func (tc *testCase) computeCPUCurrent() {
	if len(tc.reportedLevels) != len(tc.reportedCPURequests) || len(tc.reportedLevels) == 0 {
		return
	}
	reported := 0
	for _, r := range tc.reportedLevels {
		reported += int(r)
	}
	requested := 0
	for _, req := range tc.reportedCPURequests {
		requested += int(req.MilliValue())
	}
	tc.CPUCurrent = int32(100 * reported / requested)
}

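// Illustrative sketch (not part of the original suite): with reported levels of
// 300m, 500m and 700m against three 1-core requests, computeCPUCurrent yields
// 100 * 1500 / 3000 = 50 percent, which is the utilization the CPU-based tests
// below reason about.
func sketchComputeCPUCurrent() int32 {
	tc := testCase{
		reportedLevels:      []uint64{300, 500, 700},
		reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
	}
	tc.computeCPUCurrent()
	return tc.CPUCurrent // 50
}
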
func init() {
	// set this high so we don't accidentally run into it when testing
	scaleUpLimitFactor = 8
}

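// Illustrative sketch of why the factor above is set so high (the exact cap
// formula lives in the controller and is an assumption here, not a quotation):
// if a single reconciliation may scale up to roughly factor*currentReplicas,
// then a factor of 8 lets 3 replicas grow to 24 before the cap bites, so none
// of the scale-up tests in this file ever hit it.
func sketchScaleUpCap(currentReplicas int32, factor float64) int32 {
	return int32(math.Ceil(factor * float64(currentReplicas)))
}
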
func (tc *testCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfake.Clientset, *cmfake.FakeCustomMetricsClient) {
	namespace := "test-namespace"
	hpaName := "test-hpa"
	podNamePrefix := "test-pod"
	// TODO: also test with TargetSelector
	selector := map[string]string{"name": podNamePrefix}

	tc.Lock()

	tc.scaleUpdated = false
	tc.statusUpdated = false
	tc.eventCreated = false
	tc.processed = make(chan string, 100)
	if tc.CPUCurrent == 0 {
		tc.computeCPUCurrent()
	}

	// TODO(madhusudancs): HPA only supports resources in extensions/v1beta1 right now. Add
	// tests for "v1" replicationcontrollers when HPA adds support for cross-group scale.
	if tc.resource == nil {
		tc.resource = &fakeResource{
			name:       "test-rc",
			apiVersion: "extensions/v1beta1",
			kind:       "replicationcontrollers",
		}
	}
	tc.Unlock()

	fakeClient := &fake.Clientset{}
	fakeClient.AddReactor("list", "horizontalpodautoscalers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		tc.Lock()
		defer tc.Unlock()

		obj := &autoscalingv2.HorizontalPodAutoscalerList{
			Items: []autoscalingv2.HorizontalPodAutoscaler{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:      hpaName,
						Namespace: namespace,
						SelfLink:  "experimental/v1/namespaces/" + namespace + "/horizontalpodautoscalers/" + hpaName,
					},
					Spec: autoscalingv2.HorizontalPodAutoscalerSpec{
						ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{
							Kind:       tc.resource.kind,
							Name:       tc.resource.name,
							APIVersion: tc.resource.apiVersion,
						},
						MinReplicas: &tc.minReplicas,
						MaxReplicas: tc.maxReplicas,
					},
					Status: autoscalingv2.HorizontalPodAutoscalerStatus{
						CurrentReplicas: tc.initialReplicas,
						DesiredReplicas: tc.initialReplicas,
						LastScaleTime:   tc.lastScaleTime,
					},
				},
			},
		}

		if tc.CPUTarget > 0 {
			obj.Items[0].Spec.Metrics = []autoscalingv2.MetricSpec{
				{
					Type: autoscalingv2.ResourceMetricSourceType,
					Resource: &autoscalingv2.ResourceMetricSource{
						Name:                     v1.ResourceCPU,
						TargetAverageUtilization: &tc.CPUTarget,
					},
				},
			}
		}
		if len(tc.metricsTarget) > 0 {
			obj.Items[0].Spec.Metrics = append(obj.Items[0].Spec.Metrics, tc.metricsTarget...)
		}

		if len(obj.Items[0].Spec.Metrics) == 0 {
			// manually add in the defaulting logic
			obj.Items[0].Spec.Metrics = []autoscalingv2.MetricSpec{
				{
					Type: autoscalingv2.ResourceMetricSourceType,
					Resource: &autoscalingv2.ResourceMetricSource{
						Name: v1.ResourceCPU,
					},
				},
			}
		}

		// and... convert to autoscaling v1 to return the right type
		objv1, err := UnsafeConvertToVersionVia(obj, autoscalingv1.SchemeGroupVersion)
		if err != nil {
			return true, nil, err
		}

		return true, objv1, nil
	})

	fakeClient.AddReactor("get", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		tc.Lock()
		defer tc.Unlock()

		obj := &extensions.Scale{
			ObjectMeta: metav1.ObjectMeta{
				Name:      tc.resource.name,
				Namespace: namespace,
			},
			Spec: extensions.ScaleSpec{
				Replicas: tc.initialReplicas,
			},
			Status: extensions.ScaleStatus{
				Replicas: tc.initialReplicas,
				Selector: selector,
			},
		}
		return true, obj, nil
	})

	fakeClient.AddReactor("get", "deployments", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		tc.Lock()
		defer tc.Unlock()

		obj := &extensions.Scale{
			ObjectMeta: metav1.ObjectMeta{
				Name:      tc.resource.name,
				Namespace: namespace,
			},
			Spec: extensions.ScaleSpec{
				Replicas: tc.initialReplicas,
			},
			Status: extensions.ScaleStatus{
				Replicas: tc.initialReplicas,
				Selector: selector,
			},
		}
		return true, obj, nil
	})

	fakeClient.AddReactor("get", "replicasets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		tc.Lock()
		defer tc.Unlock()

		obj := &extensions.Scale{
			ObjectMeta: metav1.ObjectMeta{
				Name:      tc.resource.name,
				Namespace: namespace,
			},
			Spec: extensions.ScaleSpec{
				Replicas: tc.initialReplicas,
			},
			Status: extensions.ScaleStatus{
				Replicas: tc.initialReplicas,
				Selector: selector,
			},
		}
		return true, obj, nil
	})

	fakeClient.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		tc.Lock()
		defer tc.Unlock()

		obj := &v1.PodList{}
		for i := 0; i < len(tc.reportedCPURequests); i++ {
			podReadiness := v1.ConditionTrue
			if tc.reportedPodReadiness != nil {
				podReadiness = tc.reportedPodReadiness[i]
			}
			podName := fmt.Sprintf("%s-%d", podNamePrefix, i)
			pod := v1.Pod{
				Status: v1.PodStatus{
					Phase: v1.PodRunning,
					Conditions: []v1.PodCondition{
						{
							Type:   v1.PodReady,
							Status: podReadiness,
						},
					},
				},
				ObjectMeta: metav1.ObjectMeta{
					Name:      podName,
					Namespace: namespace,
					Labels: map[string]string{
						"name": podNamePrefix,
					},
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{
							Resources: v1.ResourceRequirements{
								Requests: v1.ResourceList{
									v1.ResourceCPU: tc.reportedCPURequests[i],
								},
							},
						},
					},
				},
			}
			obj.Items = append(obj.Items, pod)
		}
		return true, obj, nil
	})

	fakeClient.AddReactor("update", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		tc.Lock()
		defer tc.Unlock()

		obj := action.(core.UpdateAction).GetObject().(*extensions.Scale)
		replicas := action.(core.UpdateAction).GetObject().(*extensions.Scale).Spec.Replicas
		assert.Equal(t, tc.desiredReplicas, replicas, "the replica count of the RC should be as expected")
		tc.scaleUpdated = true
		return true, obj, nil
	})

	fakeClient.AddReactor("update", "deployments", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		tc.Lock()
		defer tc.Unlock()

		obj := action.(core.UpdateAction).GetObject().(*extensions.Scale)
		replicas := action.(core.UpdateAction).GetObject().(*extensions.Scale).Spec.Replicas
		assert.Equal(t, tc.desiredReplicas, replicas, "the replica count of the deployment should be as expected")
		tc.scaleUpdated = true
		return true, obj, nil
	})

	fakeClient.AddReactor("update", "replicasets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		tc.Lock()
		defer tc.Unlock()

		obj := action.(core.UpdateAction).GetObject().(*extensions.Scale)
		replicas := action.(core.UpdateAction).GetObject().(*extensions.Scale).Spec.Replicas
		assert.Equal(t, tc.desiredReplicas, replicas, "the replica count of the replicaset should be as expected")
		tc.scaleUpdated = true
		return true, obj, nil
	})

	fakeClient.AddReactor("update", "horizontalpodautoscalers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		tc.Lock()
		defer tc.Unlock()

		obj := action.(core.UpdateAction).GetObject().(*autoscalingv1.HorizontalPodAutoscaler)
		assert.Equal(t, namespace, obj.Namespace, "the HPA namespace should be as expected")
		assert.Equal(t, hpaName, obj.Name, "the HPA name should be as expected")
		assert.Equal(t, tc.desiredReplicas, obj.Status.DesiredReplicas, "the desired replica count reported in the object status should be as expected")
		if tc.verifyCPUCurrent {
			assert.NotNil(t, obj.Status.CurrentCPUUtilizationPercentage, "the reported CPU utilization percentage should be non-nil")
			assert.Equal(t, tc.CPUCurrent, *obj.Status.CurrentCPUUtilizationPercentage, "the reported CPU utilization percentage should be as expected")
		}
		var actualConditions []autoscalingv1.HorizontalPodAutoscalerCondition
		if err := json.Unmarshal([]byte(obj.ObjectMeta.Annotations[autoscaling.HorizontalPodAutoscalerConditionsAnnotation]), &actualConditions); err != nil {
			return true, nil, err
		}
		// TODO: it's ok not to sort these because statusOk
		// contains all the conditions, so we'll never be appending.
		// Default to statusOk when missing any specific conditions
		if tc.expectedConditions == nil {
			tc.expectedConditions = statusOkWithOverrides()
		}
		// clear the message so that we can easily compare
		for i := range actualConditions {
			actualConditions[i].Message = ""
			actualConditions[i].LastTransitionTime = metav1.Time{}
		}
		assert.Equal(t, tc.expectedConditions, actualConditions, "the status conditions should have been as expected")
		tc.statusUpdated = true
		// Every time we reconcile HPA object we are updating status.
		tc.processed <- obj.Name
		return true, obj, nil
	})

	fakeWatch := watch.NewFake()
	fakeClient.AddWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil))

	fakeMetricsClient := &metricsfake.Clientset{}
	fakeMetricsClient.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		tc.Lock()
		defer tc.Unlock()

		metrics := &metricsapi.PodMetricsList{}
		for i, cpu := range tc.reportedLevels {
			// NB: the list reactor actually does label selector filtering for us,
			// so we have to make sure our results match the label selector
			podMetric := metricsapi.PodMetrics{
				ObjectMeta: metav1.ObjectMeta{
					Name:      fmt.Sprintf("%s-%d", podNamePrefix, i),
					Namespace: namespace,
					Labels:    selector,
				},
				Timestamp: metav1.Time{Time: time.Now()},
				Containers: []metricsapi.ContainerMetrics{
					{
						Name: "container",
						Usage: v1.ResourceList{
							v1.ResourceCPU: *resource.NewMilliQuantity(
								int64(cpu),
								resource.DecimalSI),
							v1.ResourceMemory: *resource.NewQuantity(
								int64(1024*1024),
								resource.BinarySI),
						},
					},
				},
			}
			metrics.Items = append(metrics.Items, podMetric)
		}

		return true, metrics, nil
	})

	fakeCMClient := &cmfake.FakeCustomMetricsClient{}
	fakeCMClient.AddReactor("get", "*", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		tc.Lock()
		defer tc.Unlock()

		getForAction, wasGetFor := action.(cmfake.GetForAction)
		if !wasGetFor {
			return true, nil, fmt.Errorf("expected a get-for action, got %v instead", action)
		}

		if getForAction.GetName() == "*" {
			metrics := &cmapi.MetricValueList{}

			// multiple objects
			assert.Equal(t, "pods", getForAction.GetResource().Resource, "the type of object that we requested multiple metrics for should have been pods")
			assert.Equal(t, "qps", getForAction.GetMetricName(), "the metric name requested should have been qps, as specified in the metric spec")

			for i, level := range tc.reportedLevels {
				podMetric := cmapi.MetricValue{
					DescribedObject: v1.ObjectReference{
						Kind:      "Pod",
						Name:      fmt.Sprintf("%s-%d", podNamePrefix, i),
						Namespace: namespace,
					},
					Timestamp:  metav1.Time{Time: time.Now()},
					MetricName: "qps",
					Value:      *resource.NewMilliQuantity(int64(level), resource.DecimalSI),
				}
				metrics.Items = append(metrics.Items, podMetric)
			}

			return true, metrics, nil
		}

		name := getForAction.GetName()
		mapper := api.Registry.RESTMapper()
		metrics := &cmapi.MetricValueList{}
		var matchedTarget *autoscalingv2.MetricSpec
		for i, target := range tc.metricsTarget {
			if target.Type == autoscalingv2.ObjectMetricSourceType && name == target.Object.Target.Name {
				gk := schema.FromAPIVersionAndKind(target.Object.Target.APIVersion, target.Object.Target.Kind).GroupKind()
				mapping, err := mapper.RESTMapping(gk)
				if err != nil {
					t.Logf("unable to get mapping for %s: %v", gk.String(), err)
					continue
				}
				groupResource := schema.GroupResource{Group: mapping.GroupVersionKind.Group, Resource: mapping.Resource}

				if getForAction.GetResource().Resource == groupResource.String() {
					matchedTarget = &tc.metricsTarget[i]
				}
			}
		}
		assert.NotNil(t, matchedTarget, "this request should have matched one of the metric specs")
		assert.Equal(t, "qps", getForAction.GetMetricName(), "the metric name requested should have been qps, as specified in the metric spec")

		metrics.Items = []cmapi.MetricValue{
			{
				DescribedObject: v1.ObjectReference{
					Kind:       matchedTarget.Object.Target.Kind,
					APIVersion: matchedTarget.Object.Target.APIVersion,
					Name:       name,
				},
				Timestamp:  metav1.Time{Time: time.Now()},
				MetricName: "qps",
				Value:      *resource.NewMilliQuantity(int64(tc.reportedLevels[0]), resource.DecimalSI),
			},
		}

		return true, metrics, nil
	})

	return fakeClient, fakeMetricsClient, fakeCMClient
}

func (tc *testCase) verifyResults(t *testing.T) {
	tc.Lock()
	defer tc.Unlock()

	assert.Equal(t, tc.initialReplicas != tc.desiredReplicas, tc.scaleUpdated, "the scale should only be updated if we expected a change in replicas")
	assert.True(t, tc.statusUpdated, "the status should have been updated")
	if tc.verifyEvents {
		assert.Equal(t, tc.initialReplicas != tc.desiredReplicas, tc.eventCreated, "an event should have been created only if we expected a change in replicas")
	}
}

func (tc *testCase) setupController(t *testing.T) (*HorizontalController, informers.SharedInformerFactory) {
	testClient, testMetricsClient, testCMClient := tc.prepareTestClient(t)
	if tc.testClient != nil {
		testClient = tc.testClient
	}
	if tc.testMetricsClient != nil {
		testMetricsClient = tc.testMetricsClient
	}
	if tc.testCMClient != nil {
		testCMClient = tc.testCMClient
	}
	metricsClient := metrics.NewRESTMetricsClient(
		testMetricsClient.MetricsV1beta1(),
		testCMClient,
	)

	eventClient := &clientfake.Clientset{}
	eventClient.AddReactor("create", "events", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		tc.Lock()
		defer tc.Unlock()

		obj := action.(core.CreateAction).GetObject().(*v1.Event)
		if tc.verifyEvents {
			switch obj.Reason {
			case "SuccessfulRescale":
				assert.Equal(t, fmt.Sprintf("New size: %d; reason: cpu resource utilization (percentage of request) above target", tc.desiredReplicas), obj.Message)
			case "DesiredReplicasComputed":
				assert.Equal(t, fmt.Sprintf(
					"Computed the desired num of replicas: %d (avgCPUutil: %d, current replicas: %d)",
					tc.desiredReplicas,
					(int64(tc.reportedLevels[0])*100)/tc.reportedCPURequests[0].MilliValue(), tc.initialReplicas), obj.Message)
			default:
				assert.False(t, true, fmt.Sprintf("Unexpected event: %s / %s", obj.Reason, obj.Message))
			}
		}
		tc.eventCreated = true
		return true, obj, nil
	})

	replicaCalc := &ReplicaCalculator{
		metricsClient: metricsClient,
		podsGetter:    testClient.Core(),
	}

	informerFactory := informers.NewSharedInformerFactory(testClient, controller.NoResyncPeriodFunc())
	defaultUpscaleForbiddenWindow := 3 * time.Minute
	defaultDownscaleForbiddenWindow := 5 * time.Minute

	hpaController := NewHorizontalController(
		eventClient.Core(),
		testClient.Extensions(),
		testClient.Autoscaling(),
		replicaCalc,
		informerFactory.Autoscaling().V1().HorizontalPodAutoscalers(),
		controller.NoResyncPeriodFunc(),
		defaultUpscaleForbiddenWindow,
		defaultDownscaleForbiddenWindow,
	)
	hpaController.hpaListerSynced = alwaysReady

	return hpaController, informerFactory
}

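// Illustrative sketch (not one of the original tests): because setupController
// honors the testClient/testMetricsClient/testCMClient overrides above, a test
// case can inject its own fake metrics client, e.g. one that always errors, to
// exercise the failure path. A test would set
// tc.testMetricsClient = sketchErroringMetricsClient() before tc.runTest(t).
func sketchErroringMetricsClient() *metricsfake.Clientset {
	fakeMetricsClient := &metricsfake.Clientset{}
	fakeMetricsClient.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		// Returning an error here lets a test observe how the controller reports
		// unavailable metrics instead of computing a new replica count.
		return true, nil, fmt.Errorf("metrics API temporarily unavailable")
	})
	return fakeMetricsClient
}
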
func (tc *testCase) runTestWithController(t *testing.T, hpaController *HorizontalController, informerFactory informers.SharedInformerFactory) {
	stop := make(chan struct{})
	defer close(stop)
	informerFactory.Start(stop)
	go hpaController.Run(stop)

	tc.Lock()
	if tc.verifyEvents {
		tc.Unlock()
		// We need to wait for events to be broadcasted (sleep for longer than record.sleepDuration).
		time.Sleep(2 * time.Second)
	} else {
		tc.Unlock()
	}
	// Wait for HPA to be processed.
	<-tc.processed
	tc.verifyResults(t)
}

func (tc *testCase) runTest(t *testing.T) {
	hpaController, informerFactory := tc.setupController(t)
	tc.runTestWithController(t, hpaController, informerFactory)
}

func TestScaleUp(t *testing.T) {
	tc := testCase{
		minReplicas:         2,
		maxReplicas:         6,
		initialReplicas:     3,
		desiredReplicas:     5,
		CPUTarget:           30,
		verifyCPUCurrent:    true,
		reportedLevels:      []uint64{300, 500, 700},
		reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
		useMetricsAPI:       true,
	}
	tc.runTest(t)
}

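// Illustrative sketch of the arithmetic behind TestScaleUp (an assumption about
// the shape of the usage-ratio calculation, not a copy of the controller code):
// average utilization is (300+500+700)/3000 = 50%, so with a 30% target the
// desired count is ceil(3 * 50 / 30) = 5, matching desiredReplicas above.
func sketchDesiredReplicasFromUtilization(currentReplicas, currentUtilization, targetUtilization int32) int32 {
	return int32(math.Ceil(float64(currentReplicas) * float64(currentUtilization) / float64(targetUtilization)))
}
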
// Unready pods are counted as having zero CPU usage when scaling up, so they
// shrink the computed scale-up rather than preventing it outright (see the
// sketch after this test).
func TestScaleUpUnreadyLessScale(t *testing.T) {
	tc := testCase{
		minReplicas:          2,
		maxReplicas:          6,
		initialReplicas:      3,
		desiredReplicas:      4,
		CPUTarget:            30,
		CPUCurrent:           60,
		verifyCPUCurrent:     true,
		reportedLevels:       []uint64{300, 500, 700},
		reportedCPURequests:  []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
		reportedPodReadiness: []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionTrue},
		useMetricsAPI:        true,
	}
	tc.runTest(t)
}

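// Illustrative sketch of the unready-pod adjustment described above (an
// assumption about the approach, not a copy of the controller code): unready
// pods contribute zero usage, so for TestScaleUpUnreadyLessScale the adjusted
// utilization is (0+500+700)/3000 = 40%, giving ceil(3 * 40 / 30) = 4 replicas,
// a smaller jump than the 60% utilization over the ready pods alone would imply.
func sketchUnreadyAdjustedUtilization(levels []uint64, ready []bool, requestedMilli int64) int32 {
	var used int64
	for i, level := range levels {
		if ready[i] {
			used += int64(level)
		}
		// unready pods are counted as zero usage when scaling up
	}
	return int32(100 * used / requestedMilli)
}
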
// If counting the unready pods as using zero CPU would turn the scale-up into a
// scale-down, the controller simply chooses not to scale.
func TestScaleUpUnreadyNoScale(t *testing.T) {
	tc := testCase{
		minReplicas:          2,
		maxReplicas:          6,
		initialReplicas:      3,
		desiredReplicas:      3,
		CPUTarget:            30,
		CPUCurrent:           40,
		verifyCPUCurrent:     true,
		reportedLevels:       []uint64{400, 500, 700},
		reportedCPURequests:  []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
		reportedPodReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
		useMetricsAPI:        true,
		expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
			Type:   autoscalingv2.AbleToScale,
			Status: v1.ConditionTrue,
			Reason: "ReadyForNewScale",
		}),
	}
	tc.runTest(t)
}

2016-03-09 00:27:13 +00:00
|
|
|
func TestScaleUpDeployment(t *testing.T) {
|
|
|
|
tc := testCase{
|
|
|
|
minReplicas: 2,
|
|
|
|
maxReplicas: 6,
|
|
|
|
initialReplicas: 3,
|
|
|
|
desiredReplicas: 5,
|
|
|
|
CPUTarget: 30,
|
|
|
|
verifyCPUCurrent: true,
|
|
|
|
reportedLevels: []uint64{300, 500, 700},
|
|
|
|
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
2017-09-09 18:53:34 +00:00
|
|
|
useMetricsAPI: true,
|
2016-03-09 00:27:13 +00:00
|
|
|
resource: &fakeResource{
|
|
|
|
name: "test-dep",
|
|
|
|
apiVersion: "extensions/v1beta1",
|
|
|
|
kind: "deployments",
|
|
|
|
},
|
|
|
|
}
|
|
|
|
tc.runTest(t)
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestScaleUpReplicaSet(t *testing.T) {
|
|
|
|
tc := testCase{
|
|
|
|
minReplicas: 2,
|
|
|
|
maxReplicas: 6,
|
|
|
|
initialReplicas: 3,
|
|
|
|
desiredReplicas: 5,
|
|
|
|
CPUTarget: 30,
|
|
|
|
verifyCPUCurrent: true,
|
|
|
|
reportedLevels: []uint64{300, 500, 700},
|
|
|
|
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
2017-09-09 18:53:34 +00:00
|
|
|
useMetricsAPI: true,
|
2016-03-09 00:27:13 +00:00
|
|
|
resource: &fakeResource{
|
|
|
|
name: "test-replicaset",
|
|
|
|
apiVersion: "extensions/v1beta1",
|
|
|
|
kind: "replicasets",
|
|
|
|
},
|
|
|
|
}
|
|
|
|
tc.runTest(t)
|
|
|
|
}
|
|
|
|
|
2016-01-29 11:20:19 +00:00
|
|
|
func TestScaleUpCM(t *testing.T) {
|
|
|
|
tc := testCase{
|
|
|
|
minReplicas: 2,
|
|
|
|
maxReplicas: 6,
|
|
|
|
initialReplicas: 3,
|
|
|
|
desiredReplicas: 4,
|
|
|
|
CPUTarget: 0,
|
2016-12-02 20:18:26 +00:00
|
|
|
metricsTarget: []autoscalingv2.MetricSpec{
|
|
|
|
{
|
|
|
|
Type: autoscalingv2.PodsMetricSourceType,
|
|
|
|
Pods: &autoscalingv2.PodsMetricSource{
|
|
|
|
MetricName: "qps",
|
|
|
|
TargetAverageValue: resource.MustParse("15.0"),
|
|
|
|
},
|
|
|
|
},
|
2016-01-29 11:20:19 +00:00
|
|
|
},
|
2017-02-20 06:17:16 +00:00
|
|
|
reportedLevels: []uint64{20000, 10000, 30000},
|
2016-01-29 11:20:19 +00:00
|
|
|
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
|
|
|
}
|
|
|
|
tc.runTest(t)
|
|
|
|
}
|
|
|
|
|
2016-09-27 18:47:52 +00:00
|
|
|
func TestScaleUpCMUnreadyLessScale(t *testing.T) {
|
|
|
|
tc := testCase{
|
|
|
|
minReplicas: 2,
|
|
|
|
maxReplicas: 6,
|
|
|
|
initialReplicas: 3,
|
|
|
|
desiredReplicas: 4,
|
|
|
|
CPUTarget: 0,
|
2016-12-02 20:18:26 +00:00
|
|
|
metricsTarget: []autoscalingv2.MetricSpec{
|
|
|
|
{
|
|
|
|
Type: autoscalingv2.PodsMetricSourceType,
|
|
|
|
Pods: &autoscalingv2.PodsMetricSource{
|
|
|
|
MetricName: "qps",
|
|
|
|
TargetAverageValue: resource.MustParse("15.0"),
|
|
|
|
},
|
|
|
|
},
|
2016-09-27 18:47:52 +00:00
|
|
|
},
|
2017-02-20 06:17:16 +00:00
|
|
|
reportedLevels: []uint64{50000, 10000, 30000},
|
2016-11-18 20:50:17 +00:00
|
|
|
reportedPodReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse},
|
2016-09-27 18:47:52 +00:00
|
|
|
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
|
|
|
}
|
|
|
|
tc.runTest(t)
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestScaleUpCMUnreadyNoScaleWouldScaleDown(t *testing.T) {
|
|
|
|
tc := testCase{
|
|
|
|
minReplicas: 2,
|
|
|
|
maxReplicas: 6,
|
|
|
|
initialReplicas: 3,
|
|
|
|
desiredReplicas: 3,
|
|
|
|
CPUTarget: 0,
|
2016-12-02 20:18:26 +00:00
|
|
|
metricsTarget: []autoscalingv2.MetricSpec{
|
|
|
|
{
|
|
|
|
Type: autoscalingv2.PodsMetricSourceType,
|
|
|
|
Pods: &autoscalingv2.PodsMetricSource{
|
|
|
|
MetricName: "qps",
|
|
|
|
TargetAverageValue: resource.MustParse("15.0"),
|
|
|
|
},
|
|
|
|
},
|
2016-09-27 18:47:52 +00:00
|
|
|
},
|
2017-02-20 06:17:16 +00:00
|
|
|
reportedLevels: []uint64{50000, 15000, 30000},
|
2016-11-18 20:50:17 +00:00
|
|
|
reportedPodReadiness: []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionFalse},
|
2016-09-27 18:47:52 +00:00
|
|
|
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
2017-05-24 21:09:47 +00:00
|
|
|
expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
|
|
|
|
Type: autoscalingv2.AbleToScale,
|
|
|
|
Status: v1.ConditionTrue,
|
|
|
|
Reason: "ReadyForNewScale",
|
|
|
|
}),
|
2016-09-27 18:47:52 +00:00
|
|
|
}
|
|
|
|
tc.runTest(t)
|
|
|
|
}
|
|
|
|
|
2017-02-20 06:17:16 +00:00
|
|
|
func TestScaleUpCMObject(t *testing.T) {
|
|
|
|
tc := testCase{
|
|
|
|
minReplicas: 2,
|
|
|
|
maxReplicas: 6,
|
|
|
|
initialReplicas: 3,
|
|
|
|
desiredReplicas: 4,
|
|
|
|
CPUTarget: 0,
|
|
|
|
metricsTarget: []autoscalingv2.MetricSpec{
|
|
|
|
{
|
|
|
|
Type: autoscalingv2.ObjectMetricSourceType,
|
|
|
|
Object: &autoscalingv2.ObjectMetricSource{
|
|
|
|
Target: autoscalingv2.CrossVersionObjectReference{
|
|
|
|
APIVersion: "extensions/v1beta1",
|
|
|
|
Kind: "Deployment",
|
|
|
|
Name: "some-deployment",
|
|
|
|
},
|
|
|
|
MetricName: "qps",
|
|
|
|
TargetValue: resource.MustParse("15.0"),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
reportedLevels: []uint64{20000},
|
|
|
|
}
|
|
|
|
tc.runTest(t)
|
|
|
|
}
|
|
|
|
|
2015-09-14 08:14:32 +00:00
|
|
|
func TestScaleDown(t *testing.T) {
|
|
|
|
tc := testCase{
|
2015-10-13 15:24:23 +00:00
|
|
|
minReplicas: 2,
|
|
|
|
maxReplicas: 6,
|
|
|
|
initialReplicas: 5,
|
|
|
|
desiredReplicas: 3,
|
|
|
|
CPUTarget: 50,
|
2016-02-23 14:18:49 +00:00
|
|
|
verifyCPUCurrent: true,
|
2015-10-13 15:24:23 +00:00
|
|
|
reportedLevels: []uint64{100, 300, 500, 250, 250},
|
|
|
|
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
2017-09-09 18:53:34 +00:00
|
|
|
useMetricsAPI: true,
|
2015-08-25 17:16:47 +00:00
|
|
|
}
|
2015-09-14 08:14:32 +00:00
|
|
|
tc.runTest(t)
|
|
|
|
}
|
2015-08-25 17:16:47 +00:00
|
|
|
|
2016-01-29 11:20:19 +00:00
|
|
|
func TestScaleDownCM(t *testing.T) {
|
|
|
|
tc := testCase{
|
|
|
|
minReplicas: 2,
|
|
|
|
maxReplicas: 6,
|
|
|
|
initialReplicas: 5,
|
|
|
|
desiredReplicas: 3,
|
|
|
|
CPUTarget: 0,
|
2016-12-02 20:18:26 +00:00
|
|
|
metricsTarget: []autoscalingv2.MetricSpec{
|
|
|
|
{
|
|
|
|
Type: autoscalingv2.PodsMetricSourceType,
|
|
|
|
Pods: &autoscalingv2.PodsMetricSource{
|
|
|
|
MetricName: "qps",
|
|
|
|
TargetAverageValue: resource.MustParse("20.0"),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
2017-02-20 06:17:16 +00:00
|
|
|
reportedLevels: []uint64{12000, 12000, 12000, 12000, 12000},
|
|
|
|
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
|
|
|
}
|
|
|
|
tc.runTest(t)
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestScaleDownCMObject(t *testing.T) {
|
|
|
|
tc := testCase{
|
|
|
|
minReplicas: 2,
|
|
|
|
maxReplicas: 6,
|
|
|
|
initialReplicas: 5,
|
|
|
|
desiredReplicas: 3,
|
|
|
|
CPUTarget: 0,
|
|
|
|
metricsTarget: []autoscalingv2.MetricSpec{
|
|
|
|
{
|
|
|
|
Type: autoscalingv2.ObjectMetricSourceType,
|
|
|
|
Object: &autoscalingv2.ObjectMetricSource{
|
|
|
|
Target: autoscalingv2.CrossVersionObjectReference{
|
|
|
|
APIVersion: "extensions/v1beta1",
|
|
|
|
Kind: "Deployment",
|
|
|
|
Name: "some-deployment",
|
|
|
|
},
|
|
|
|
MetricName: "qps",
|
|
|
|
TargetValue: resource.MustParse("20.0"),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
reportedLevels: []uint64{12000},
|
2016-01-29 11:20:19 +00:00
|
|
|
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
|
|
|
}
|
|
|
|
tc.runTest(t)
|
|
|
|
}
|
|
|
|
|
2016-09-27 18:47:52 +00:00
|
|
|
func TestScaleDownIgnoresUnreadyPods(t *testing.T) {
|
|
|
|
tc := testCase{
|
|
|
|
minReplicas: 2,
|
|
|
|
maxReplicas: 6,
|
|
|
|
initialReplicas: 5,
|
|
|
|
desiredReplicas: 2,
|
|
|
|
CPUTarget: 50,
|
|
|
|
CPUCurrent: 30,
|
|
|
|
verifyCPUCurrent: true,
|
|
|
|
reportedLevels: []uint64{100, 300, 500, 250, 250},
|
|
|
|
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
2017-09-09 18:53:34 +00:00
|
|
|
useMetricsAPI: true,
|
2016-11-18 20:50:17 +00:00
|
|
|
reportedPodReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
|
2016-09-27 18:47:52 +00:00
|
|
|
}
|
|
|
|
tc.runTest(t)
|
|
|
|
}
|
|
|
|
|
2015-09-14 08:14:32 +00:00
|
|
|
func TestTolerance(t *testing.T) {
|
|
|
|
tc := testCase{
|
2015-10-13 15:24:23 +00:00
|
|
|
minReplicas: 1,
|
|
|
|
maxReplicas: 5,
|
|
|
|
initialReplicas: 3,
|
|
|
|
desiredReplicas: 3,
|
|
|
|
CPUTarget: 100,
|
|
|
|
reportedLevels: []uint64{1010, 1030, 1020},
|
|
|
|
reportedCPURequests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
|
2017-09-09 18:53:34 +00:00
|
|
|
useMetricsAPI: true,
|
2017-05-24 21:09:47 +00:00
|
|
|
expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
|
|
|
|
Type: autoscalingv2.AbleToScale,
|
|
|
|
Status: v1.ConditionTrue,
|
|
|
|
Reason: "ReadyForNewScale",
|
|
|
|
}),
|
2016-01-29 11:20:19 +00:00
|
|
|
}
|
|
|
|
tc.runTest(t)
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestToleranceCM(t *testing.T) {
|
|
|
|
tc := testCase{
|
|
|
|
minReplicas: 1,
|
|
|
|
maxReplicas: 5,
|
|
|
|
initialReplicas: 3,
|
|
|
|
desiredReplicas: 3,
|
2016-12-02 20:18:26 +00:00
|
|
|
metricsTarget: []autoscalingv2.MetricSpec{
|
|
|
|
{
|
|
|
|
Type: autoscalingv2.PodsMetricSourceType,
|
|
|
|
Pods: &autoscalingv2.PodsMetricSource{
|
|
|
|
MetricName: "qps",
|
|
|
|
TargetAverageValue: resource.MustParse("20.0"),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
2017-02-20 06:17:16 +00:00
|
|
|
reportedLevels: []uint64{20000, 20001, 21000},
|
|
|
|
reportedCPURequests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
|
2017-05-24 21:09:47 +00:00
|
|
|
expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
|
|
|
|
Type: autoscalingv2.AbleToScale,
|
|
|
|
Status: v1.ConditionTrue,
|
|
|
|
Reason: "ReadyForNewScale",
|
|
|
|
}),
|
2017-02-20 06:17:16 +00:00
|
|
|
}
|
|
|
|
tc.runTest(t)
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestToleranceCMObject(t *testing.T) {
|
|
|
|
tc := testCase{
|
|
|
|
minReplicas: 1,
|
|
|
|
maxReplicas: 5,
|
|
|
|
initialReplicas: 3,
|
|
|
|
desiredReplicas: 3,
|
|
|
|
metricsTarget: []autoscalingv2.MetricSpec{
|
|
|
|
{
|
|
|
|
Type: autoscalingv2.ObjectMetricSourceType,
|
|
|
|
Object: &autoscalingv2.ObjectMetricSource{
|
|
|
|
Target: autoscalingv2.CrossVersionObjectReference{
|
|
|
|
APIVersion: "extensions/v1beta1",
|
|
|
|
Kind: "Deployment",
|
|
|
|
Name: "some-deployment",
|
|
|
|
},
|
|
|
|
MetricName: "qps",
|
|
|
|
TargetValue: resource.MustParse("20.0"),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
reportedLevels: []uint64{20050},
|
2016-01-29 11:20:19 +00:00
|
|
|
reportedCPURequests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
|
2017-05-24 21:09:47 +00:00
|
|
|
expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
|
|
|
|
Type: autoscalingv2.AbleToScale,
|
|
|
|
Status: v1.ConditionTrue,
|
|
|
|
Reason: "ReadyForNewScale",
|
|
|
|
}),
|
2015-08-25 17:16:47 +00:00
|
|
|
}
|
2015-09-14 08:14:32 +00:00
|
|
|
tc.runTest(t)
|
|
|
|
}
|
2015-08-20 12:55:28 +00:00
|
|
|
|
2015-09-17 12:08:39 +00:00
|
|
|
func TestMinReplicas(t *testing.T) {
|
2015-09-14 08:14:32 +00:00
|
|
|
tc := testCase{
|
2015-10-13 15:24:23 +00:00
|
|
|
minReplicas: 2,
|
|
|
|
maxReplicas: 5,
|
|
|
|
initialReplicas: 3,
|
|
|
|
desiredReplicas: 2,
|
|
|
|
CPUTarget: 90,
|
|
|
|
reportedLevels: []uint64{10, 95, 10},
|
|
|
|
reportedCPURequests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
|
2017-09-09 18:53:34 +00:00
|
|
|
useMetricsAPI: true,
|
2017-05-24 21:09:47 +00:00
|
|
|
expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
|
|
|
|
Type: autoscalingv2.ScalingLimited,
|
|
|
|
Status: v1.ConditionTrue,
|
|
|
|
Reason: "TooFewReplicas",
|
|
|
|
}),
|
2015-09-14 13:08:43 +00:00
|
|
|
}
|
2015-09-14 08:14:32 +00:00
|
|
|
tc.runTest(t)
|
|
|
|
}
|
2015-09-14 13:08:43 +00:00
|
|
|
|
2017-07-16 19:36:08 +00:00
|
|
|
func TestMinReplicasDesiredZero(t *testing.T) {
|
|
|
|
tc := testCase{
|
|
|
|
minReplicas: 2,
|
|
|
|
maxReplicas: 5,
|
|
|
|
initialReplicas: 3,
|
|
|
|
desiredReplicas: 2,
|
|
|
|
CPUTarget: 90,
|
|
|
|
reportedLevels: []uint64{0, 0, 0},
|
|
|
|
reportedCPURequests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
|
2017-09-09 18:53:34 +00:00
|
|
|
useMetricsAPI: true,
|
2017-07-16 19:36:08 +00:00
|
|
|
expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
|
|
|
|
Type: autoscalingv2.ScalingLimited,
|
|
|
|
Status: v1.ConditionTrue,
|
|
|
|
Reason: "TooFewReplicas",
|
|
|
|
}),
|
|
|
|
}
|
|
|
|
tc.runTest(t)
|
|
|
|
}
|
|
|
|
|
2016-02-12 15:26:59 +00:00
|
|
|
func TestZeroReplicas(t *testing.T) {
|
|
|
|
tc := testCase{
|
|
|
|
minReplicas: 3,
|
|
|
|
maxReplicas: 5,
|
|
|
|
initialReplicas: 0,
|
2016-07-19 15:54:38 +00:00
|
|
|
desiredReplicas: 0,
|
2016-02-12 15:26:59 +00:00
|
|
|
CPUTarget: 90,
|
|
|
|
reportedLevels: []uint64{},
|
|
|
|
reportedCPURequests: []resource.Quantity{},
|
2017-09-09 18:53:34 +00:00
|
|
|
useMetricsAPI: true,
|
2017-05-24 21:09:47 +00:00
|
|
|
expectedConditions: []autoscalingv1.HorizontalPodAutoscalerCondition{
|
|
|
|
{Type: autoscalingv1.AbleToScale, Status: v1.ConditionTrue, Reason: "SucceededGetScale"},
|
|
|
|
{Type: autoscalingv1.ScalingActive, Status: v1.ConditionFalse, Reason: "ScalingDisabled"},
|
|
|
|
},
|
2016-02-12 15:26:59 +00:00
|
|
|
}
|
|
|
|
tc.runTest(t)
|
|
|
|
}
|
|
|
|
|
2016-02-23 14:18:49 +00:00
|
|
|
func TestTooFewReplicas(t *testing.T) {
|
2016-02-12 15:26:59 +00:00
|
|
|
tc := testCase{
|
|
|
|
minReplicas: 3,
|
|
|
|
maxReplicas: 5,
|
|
|
|
initialReplicas: 2,
|
|
|
|
desiredReplicas: 3,
|
|
|
|
CPUTarget: 90,
|
|
|
|
reportedLevels: []uint64{},
|
|
|
|
reportedCPURequests: []resource.Quantity{},
|
2017-09-09 18:53:34 +00:00
|
|
|
useMetricsAPI: true,
|
2017-05-24 21:09:47 +00:00
|
|
|
expectedConditions: []autoscalingv1.HorizontalPodAutoscalerCondition{
|
|
|
|
{Type: autoscalingv1.AbleToScale, Status: v1.ConditionTrue, Reason: "SucceededRescale"},
|
|
|
|
},
|
2016-02-12 15:26:59 +00:00
|
|
|
}
|
|
|
|
tc.runTest(t)
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestTooManyReplicas(t *testing.T) {
|
|
|
|
tc := testCase{
|
|
|
|
minReplicas: 3,
|
|
|
|
maxReplicas: 5,
|
|
|
|
initialReplicas: 10,
|
|
|
|
desiredReplicas: 5,
|
|
|
|
CPUTarget: 90,
|
|
|
|
reportedLevels: []uint64{},
|
|
|
|
reportedCPURequests: []resource.Quantity{},
|
2017-09-09 18:53:34 +00:00
|
|
|
useMetricsAPI: true,
|
2017-05-24 21:09:47 +00:00
|
|
|
expectedConditions: []autoscalingv1.HorizontalPodAutoscalerCondition{
|
|
|
|
{Type: autoscalingv1.AbleToScale, Status: v1.ConditionTrue, Reason: "SucceededRescale"},
|
|
|
|
},
|
2016-02-12 15:26:59 +00:00
|
|
|
}
|
|
|
|
tc.runTest(t)
|
|
|
|
}
|
|
|
|
|
2015-09-17 12:08:39 +00:00
|
|
|
func TestMaxReplicas(t *testing.T) {
|
2015-09-14 08:14:32 +00:00
|
|
|
tc := testCase{
|
2015-10-13 15:24:23 +00:00
|
|
|
minReplicas: 2,
|
|
|
|
maxReplicas: 5,
|
|
|
|
initialReplicas: 3,
|
|
|
|
desiredReplicas: 5,
|
|
|
|
CPUTarget: 90,
|
|
|
|
reportedLevels: []uint64{8000, 9500, 1000},
|
|
|
|
reportedCPURequests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
|
2017-09-09 18:53:34 +00:00
|
|
|
useMetricsAPI: true,
|
2017-05-24 21:09:47 +00:00
|
|
|
expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
|
|
|
|
Type: autoscalingv2.ScalingLimited,
|
|
|
|
Status: v1.ConditionTrue,
|
|
|
|
Reason: "TooManyReplicas",
|
|
|
|
}),
|
2015-09-14 08:14:32 +00:00
|
|
|
}
|
|
|
|
tc.runTest(t)
|
2015-08-20 12:55:28 +00:00
|
|
|
}
|
|
|
|
|
2015-09-14 08:14:32 +00:00
|
|
|
func TestSuperfluousMetrics(t *testing.T) {
|
|
|
|
tc := testCase{
|
2015-10-13 15:24:23 +00:00
|
|
|
minReplicas: 2,
|
|
|
|
maxReplicas: 6,
|
|
|
|
initialReplicas: 4,
|
2016-09-27 18:47:52 +00:00
|
|
|
desiredReplicas: 6,
|
2015-10-13 15:24:23 +00:00
|
|
|
CPUTarget: 100,
|
|
|
|
reportedLevels: []uint64{4000, 9500, 3000, 7000, 3200, 2000},
|
|
|
|
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
2017-09-09 18:53:34 +00:00
|
|
|
useMetricsAPI: true,
|
2017-05-24 21:09:47 +00:00
|
|
|
expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
|
|
|
|
Type: autoscalingv2.ScalingLimited,
|
|
|
|
Status: v1.ConditionTrue,
|
|
|
|
Reason: "TooManyReplicas",
|
|
|
|
}),
|
2015-08-26 14:17:18 +00:00
|
|
|
}
|
2015-09-14 08:14:32 +00:00
|
|
|
tc.runTest(t)
|
|
|
|
}
|
2015-08-28 10:24:00 +00:00
|
|
|
|
2015-09-14 08:14:32 +00:00
|
|
|
func TestMissingMetrics(t *testing.T) {
|
|
|
|
tc := testCase{
|
2015-10-13 15:24:23 +00:00
|
|
|
minReplicas: 2,
|
|
|
|
maxReplicas: 6,
|
|
|
|
initialReplicas: 4,
|
2016-09-27 18:47:52 +00:00
|
|
|
desiredReplicas: 3,
|
2015-10-13 15:24:23 +00:00
|
|
|
CPUTarget: 100,
|
|
|
|
reportedLevels: []uint64{400, 95},
|
|
|
|
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
2017-09-09 18:53:34 +00:00
|
|
|
useMetricsAPI: true,
|
2015-08-20 12:55:28 +00:00
|
|
|
}
|
2015-09-14 08:14:32 +00:00
|
|
|
tc.runTest(t)
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestEmptyMetrics(t *testing.T) {
|
|
|
|
tc := testCase{
|
2015-10-13 15:24:23 +00:00
|
|
|
minReplicas: 2,
|
|
|
|
maxReplicas: 6,
|
|
|
|
initialReplicas: 4,
|
|
|
|
desiredReplicas: 4,
|
|
|
|
CPUTarget: 100,
|
|
|
|
reportedLevels: []uint64{},
|
|
|
|
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
2017-09-09 18:53:34 +00:00
|
|
|
useMetricsAPI: true,
|
2017-05-24 21:09:47 +00:00
|
|
|
expectedConditions: []autoscalingv1.HorizontalPodAutoscalerCondition{
|
|
|
|
{Type: autoscalingv1.AbleToScale, Status: v1.ConditionTrue, Reason: "SucceededGetScale"},
|
|
|
|
{Type: autoscalingv1.ScalingActive, Status: v1.ConditionFalse, Reason: "FailedGetResourceMetric"},
|
|
|
|
},
|
2015-08-20 12:55:28 +00:00
|
|
|
}
|
2015-09-14 08:14:32 +00:00
|
|
|
tc.runTest(t)
|
|
|
|
}
|
|
|
|
|
2015-10-13 15:24:23 +00:00
|
|
|
func TestEmptyCPURequest(t *testing.T) {
|
2015-09-14 08:14:32 +00:00
|
|
|
tc := testCase{
|
2015-09-17 12:08:39 +00:00
|
|
|
minReplicas: 1,
|
|
|
|
maxReplicas: 5,
|
2015-09-14 08:14:32 +00:00
|
|
|
initialReplicas: 1,
|
2015-10-13 15:24:23 +00:00
|
|
|
desiredReplicas: 1,
|
|
|
|
CPUTarget: 100,
|
2015-09-14 08:14:32 +00:00
|
|
|
reportedLevels: []uint64{200},
|
2017-09-09 18:53:34 +00:00
|
|
|
useMetricsAPI: true,
|
2017-05-24 21:09:47 +00:00
|
|
|
expectedConditions: []autoscalingv1.HorizontalPodAutoscalerCondition{
|
|
|
|
{Type: autoscalingv1.AbleToScale, Status: v1.ConditionTrue, Reason: "SucceededGetScale"},
|
|
|
|
{Type: autoscalingv1.ScalingActive, Status: v1.ConditionFalse, Reason: "FailedGetResourceMetric"},
|
|
|
|
},
|
2015-10-13 15:24:23 +00:00
|
|
|
}
|
|
|
|
tc.runTest(t)
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestEventCreated(t *testing.T) {
|
|
|
|
tc := testCase{
|
|
|
|
minReplicas: 1,
|
|
|
|
maxReplicas: 5,
|
|
|
|
initialReplicas: 1,
|
|
|
|
desiredReplicas: 2,
|
|
|
|
CPUTarget: 50,
|
|
|
|
reportedLevels: []uint64{200},
|
|
|
|
reportedCPURequests: []resource.Quantity{resource.MustParse("0.2")},
|
|
|
|
verifyEvents: true,
|
2017-09-09 18:53:34 +00:00
|
|
|
useMetricsAPI: true,
|
2015-08-25 17:16:47 +00:00
|
|
|
}
|
2015-09-14 08:14:32 +00:00
|
|
|
tc.runTest(t)
|
|
|
|
}
|
2015-08-25 17:16:47 +00:00
|
|
|
|
2015-09-14 08:14:32 +00:00
|
|
|
func TestEventNotCreated(t *testing.T) {
|
|
|
|
tc := testCase{
|
2015-10-13 15:24:23 +00:00
|
|
|
minReplicas: 1,
|
|
|
|
maxReplicas: 5,
|
|
|
|
initialReplicas: 2,
|
|
|
|
desiredReplicas: 2,
|
|
|
|
CPUTarget: 50,
|
|
|
|
reportedLevels: []uint64{200, 200},
|
|
|
|
reportedCPURequests: []resource.Quantity{resource.MustParse("0.4"), resource.MustParse("0.4")},
|
|
|
|
verifyEvents: true,
|
2017-09-09 18:53:34 +00:00
|
|
|
useMetricsAPI: true,
|
2017-05-24 21:09:47 +00:00
|
|
|
expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
|
|
|
|
Type: autoscalingv2.AbleToScale,
|
|
|
|
Status: v1.ConditionTrue,
|
|
|
|
Reason: "ReadyForNewScale",
|
|
|
|
}),
|
2015-09-14 08:14:32 +00:00
|
|
|
}
|
|
|
|
tc.runTest(t)
|
2015-08-20 12:55:28 +00:00
|
|
|
}
|
2015-10-13 15:24:23 +00:00
|
|
|
|
2016-10-17 15:14:15 +00:00
|
|
|
func TestMissingReports(t *testing.T) {
|
|
|
|
tc := testCase{
|
|
|
|
minReplicas: 1,
|
|
|
|
maxReplicas: 5,
|
|
|
|
initialReplicas: 4,
|
|
|
|
desiredReplicas: 2,
|
|
|
|
CPUTarget: 50,
|
|
|
|
reportedLevels: []uint64{200},
|
|
|
|
reportedCPURequests: []resource.Quantity{resource.MustParse("0.2")},
|
2017-09-09 18:53:34 +00:00
|
|
|
useMetricsAPI: true,
|
2016-10-17 15:14:15 +00:00
|
|
|
}
|
|
|
|
tc.runTest(t)
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestUpscaleCap(t *testing.T) {
|
|
|
|
tc := testCase{
|
|
|
|
minReplicas: 1,
|
|
|
|
maxReplicas: 100,
|
|
|
|
initialReplicas: 3,
|
2017-05-24 21:09:47 +00:00
|
|
|
desiredReplicas: 24,
|
|
|
|
CPUTarget: 10,
|
|
|
|
reportedLevels: []uint64{100, 200, 300},
|
|
|
|
reportedCPURequests: []resource.Quantity{resource.MustParse("0.1"), resource.MustParse("0.1"), resource.MustParse("0.1")},
|
2017-09-09 18:53:34 +00:00
|
|
|
useMetricsAPI: true,
|
2017-05-24 21:09:47 +00:00
|
|
|
expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
|
|
|
|
Type: autoscalingv2.ScalingLimited,
|
|
|
|
Status: v1.ConditionTrue,
|
|
|
|
Reason: "ScaleUpLimit",
|
|
|
|
}),
|
|
|
|
}
|
|
|
|
tc.runTest(t)
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestConditionInvalidSelectorMissing(t *testing.T) {
|
|
|
|
tc := testCase{
|
|
|
|
minReplicas: 1,
|
|
|
|
maxReplicas: 100,
|
|
|
|
initialReplicas: 3,
|
|
|
|
desiredReplicas: 3,
|
|
|
|
CPUTarget: 10,
|
|
|
|
reportedLevels: []uint64{100, 200, 300},
|
|
|
|
reportedCPURequests: []resource.Quantity{resource.MustParse("0.1"), resource.MustParse("0.1"), resource.MustParse("0.1")},
|
2017-09-09 18:53:34 +00:00
|
|
|
useMetricsAPI: true,
|
2017-05-24 21:09:47 +00:00
|
|
|
expectedConditions: []autoscalingv1.HorizontalPodAutoscalerCondition{
|
|
|
|
{
|
|
|
|
Type: autoscalingv1.AbleToScale,
|
|
|
|
Status: v1.ConditionTrue,
|
|
|
|
Reason: "SucceededGetScale",
|
|
|
|
},
|
|
|
|
{
|
|
|
|
Type: autoscalingv1.ScalingActive,
|
|
|
|
Status: v1.ConditionFalse,
|
|
|
|
Reason: "InvalidSelector",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
testClient, _, _ := tc.prepareTestClient(t)
|
|
|
|
tc.testClient = testClient
|
|
|
|
|
|
|
|
testClient.PrependReactor("get", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
|
|
|
|
obj := &extensions.Scale{
|
|
|
|
ObjectMeta: metav1.ObjectMeta{
|
|
|
|
Name: tc.resource.name,
|
|
|
|
},
|
|
|
|
Spec: extensions.ScaleSpec{
|
|
|
|
Replicas: tc.initialReplicas,
|
|
|
|
},
|
|
|
|
Status: extensions.ScaleStatus{
|
|
|
|
Replicas: tc.initialReplicas,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
return true, obj, nil
|
|
|
|
})
|
|
|
|
|
|
|
|
tc.runTest(t)
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestConditionInvalidSelectorUnparsable(t *testing.T) {
|
|
|
|
tc := testCase{
|
|
|
|
minReplicas: 1,
|
|
|
|
maxReplicas: 100,
|
|
|
|
initialReplicas: 3,
|
|
|
|
desiredReplicas: 3,
|
|
|
|
CPUTarget: 10,
|
|
|
|
reportedLevels: []uint64{100, 200, 300},
|
|
|
|
reportedCPURequests: []resource.Quantity{resource.MustParse("0.1"), resource.MustParse("0.1"), resource.MustParse("0.1")},
|
2017-09-09 18:53:34 +00:00
|
|
|
useMetricsAPI: true,
|
2017-05-24 21:09:47 +00:00
|
|
|
expectedConditions: []autoscalingv1.HorizontalPodAutoscalerCondition{
|
|
|
|
{
|
|
|
|
Type: autoscalingv1.AbleToScale,
|
|
|
|
Status: v1.ConditionTrue,
|
|
|
|
Reason: "SucceededGetScale",
|
|
|
|
},
|
|
|
|
{
|
|
|
|
Type: autoscalingv1.ScalingActive,
|
|
|
|
Status: v1.ConditionFalse,
|
|
|
|
Reason: "InvalidSelector",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
testClient, _, _ := tc.prepareTestClient(t)
|
|
|
|
tc.testClient = testClient
|
|
|
|
|
|
|
|
testClient.PrependReactor("get", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
|
|
|
|
obj := &extensions.Scale{
|
|
|
|
ObjectMeta: metav1.ObjectMeta{
|
|
|
|
Name: tc.resource.name,
|
|
|
|
},
|
|
|
|
Spec: extensions.ScaleSpec{
|
|
|
|
Replicas: tc.initialReplicas,
|
|
|
|
},
|
|
|
|
Status: extensions.ScaleStatus{
|
|
|
|
Replicas: tc.initialReplicas,
|
|
|
|
TargetSelector: "cheddar cheese",
|
|
|
|
},
|
|
|
|
}
|
|
|
|
return true, obj, nil
|
|
|
|
})
|
|
|
|
|
|
|
|
tc.runTest(t)
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestConditionFailedGetMetrics(t *testing.T) {
|
|
|
|
metricsTargets := map[string][]autoscalingv2.MetricSpec{
|
|
|
|
"FailedGetResourceMetric": nil,
|
|
|
|
"FailedGetPodsMetric": {
|
|
|
|
{
|
|
|
|
Type: autoscalingv2.PodsMetricSourceType,
|
|
|
|
Pods: &autoscalingv2.PodsMetricSource{
|
|
|
|
MetricName: "qps",
|
|
|
|
TargetAverageValue: resource.MustParse("15.0"),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
"FailedGetObjectMetric": {
|
|
|
|
{
|
|
|
|
Type: autoscalingv2.ObjectMetricSourceType,
|
|
|
|
Object: &autoscalingv2.ObjectMetricSource{
|
|
|
|
Target: autoscalingv2.CrossVersionObjectReference{
|
|
|
|
APIVersion: "extensions/v1beta1",
|
|
|
|
Kind: "Deployment",
|
|
|
|
Name: "some-deployment",
|
|
|
|
},
|
|
|
|
MetricName: "qps",
|
|
|
|
TargetValue: resource.MustParse("15.0"),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
for reason, specs := range metricsTargets {
|
|
|
|
tc := testCase{
|
|
|
|
minReplicas: 1,
|
|
|
|
maxReplicas: 100,
|
|
|
|
initialReplicas: 3,
|
|
|
|
desiredReplicas: 3,
|
|
|
|
CPUTarget: 10,
|
|
|
|
reportedLevels: []uint64{100, 200, 300},
|
|
|
|
reportedCPURequests: []resource.Quantity{resource.MustParse("0.1"), resource.MustParse("0.1"), resource.MustParse("0.1")},
|
2017-09-09 18:53:34 +00:00
|
|
|
useMetricsAPI: true,
|
2017-05-24 21:09:47 +00:00
|
|
|
}
|
|
|
|
_, testMetricsClient, testCMClient := tc.prepareTestClient(t)
|
|
|
|
tc.testMetricsClient = testMetricsClient
|
|
|
|
tc.testCMClient = testCMClient
|
|
|
|
|
|
|
|
testMetricsClient.PrependReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
|
2017-09-09 18:53:34 +00:00
|
|
|
return true, &metricsapi.PodMetricsList{}, fmt.Errorf("something went wrong")
|
2017-05-24 21:09:47 +00:00
|
|
|
})
|
|
|
|
testCMClient.PrependReactor("get", "*", func(action core.Action) (handled bool, ret runtime.Object, err error) {
|
2017-09-09 18:53:34 +00:00
|
|
|
return true, &cmapi.MetricValueList{}, fmt.Errorf("something went wrong")
|
2017-05-24 21:09:47 +00:00
|
|
|
})
|
|
|
|
|
|
|
|
tc.expectedConditions = []autoscalingv1.HorizontalPodAutoscalerCondition{
|
|
|
|
{Type: autoscalingv1.AbleToScale, Status: v1.ConditionTrue, Reason: "SucceededGetScale"},
|
|
|
|
{Type: autoscalingv1.ScalingActive, Status: v1.ConditionFalse, Reason: reason},
|
|
|
|
}
|
|
|
|
if specs != nil {
|
|
|
|
tc.CPUTarget = 0
|
|
|
|
} else {
|
|
|
|
tc.CPUTarget = 10
|
|
|
|
}
|
|
|
|
tc.metricsTarget = specs
|
|
|
|
tc.runTest(t)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestConditionInvalidSourceType(t *testing.T) {
|
|
|
|
tc := testCase{
|
|
|
|
minReplicas: 2,
|
|
|
|
maxReplicas: 6,
|
|
|
|
initialReplicas: 3,
|
|
|
|
desiredReplicas: 3,
|
|
|
|
CPUTarget: 0,
|
|
|
|
metricsTarget: []autoscalingv2.MetricSpec{
|
|
|
|
{
|
|
|
|
Type: "CheddarCheese",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
reportedLevels: []uint64{20000},
|
|
|
|
expectedConditions: []autoscalingv1.HorizontalPodAutoscalerCondition{
|
|
|
|
{
|
|
|
|
Type: autoscalingv1.AbleToScale,
|
|
|
|
Status: v1.ConditionTrue,
|
|
|
|
Reason: "SucceededGetScale",
|
|
|
|
},
|
|
|
|
{
|
|
|
|
Type: autoscalingv1.ScalingActive,
|
|
|
|
Status: v1.ConditionFalse,
|
|
|
|
Reason: "InvalidMetricSourceType",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
tc.runTest(t)
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestConditionFailedGetScale(t *testing.T) {
|
|
|
|
tc := testCase{
|
|
|
|
minReplicas: 1,
|
|
|
|
maxReplicas: 100,
|
|
|
|
initialReplicas: 3,
|
|
|
|
desiredReplicas: 3,
|
2016-10-17 15:14:15 +00:00
|
|
|
CPUTarget: 10,
|
|
|
|
reportedLevels: []uint64{100, 200, 300},
|
|
|
|
reportedCPURequests: []resource.Quantity{resource.MustParse("0.1"), resource.MustParse("0.1"), resource.MustParse("0.1")},
|
2017-09-09 18:53:34 +00:00
|
|
|
useMetricsAPI: true,
|
2017-05-24 21:09:47 +00:00
|
|
|
expectedConditions: []autoscalingv1.HorizontalPodAutoscalerCondition{
|
|
|
|
{
|
|
|
|
Type: autoscalingv1.AbleToScale,
|
|
|
|
Status: v1.ConditionFalse,
|
|
|
|
Reason: "FailedGetScale",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
testClient, _, _ := tc.prepareTestClient(t)
|
|
|
|
tc.testClient = testClient
|
|
|
|
|
|
|
|
testClient.PrependReactor("get", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
|
2017-09-09 18:53:34 +00:00
|
|
|
return true, &extensions.Scale{}, fmt.Errorf("something went wrong")
|
2017-05-24 21:09:47 +00:00
|
|
|
})
|
|
|
|
|
|
|
|
tc.runTest(t)
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestConditionFailedUpdateScale(t *testing.T) {
|
|
|
|
tc := testCase{
|
|
|
|
minReplicas: 1,
|
|
|
|
maxReplicas: 5,
|
|
|
|
initialReplicas: 3,
|
|
|
|
desiredReplicas: 3,
|
|
|
|
CPUTarget: 100,
|
|
|
|
reportedLevels: []uint64{150, 150, 150},
|
|
|
|
reportedCPURequests: []resource.Quantity{resource.MustParse("0.1"), resource.MustParse("0.1"), resource.MustParse("0.1")},
|
2017-09-09 18:53:34 +00:00
|
|
|
useMetricsAPI: true,
|
2017-05-24 21:09:47 +00:00
|
|
|
expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
|
|
|
|
Type: autoscalingv2.AbleToScale,
|
|
|
|
Status: v1.ConditionFalse,
|
|
|
|
Reason: "FailedUpdateScale",
|
|
|
|
}),
|
|
|
|
}
|
|
|
|
|
|
|
|
testClient, _, _ := tc.prepareTestClient(t)
|
|
|
|
tc.testClient = testClient
|
|
|
|
|
|
|
|
testClient.PrependReactor("update", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
|
2017-09-09 18:53:34 +00:00
|
|
|
return true, &extensions.Scale{}, fmt.Errorf("something went wrong")
|
2017-05-24 21:09:47 +00:00
|
|
|
})
|
|
|
|
|
|
|
|
tc.runTest(t)
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestBackoffUpscale(t *testing.T) {
|
|
|
|
time := metav1.Time{Time: time.Now()}
|
|
|
|
tc := testCase{
|
|
|
|
minReplicas: 1,
|
|
|
|
maxReplicas: 5,
|
|
|
|
initialReplicas: 3,
|
|
|
|
desiredReplicas: 3,
|
|
|
|
CPUTarget: 100,
|
|
|
|
reportedLevels: []uint64{150, 150, 150},
|
|
|
|
reportedCPURequests: []resource.Quantity{resource.MustParse("0.1"), resource.MustParse("0.1"), resource.MustParse("0.1")},
|
2017-09-09 18:53:34 +00:00
|
|
|
useMetricsAPI: true,
|
2017-05-24 21:09:47 +00:00
|
|
|
lastScaleTime: &time,
|
|
|
|
expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
|
|
|
|
Type: autoscalingv2.AbleToScale,
|
|
|
|
Status: v1.ConditionTrue,
|
|
|
|
Reason: "ReadyForNewScale",
|
|
|
|
}, autoscalingv2.HorizontalPodAutoscalerCondition{
|
|
|
|
Type: autoscalingv2.AbleToScale,
|
|
|
|
Status: v1.ConditionFalse,
|
|
|
|
Reason: "BackoffBoth",
|
|
|
|
}),
|
|
|
|
}
|
|
|
|
tc.runTest(t)
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestBackoffDownscale(t *testing.T) {
|
|
|
|
time := metav1.Time{Time: time.Now().Add(-4 * time.Minute)}
|
|
|
|
tc := testCase{
|
|
|
|
minReplicas: 1,
|
|
|
|
maxReplicas: 5,
|
|
|
|
initialReplicas: 4,
|
|
|
|
desiredReplicas: 4,
|
|
|
|
CPUTarget: 100,
|
|
|
|
reportedLevels: []uint64{50, 50, 50},
|
|
|
|
reportedCPURequests: []resource.Quantity{resource.MustParse("0.1"), resource.MustParse("0.1"), resource.MustParse("0.1")},
|
2017-09-09 18:53:34 +00:00
|
|
|
useMetricsAPI: true,
|
2017-05-24 21:09:47 +00:00
|
|
|
lastScaleTime: &time,
|
|
|
|
expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
|
|
|
|
Type: autoscalingv2.AbleToScale,
|
|
|
|
Status: v1.ConditionTrue,
|
|
|
|
Reason: "ReadyForNewScale",
|
|
|
|
}, autoscalingv2.HorizontalPodAutoscalerCondition{
|
|
|
|
Type: autoscalingv2.AbleToScale,
|
|
|
|
Status: v1.ConditionFalse,
|
|
|
|
Reason: "BackoffDownscale",
|
|
|
|
}),
|
2016-10-17 15:14:15 +00:00
|
|
|
}
|
|
|
|
tc.runTest(t)
|
|
|
|
}
|
|
|
|
|
2016-05-06 21:52:30 +00:00
|
|
|
// TestComputedToleranceAlgImplementation is a regression test which
|
|
|
|
// back-calculates a minimal percentage for downscaling based on a small percentage
|
|
|
|
// increase in pod utilization which is calibrated against the tolerance value.
// (A standalone numeric walkthrough of this arithmetic follows the function below.)
|
|
|
|
func TestComputedToleranceAlgImplementation(t *testing.T) {
|
|
|
|
|
|
|
|
startPods := int32(10)
|
|
|
|
// 150 mCPU per pod.
|
|
|
|
totalUsedCPUOfAllPods := uint64(startPods * 150)
|
|
|
|
// Each pod starts out asking for 2X what is really needed.
|
|
|
|
// This means we will have a 50% ratio of used/requested
|
|
|
|
totalRequestedCPUOfAllPods := int32(2 * totalUsedCPUOfAllPods)
|
|
|
|
requestedToUsed := float64(totalRequestedCPUOfAllPods / int32(totalUsedCPUOfAllPods))
|
|
|
|
// Spread the amount we ask over 10 pods. We can add some jitter later in reportedLevels.
|
|
|
|
perPodRequested := totalRequestedCPUOfAllPods / startPods
|
|
|
|
|
|
|
|
// Force a minimal scaling event by satisfying (tolerance < 1 - resourcesUsedRatio).
|
|
|
|
target := math.Abs(1/(requestedToUsed*(1-tolerance))) + .01
|
2017-09-09 18:53:34 +00:00
|
|
|
finalCPUPercentTarget := int32(target * 100)
|
2016-05-06 21:52:30 +00:00
|
|
|
resourcesUsedRatio := float64(totalUsedCPUOfAllPods) / float64(float64(totalRequestedCPUOfAllPods)*target)
|
|
|
|
|
|
|
|
// i.e. usedRatio * startPods, rounded up -> the scaled-down expectation.
|
|
|
|
finalPods := int32(math.Ceil(resourcesUsedRatio * float64(startPods)))
|
|
|
|
|
|
|
|
// To breach the tolerance, we create a utilization ratio that differs from 1.0 by more than the tolerance value.
|
|
|
|
tc := testCase{
|
|
|
|
minReplicas: 0,
|
|
|
|
maxReplicas: 1000,
|
|
|
|
initialReplicas: startPods,
|
|
|
|
desiredReplicas: finalPods,
|
2017-09-09 18:53:34 +00:00
|
|
|
CPUTarget: finalCPUPercentTarget,
|
2016-05-06 21:52:30 +00:00
|
|
|
reportedLevels: []uint64{
|
|
|
|
totalUsedCPUOfAllPods / 10,
|
|
|
|
totalUsedCPUOfAllPods / 10,
|
|
|
|
totalUsedCPUOfAllPods / 10,
|
|
|
|
totalUsedCPUOfAllPods / 10,
|
|
|
|
totalUsedCPUOfAllPods / 10,
|
|
|
|
totalUsedCPUOfAllPods / 10,
|
|
|
|
totalUsedCPUOfAllPods / 10,
|
|
|
|
totalUsedCPUOfAllPods / 10,
|
|
|
|
totalUsedCPUOfAllPods / 10,
|
|
|
|
totalUsedCPUOfAllPods / 10,
|
|
|
|
},
|
|
|
|
reportedCPURequests: []resource.Quantity{
|
|
|
|
resource.MustParse(fmt.Sprint(perPodRequested+100) + "m"),
|
|
|
|
resource.MustParse(fmt.Sprint(perPodRequested-100) + "m"),
|
|
|
|
resource.MustParse(fmt.Sprint(perPodRequested+10) + "m"),
|
|
|
|
resource.MustParse(fmt.Sprint(perPodRequested-10) + "m"),
|
|
|
|
resource.MustParse(fmt.Sprint(perPodRequested+2) + "m"),
|
|
|
|
resource.MustParse(fmt.Sprint(perPodRequested-2) + "m"),
|
|
|
|
resource.MustParse(fmt.Sprint(perPodRequested+1) + "m"),
|
|
|
|
resource.MustParse(fmt.Sprint(perPodRequested-1) + "m"),
|
|
|
|
resource.MustParse(fmt.Sprint(perPodRequested) + "m"),
|
|
|
|
resource.MustParse(fmt.Sprint(perPodRequested) + "m"),
|
|
|
|
},
|
2017-09-09 18:53:34 +00:00
|
|
|
useMetricsAPI: true,
|
2016-05-06 21:52:30 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
tc.runTest(t)
|
|
|
|
|
|
|
|
// Reuse the data structure above, now testing "unscaling".
|
|
|
|
// Now test that no scaling happens if we are within a very close margin of the tolerance.
|
|
|
|
target = math.Abs(1/(requestedToUsed*(1-tolerance))) + .004
|
2017-09-09 18:53:34 +00:00
|
|
|
finalCPUPercentTarget = int32(target * 100)
|
|
|
|
tc.CPUTarget = finalCPUPercentTarget
|
2016-05-06 21:52:30 +00:00
|
|
|
tc.initialReplicas = startPods
|
|
|
|
tc.desiredReplicas = startPods
|
2017-05-24 21:09:47 +00:00
|
|
|
tc.expectedConditions = statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
|
|
|
|
Type: autoscalingv2.AbleToScale,
|
|
|
|
Status: v1.ConditionTrue,
|
|
|
|
Reason: "ReadyForNewScale",
|
|
|
|
})
|
2016-05-06 21:52:30 +00:00
|
|
|
tc.runTest(t)
|
|
|
|
}
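To make the back-calculation in TestComputedToleranceAlgImplementation concrete,
the same arithmetic can be run as a small standalone program. The tolerance
value of 0.10 is an assumption here (the test reads the package-level constant,
which may differ); with it, the test's inputs work out to a CPU target of about
56% and a downscale from 10 to 9 pods.

package main

import (
	"fmt"
	"math"
)

func main() {
	const tolerance = 0.10 // assumed; the test uses the package constant

	startPods := 10.0
	totalUsed := startPods * 150.0  // 1500 mCPU used across all pods
	totalRequested := 2 * totalUsed // 3000 mCPU requested, so 50% used/requested
	requestedToUsed := totalRequested / totalUsed

	// Pick a target just far enough from the current ratio to breach tolerance.
	target := math.Abs(1/(requestedToUsed*(1-tolerance))) + .01 // ~0.566
	usedRatio := totalUsed / (totalRequested * target)          // ~0.884
	finalPods := math.Ceil(usedRatio * startPods)               // 9

	fmt.Printf("target=%.3f usedRatio=%.3f finalPods=%.0f\n", target, usedRatio, finalPods)
}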
|
|
|
|
|
2016-11-21 09:32:00 +00:00
|
|
|
func TestScaleUpRCImmediately(t *testing.T) {
|
2016-12-03 18:57:26 +00:00
|
|
|
time := metav1.Time{Time: time.Now()}
|
2016-11-21 09:32:00 +00:00
|
|
|
tc := testCase{
|
|
|
|
minReplicas: 2,
|
|
|
|
maxReplicas: 6,
|
|
|
|
initialReplicas: 1,
|
|
|
|
desiredReplicas: 2,
|
2016-12-02 20:18:26 +00:00
|
|
|
verifyCPUCurrent: false,
|
2016-11-21 09:32:00 +00:00
|
|
|
reportedLevels: []uint64{0, 0, 0, 0},
|
|
|
|
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
2017-09-09 18:53:34 +00:00
|
|
|
useMetricsAPI: true,
|
2016-11-21 09:32:00 +00:00
|
|
|
lastScaleTime: &time,
|
2017-05-24 21:09:47 +00:00
|
|
|
expectedConditions: []autoscalingv1.HorizontalPodAutoscalerCondition{
|
|
|
|
{Type: autoscalingv1.AbleToScale, Status: v1.ConditionTrue, Reason: "SucceededRescale"},
|
|
|
|
},
|
2016-11-21 09:32:00 +00:00
|
|
|
}
|
|
|
|
tc.runTest(t)
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestScaleDownRCImmediately(t *testing.T) {
|
2016-12-03 18:57:26 +00:00
|
|
|
time := metav1.Time{Time: time.Now()}
|
2016-11-21 09:32:00 +00:00
|
|
|
tc := testCase{
|
|
|
|
minReplicas: 2,
|
|
|
|
maxReplicas: 5,
|
|
|
|
initialReplicas: 6,
|
|
|
|
desiredReplicas: 5,
|
|
|
|
CPUTarget: 50,
|
|
|
|
reportedLevels: []uint64{8000, 9500, 1000},
|
|
|
|
reportedCPURequests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
|
2017-09-09 18:53:34 +00:00
|
|
|
useMetricsAPI: true,
|
2016-11-21 09:32:00 +00:00
|
|
|
lastScaleTime: &time,
|
2017-05-24 21:09:47 +00:00
|
|
|
expectedConditions: []autoscalingv1.HorizontalPodAutoscalerCondition{
|
|
|
|
{Type: autoscalingv1.AbleToScale, Status: v1.ConditionTrue, Reason: "SucceededRescale"},
|
|
|
|
},
|
2016-11-21 09:32:00 +00:00
|
|
|
}
|
|
|
|
tc.runTest(t)
|
|
|
|
}
|
|
|
|
|
2017-06-06 19:57:05 +00:00
|
|
|
func TestAvoidUnnecessaryUpdates(t *testing.T) {
|
|
|
|
tc := testCase{
|
|
|
|
minReplicas: 2,
|
|
|
|
maxReplicas: 6,
|
|
|
|
initialReplicas: 3,
|
|
|
|
desiredReplicas: 3,
|
|
|
|
CPUTarget: 30,
|
|
|
|
CPUCurrent: 40,
|
|
|
|
verifyCPUCurrent: true,
|
|
|
|
reportedLevels: []uint64{400, 500, 700},
|
|
|
|
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
|
|
|
reportedPodReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
|
2017-09-09 18:53:34 +00:00
|
|
|
useMetricsAPI: true,
|
2017-06-06 19:57:05 +00:00
|
|
|
}
|
|
|
|
testClient, _, _ := tc.prepareTestClient(t)
|
|
|
|
tc.testClient = testClient
|
|
|
|
var savedHPA *autoscalingv1.HorizontalPodAutoscaler
|
|
|
|
testClient.PrependReactor("list", "horizontalpodautoscalers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
|
|
|
|
tc.Lock()
|
|
|
|
defer tc.Unlock()
|
|
|
|
|
|
|
|
if savedHPA != nil {
|
|
|
|
// fake out the verification logic and mark that we're done processing
|
|
|
|
go func() {
|
|
|
|
// wait a tick and then mark that we're finished (otherwise, we have no
|
|
|
|
// way to indicate that we're finished, because the function decides not to do anything)
|
|
|
|
time.Sleep(1 * time.Second)
|
|
|
|
tc.statusUpdated = true
|
|
|
|
tc.processed <- "test-hpa"
|
|
|
|
}()
|
|
|
|
return true, &autoscalingv1.HorizontalPodAutoscalerList{
|
|
|
|
Items: []autoscalingv1.HorizontalPodAutoscaler{*savedHPA},
|
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// fallthrough
|
|
|
|
return false, nil, nil
|
|
|
|
})
|
|
|
|
testClient.PrependReactor("update", "horizontalpodautoscalers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
|
|
|
|
tc.Lock()
|
|
|
|
defer tc.Unlock()
|
|
|
|
|
|
|
|
if savedHPA == nil {
|
|
|
|
// save the HPA and return it
|
|
|
|
savedHPA = action.(core.UpdateAction).GetObject().(*autoscalingv1.HorizontalPodAutoscaler)
|
|
|
|
return true, savedHPA, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
assert.Fail(t, "should not have attempted to update the HPA when nothing changed")
|
|
|
|
// mark that we've processed this HPA
|
|
|
|
tc.processed <- ""
|
|
|
|
return true, nil, fmt.Errorf("unexpected call")
|
|
|
|
})
|
|
|
|
|
|
|
|
controller, informerFactory := tc.setupController(t)
|
|
|
|
|
|
|
|
// fake an initial processing loop to populate savedHPA
|
|
|
|
initialHPAs, err := testClient.Autoscaling().HorizontalPodAutoscalers("test-namespace").List(metav1.ListOptions{})
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("unexpected error: %v", err)
|
|
|
|
}
|
|
|
|
if err := controller.reconcileAutoscaler(&initialHPAs.Items[0]); err != nil {
|
|
|
|
t.Fatalf("unexpected error: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// actually run the test
|
|
|
|
tc.runTestWithController(t, controller, informerFactory)
|
|
|
|
}
|
|
|
|
|
2015-12-13 08:54:43 +00:00
|
|
|
// TODO: add more tests
|