/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package podautoscaler

import (
	"encoding/json"
	"fmt"
	"math"
	"sync"
	"testing"
	"time"

	autoscalingv1 "k8s.io/api/autoscaling/v1"
	autoscalingv2 "k8s.io/api/autoscaling/v2beta2"
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/meta/testrestmapper"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
	scalefake "k8s.io/client-go/scale/fake"
	core "k8s.io/client-go/testing"
	"k8s.io/kubernetes/pkg/api/legacyscheme"
	"k8s.io/kubernetes/pkg/apis/autoscaling"
	"k8s.io/kubernetes/pkg/controller"
	"k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
	cmapi "k8s.io/metrics/pkg/apis/custom_metrics/v1beta2"
	emapi "k8s.io/metrics/pkg/apis/external_metrics/v1beta1"
	metricsapi "k8s.io/metrics/pkg/apis/metrics/v1beta1"
	metricsfake "k8s.io/metrics/pkg/client/clientset/versioned/fake"
	cmfake "k8s.io/metrics/pkg/client/custom_metrics/fake"
	emfake "k8s.io/metrics/pkg/client/external_metrics/fake"

	"github.com/stretchr/testify/assert"

	_ "k8s.io/kubernetes/pkg/apis/apps/install"
	_ "k8s.io/kubernetes/pkg/apis/autoscaling/install"
)
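
// statusOk is the expected condition set for an HPA that has successfully
// rescaled within its configured range.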
var statusOk = []autoscalingv2.HorizontalPodAutoscalerCondition{
	{Type: autoscalingv2.AbleToScale, Status: v1.ConditionTrue, Reason: "SucceededRescale"},
	{Type: autoscalingv2.ScalingActive, Status: v1.ConditionTrue, Reason: "ValidMetricFound"},
	{Type: autoscalingv2.ScalingLimited, Status: v1.ConditionFalse, Reason: "DesiredWithinRange"},
}

// statusOkWithOverrides returns the "ok" status with the given conditions as overridden
func statusOkWithOverrides(overrides ...autoscalingv2.HorizontalPodAutoscalerCondition) []autoscalingv1.HorizontalPodAutoscalerCondition {
	resv2 := make([]autoscalingv2.HorizontalPodAutoscalerCondition, len(statusOk))
	copy(resv2, statusOk)
	for _, override := range overrides {
		resv2 = setConditionInList(resv2, override.Type, override.Status, override.Reason, override.Message)
	}

	// copy to a v1 slice
	resv1 := make([]autoscalingv1.HorizontalPodAutoscalerCondition, len(resv2))
	for i, cond := range resv2 {
		resv1[i] = autoscalingv1.HorizontalPodAutoscalerCondition{
			Type:   autoscalingv1.HorizontalPodAutoscalerConditionType(cond.Type),
			Status: cond.Status,
			Reason: cond.Reason,
		}
	}
	return resv1
}

func alwaysReady() bool { return true }
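
// fakeResource identifies the scale target (name, apiVersion, kind) that a
// test case autoscales.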
type fakeResource struct {
	name       string
	apiVersion string
	kind       string
}
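
// testCase describes a single HPA reconciliation scenario: the initial and
// expected replica counts, the metrics the fake clients should report, and the
// updates the reactors are expected to observe.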
type testCase struct {
	sync.Mutex
	minReplicas     int32
	maxReplicas     int32
	initialReplicas int32

	// CPU target utilization as a percentage of the requested resources.
	CPUTarget                    int32
	CPUCurrent                   int32
	verifyCPUCurrent             bool
	reportedLevels               []uint64
	reportedCPURequests          []resource.Quantity
	reportedPodReadiness         []v1.ConditionStatus
	reportedPodStartTime         []metav1.Time
	reportedPodPhase             []v1.PodPhase
	reportedPodDeletionTimestamp []bool
	scaleUpdated                 bool
	statusUpdated                bool
	eventCreated                 bool
	verifyEvents                 bool
	useMetricsAPI                bool
	metricsTarget                []autoscalingv2.MetricSpec
	expectedDesiredReplicas      int32
	expectedConditions           []autoscalingv1.HorizontalPodAutoscalerCondition

	// Channel with names of HPA objects which we have reconciled.
	processed chan string

	// Target resource information.
	resource *fakeResource

	// Last scale time
	lastScaleTime *metav1.Time

	// override the test clients
	testClient        *fake.Clientset
	testMetricsClient *metricsfake.Clientset
	testCMClient      *cmfake.FakeCustomMetricsClient
	testEMClient      *emfake.FakeExternalMetricsClient
	testScaleClient   *scalefake.FakeScaleClient

	recommendations []timestampedRecommendation
}

// Needs to be called under a lock.
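// For example, reportedLevels of {300, 500, 700} milli-CPU against three 1.0 CPU
// requests yields CPUCurrent = 100 * 1500 / 3000 = 50 (percent).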
func (tc *testCase) computeCPUCurrent() {
	if len(tc.reportedLevels) != len(tc.reportedCPURequests) || len(tc.reportedLevels) == 0 {
		return
	}
	reported := 0
	for _, r := range tc.reportedLevels {
		reported += int(r)
	}
	requested := 0
	for _, req := range tc.reportedCPURequests {
		requested += int(req.MilliValue())
	}
	tc.CPUCurrent = int32(100 * reported / requested)
}

func init() {
	// set this high so we don't accidentally run into it when testing
	scaleUpLimitFactor = 8
}
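
// prepareTestClient builds fake core, metrics, custom-metrics, external-metrics,
// and scale clients whose reactors serve the state described by tc and assert on
// the updates made by the controller.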
func (tc *testCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfake.Clientset, *cmfake.FakeCustomMetricsClient, *emfake.FakeExternalMetricsClient, *scalefake.FakeScaleClient) {
	namespace := "test-namespace"
	hpaName := "test-hpa"
	podNamePrefix := "test-pod"
	labelSet := map[string]string{"name": podNamePrefix}
	selector := labels.SelectorFromSet(labelSet).String()

	tc.Lock()

	tc.scaleUpdated = false
	tc.statusUpdated = false
	tc.eventCreated = false
	tc.processed = make(chan string, 100)

	if tc.CPUCurrent == 0 {
		tc.computeCPUCurrent()
	}

	if tc.resource == nil {
		tc.resource = &fakeResource{
			name:       "test-rc",
			apiVersion: "v1",
			kind:       "ReplicationController",
		}
	}
	tc.Unlock()

	fakeClient := &fake.Clientset{}
	fakeClient.AddReactor("list", "horizontalpodautoscalers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		tc.Lock()
		defer tc.Unlock()

		obj := &autoscalingv2.HorizontalPodAutoscalerList{
			Items: []autoscalingv2.HorizontalPodAutoscaler{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:      hpaName,
						Namespace: namespace,
						SelfLink:  "experimental/v1/namespaces/" + namespace + "/horizontalpodautoscalers/" + hpaName,
					},
					Spec: autoscalingv2.HorizontalPodAutoscalerSpec{
						ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{
							Kind:       tc.resource.kind,
							Name:       tc.resource.name,
							APIVersion: tc.resource.apiVersion,
						},
						MinReplicas: &tc.minReplicas,
						MaxReplicas: tc.maxReplicas,
					},
					Status: autoscalingv2.HorizontalPodAutoscalerStatus{
						CurrentReplicas: tc.initialReplicas,
						DesiredReplicas: tc.initialReplicas,
						LastScaleTime:   tc.lastScaleTime,
					},
				},
			},
		}

		if tc.CPUTarget > 0 {
			obj.Items[0].Spec.Metrics = []autoscalingv2.MetricSpec{
				{
					Type: autoscalingv2.ResourceMetricSourceType,
					Resource: &autoscalingv2.ResourceMetricSource{
						Name: v1.ResourceCPU,
						Target: autoscalingv2.MetricTarget{
							AverageUtilization: &tc.CPUTarget,
						},
					},
				},
			}
		}
		if len(tc.metricsTarget) > 0 {
			obj.Items[0].Spec.Metrics = append(obj.Items[0].Spec.Metrics, tc.metricsTarget...)
		}

		if len(obj.Items[0].Spec.Metrics) == 0 {
			// manually add in the defaulting logic
			obj.Items[0].Spec.Metrics = []autoscalingv2.MetricSpec{
				{
					Type: autoscalingv2.ResourceMetricSourceType,
					Resource: &autoscalingv2.ResourceMetricSource{
						Name: v1.ResourceCPU,
					},
				},
			}
		}

		// and... convert to autoscaling v1 to return the right type
		objv1, err := unsafeConvertToVersionVia(obj, autoscalingv1.SchemeGroupVersion)
		if err != nil {
			return true, nil, err
		}
		return true, objv1, nil
	})
	fakeClient.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		tc.Lock()
		defer tc.Unlock()

		obj := &v1.PodList{}

		specifiedCPURequests := tc.reportedCPURequests != nil
		numPodsToCreate := int(tc.initialReplicas)
		if specifiedCPURequests {
			numPodsToCreate = len(tc.reportedCPURequests)
		}

		for i := 0; i < numPodsToCreate; i++ {
			podReadiness := v1.ConditionTrue
			if tc.reportedPodReadiness != nil {
				podReadiness = tc.reportedPodReadiness[i]
			}

			var podStartTime metav1.Time
			if tc.reportedPodStartTime != nil {
				podStartTime = tc.reportedPodStartTime[i]
			}

			podPhase := v1.PodRunning
			if tc.reportedPodPhase != nil {
				podPhase = tc.reportedPodPhase[i]
			}

			podDeletionTimestamp := false
			if tc.reportedPodDeletionTimestamp != nil {
				podDeletionTimestamp = tc.reportedPodDeletionTimestamp[i]
			}

			podName := fmt.Sprintf("%s-%d", podNamePrefix, i)

			reportedCPURequest := resource.MustParse("1.0")
			if specifiedCPURequests {
				reportedCPURequest = tc.reportedCPURequests[i]
			}

			pod := v1.Pod{
				Status: v1.PodStatus{
					Phase: podPhase,
					Conditions: []v1.PodCondition{
						{
							Type:               v1.PodReady,
							Status:             podReadiness,
							LastTransitionTime: podStartTime,
						},
					},
					StartTime: &podStartTime,
				},
				ObjectMeta: metav1.ObjectMeta{
					Name:      podName,
					Namespace: namespace,
					Labels: map[string]string{
						"name": podNamePrefix,
					},
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{
							Resources: v1.ResourceRequirements{
								Requests: v1.ResourceList{
									v1.ResourceCPU: reportedCPURequest,
								},
							},
						},
					},
				},
			}
			if podDeletionTimestamp {
				pod.DeletionTimestamp = &metav1.Time{Time: time.Now()}
			}
			obj.Items = append(obj.Items, pod)
		}
		return true, obj, nil
	})
	fakeClient.AddReactor("update", "horizontalpodautoscalers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		tc.Lock()
		defer tc.Unlock()

		obj := action.(core.UpdateAction).GetObject().(*autoscalingv1.HorizontalPodAutoscaler)
		assert.Equal(t, namespace, obj.Namespace, "the HPA namespace should be as expected")
		assert.Equal(t, hpaName, obj.Name, "the HPA name should be as expected")
		assert.Equal(t, tc.expectedDesiredReplicas, obj.Status.DesiredReplicas, "the desired replica count reported in the object status should be as expected")
		if tc.verifyCPUCurrent {
			if assert.NotNil(t, obj.Status.CurrentCPUUtilizationPercentage, "the reported CPU utilization percentage should be non-nil") {
				assert.Equal(t, tc.CPUCurrent, *obj.Status.CurrentCPUUtilizationPercentage, "the reported CPU utilization percentage should be as expected")
			}
		}
		var actualConditions []autoscalingv1.HorizontalPodAutoscalerCondition
		if err := json.Unmarshal([]byte(obj.ObjectMeta.Annotations[autoscaling.HorizontalPodAutoscalerConditionsAnnotation]), &actualConditions); err != nil {
			return true, nil, err
		}
		// TODO: it's ok not to sort these because statusOk
		// contains all the conditions, so we'll never be appending.
		// Default to statusOk when missing any specific conditions
		if tc.expectedConditions == nil {
			tc.expectedConditions = statusOkWithOverrides()
		}
		// clear the message so that we can easily compare
		for i := range actualConditions {
			actualConditions[i].Message = ""
			actualConditions[i].LastTransitionTime = metav1.Time{}
		}
		assert.Equal(t, tc.expectedConditions, actualConditions, "the status conditions should have been as expected")

		tc.statusUpdated = true
		// Every time we reconcile HPA object we are updating status.
		tc.processed <- obj.Name

		return true, obj, nil
	})

	fakeScaleClient := &scalefake.FakeScaleClient{}
	fakeScaleClient.AddReactor("get", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		tc.Lock()
		defer tc.Unlock()

		obj := &autoscalingv1.Scale{
			ObjectMeta: metav1.ObjectMeta{
				Name:      tc.resource.name,
				Namespace: namespace,
			},
			Spec: autoscalingv1.ScaleSpec{
				Replicas: tc.initialReplicas,
			},
			Status: autoscalingv1.ScaleStatus{
				Replicas: tc.initialReplicas,
				Selector: selector,
			},
		}
		return true, obj, nil
	})

	fakeScaleClient.AddReactor("get", "deployments", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		tc.Lock()
		defer tc.Unlock()

		obj := &autoscalingv1.Scale{
			ObjectMeta: metav1.ObjectMeta{
				Name:      tc.resource.name,
				Namespace: namespace,
			},
			Spec: autoscalingv1.ScaleSpec{
				Replicas: tc.initialReplicas,
			},
			Status: autoscalingv1.ScaleStatus{
				Replicas: tc.initialReplicas,
				Selector: selector,
			},
		}
		return true, obj, nil
	})

	fakeScaleClient.AddReactor("get", "replicasets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		tc.Lock()
		defer tc.Unlock()

		obj := &autoscalingv1.Scale{
			ObjectMeta: metav1.ObjectMeta{
				Name:      tc.resource.name,
				Namespace: namespace,
			},
			Spec: autoscalingv1.ScaleSpec{
				Replicas: tc.initialReplicas,
			},
			Status: autoscalingv1.ScaleStatus{
				Replicas: tc.initialReplicas,
				Selector: selector,
			},
		}
		return true, obj, nil
	})

	fakeScaleClient.AddReactor("update", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		tc.Lock()
		defer tc.Unlock()

		obj := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale)
		replicas := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale).Spec.Replicas
		assert.Equal(t, tc.expectedDesiredReplicas, replicas, "the replica count of the RC should be as expected")
		tc.scaleUpdated = true
		return true, obj, nil
	})

	fakeScaleClient.AddReactor("update", "deployments", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		tc.Lock()
		defer tc.Unlock()

		obj := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale)
		replicas := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale).Spec.Replicas
		assert.Equal(t, tc.expectedDesiredReplicas, replicas, "the replica count of the deployment should be as expected")
		tc.scaleUpdated = true
		return true, obj, nil
	})

	fakeScaleClient.AddReactor("update", "replicasets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		tc.Lock()
		defer tc.Unlock()

		obj := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale)
		replicas := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale).Spec.Replicas
		assert.Equal(t, tc.expectedDesiredReplicas, replicas, "the replica count of the replicaset should be as expected")
		tc.scaleUpdated = true
		return true, obj, nil
	})

	fakeWatch := watch.NewFake()
	fakeClient.AddWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil))

	fakeMetricsClient := &metricsfake.Clientset{}
	fakeMetricsClient.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		tc.Lock()
		defer tc.Unlock()

		metrics := &metricsapi.PodMetricsList{}
		for i, cpu := range tc.reportedLevels {
			// NB: the list reactor actually does label selector filtering for us,
			// so we have to make sure our results match the label selector
			podMetric := metricsapi.PodMetrics{
				ObjectMeta: metav1.ObjectMeta{
					Name:      fmt.Sprintf("%s-%d", podNamePrefix, i),
					Namespace: namespace,
					Labels:    labelSet,
				},
				Timestamp: metav1.Time{Time: time.Now()},
				Window:    metav1.Duration{Duration: time.Minute},
				Containers: []metricsapi.ContainerMetrics{
					{
						Name: "container",
						Usage: v1.ResourceList{
							v1.ResourceCPU: *resource.NewMilliQuantity(
								int64(cpu),
								resource.DecimalSI),
							v1.ResourceMemory: *resource.NewQuantity(
								int64(1024*1024),
								resource.BinarySI),
						},
					},
				},
			}
			metrics.Items = append(metrics.Items, podMetric)
		}

		return true, metrics, nil
	})

	fakeCMClient := &cmfake.FakeCustomMetricsClient{}
	fakeCMClient.AddReactor("get", "*", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		tc.Lock()
		defer tc.Unlock()

		getForAction, wasGetFor := action.(cmfake.GetForAction)
		if !wasGetFor {
			return true, nil, fmt.Errorf("expected a get-for action, got %v instead", action)
		}

		if getForAction.GetName() == "*" {
			metrics := &cmapi.MetricValueList{}

			// multiple objects
			assert.Equal(t, "pods", getForAction.GetResource().Resource, "the type of object that we requested multiple metrics for should have been pods")
			assert.Equal(t, "qps", getForAction.GetMetricName(), "the metric name requested should have been qps, as specified in the metric spec")

			for i, level := range tc.reportedLevels {
				podMetric := cmapi.MetricValue{
					DescribedObject: v1.ObjectReference{
						Kind:      "Pod",
						Name:      fmt.Sprintf("%s-%d", podNamePrefix, i),
						Namespace: namespace,
					},
					Timestamp: metav1.Time{Time: time.Now()},
					Metric: cmapi.MetricIdentifier{
						Name: "qps",
					},
					Value: *resource.NewMilliQuantity(int64(level), resource.DecimalSI),
				}
				metrics.Items = append(metrics.Items, podMetric)
			}

			return true, metrics, nil
		}

		name := getForAction.GetName()
		mapper := testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme)
		metrics := &cmapi.MetricValueList{}
		var matchedTarget *autoscalingv2.MetricSpec
		for i, target := range tc.metricsTarget {
			if target.Type == autoscalingv2.ObjectMetricSourceType && name == target.Object.DescribedObject.Name {
				gk := schema.FromAPIVersionAndKind(target.Object.DescribedObject.APIVersion, target.Object.DescribedObject.Kind).GroupKind()
				mapping, err := mapper.RESTMapping(gk)
				if err != nil {
					t.Logf("unable to get mapping for %s: %v", gk.String(), err)
					continue
				}
				groupResource := mapping.Resource.GroupResource()

				if getForAction.GetResource().Resource == groupResource.String() {
					matchedTarget = &tc.metricsTarget[i]
				}
			}
		}
		assert.NotNil(t, matchedTarget, "this request should have matched one of the metric specs")
		assert.Equal(t, "qps", getForAction.GetMetricName(), "the metric name requested should have been qps, as specified in the metric spec")

		metrics.Items = []cmapi.MetricValue{
			{
				DescribedObject: v1.ObjectReference{
					Kind:       matchedTarget.Object.DescribedObject.Kind,
					APIVersion: matchedTarget.Object.DescribedObject.APIVersion,
					Name:       name,
				},
				Timestamp: metav1.Time{Time: time.Now()},
				Metric: cmapi.MetricIdentifier{
					Name: "qps",
				},
				Value: *resource.NewMilliQuantity(int64(tc.reportedLevels[0]), resource.DecimalSI),
			},
		}

		return true, metrics, nil
	})

	fakeEMClient := &emfake.FakeExternalMetricsClient{}
	fakeEMClient.AddReactor("list", "*", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		tc.Lock()
		defer tc.Unlock()

		listAction, wasList := action.(core.ListAction)
		if !wasList {
			return true, nil, fmt.Errorf("expected a list action, got %v instead", action)
		}

		metrics := &emapi.ExternalMetricValueList{}

		assert.Equal(t, "qps", listAction.GetResource().Resource, "the metric name requested should have been qps, as specified in the metric spec")

		for _, level := range tc.reportedLevels {
			metric := emapi.ExternalMetricValue{
				Timestamp:  metav1.Time{Time: time.Now()},
				MetricName: "qps",
				Value:      *resource.NewMilliQuantity(int64(level), resource.DecimalSI),
			}
			metrics.Items = append(metrics.Items, metric)
		}

		return true, metrics, nil
	})

	return fakeClient, fakeMetricsClient, fakeCMClient, fakeEMClient, fakeScaleClient
}
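
// verifyResults checks that the fake clients observed the scale update, status
// update, and (optionally) event that the test case expects.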
func (tc *testCase) verifyResults(t *testing.T) {
	tc.Lock()
	defer tc.Unlock()

	assert.Equal(t, tc.initialReplicas != tc.expectedDesiredReplicas, tc.scaleUpdated, "the scale should only be updated if we expected a change in replicas")
	assert.True(t, tc.statusUpdated, "the status should have been updated")
	if tc.verifyEvents {
		assert.Equal(t, tc.initialReplicas != tc.expectedDesiredReplicas, tc.eventCreated, "an event should have been created only if we expected a change in replicas")
	}
}
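
// setupController wires the fake clients from prepareTestClient (or the
// overrides on tc) into a HorizontalController backed by a shared informer
// factory, ready to be run by runTestWithController.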
func (tc *testCase) setupController(t *testing.T) (*HorizontalController, informers.SharedInformerFactory) {
	testClient, testMetricsClient, testCMClient, testEMClient, testScaleClient := tc.prepareTestClient(t)
	if tc.testClient != nil {
		testClient = tc.testClient
	}
	if tc.testMetricsClient != nil {
		testMetricsClient = tc.testMetricsClient
	}
	if tc.testCMClient != nil {
		testCMClient = tc.testCMClient
	}
	if tc.testEMClient != nil {
		testEMClient = tc.testEMClient
	}
	if tc.testScaleClient != nil {
		testScaleClient = tc.testScaleClient
	}
	metricsClient := metrics.NewRESTMetricsClient(
		testMetricsClient.MetricsV1beta1(),
		testCMClient,
		testEMClient,
	)

	eventClient := &fake.Clientset{}
	eventClient.AddReactor("create", "events", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		tc.Lock()
		defer tc.Unlock()

		obj := action.(core.CreateAction).GetObject().(*v1.Event)
		if tc.verifyEvents {
			switch obj.Reason {
			case "SuccessfulRescale":
				assert.Equal(t, fmt.Sprintf("New size: %d; reason: cpu resource utilization (percentage of request) above target", tc.expectedDesiredReplicas), obj.Message)
			case "DesiredReplicasComputed":
				assert.Equal(t, fmt.Sprintf(
					"Computed the desired num of replicas: %d (avgCPUutil: %d, current replicas: %d)",
					tc.expectedDesiredReplicas,
					(int64(tc.reportedLevels[0])*100)/tc.reportedCPURequests[0].MilliValue(), tc.initialReplicas), obj.Message)
			default:
				assert.False(t, true, fmt.Sprintf("Unexpected event: %s / %s", obj.Reason, obj.Message))
			}
		}
		tc.eventCreated = true
		return true, obj, nil
	})

	informerFactory := informers.NewSharedInformerFactory(testClient, controller.NoResyncPeriodFunc())
	defaultDownscalestabilizationWindow := 5 * time.Minute

	hpaController := NewHorizontalController(
		eventClient.CoreV1(),
		testScaleClient,
		testClient.AutoscalingV1(),
		testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme),
		metricsClient,
		informerFactory.Autoscaling().V1().HorizontalPodAutoscalers(),
		informerFactory.Core().V1().Pods(),
		controller.NoResyncPeriodFunc(),
		defaultDownscalestabilizationWindow,
		defaultTestingTolerance,
		defaultTestingCpuInitializationPeriod,
		defaultTestingDelayOfInitialReadinessStatus,
	)
	hpaController.hpaListerSynced = alwaysReady
	if tc.recommendations != nil {
		hpaController.recommendations["test-namespace/test-hpa"] = tc.recommendations
	}

	return hpaController, informerFactory
}
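
// hotCpuCreationTime returns a pod start time of "now", which the tests treat
// as still inside the CPU initialization window; coolCpuCreationTime returns a
// start time three minutes in the past.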
func hotCpuCreationTime() metav1.Time {
	return metav1.Time{Time: time.Now()}
}

func coolCpuCreationTime() metav1.Time {
	return metav1.Time{Time: time.Now().Add(-3 * time.Minute)}
}
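
// runTestWithController starts the informers and the controller, waits for the
// HPA to be reconciled once, and then verifies the recorded results.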
func (tc *testCase) runTestWithController(t *testing.T, hpaController *HorizontalController, informerFactory informers.SharedInformerFactory) {
	stop := make(chan struct{})
	defer close(stop)
	informerFactory.Start(stop)
	go hpaController.Run(stop)

	tc.Lock()
	if tc.verifyEvents {
		tc.Unlock()
		// We need to wait for events to be broadcasted (sleep for longer than record.sleepDuration).
		time.Sleep(2 * time.Second)
	} else {
		tc.Unlock()
	}
	// Wait for HPA to be processed.
	<-tc.processed
	tc.verifyResults(t)
}

func (tc *testCase) runTest(t *testing.T) {
	hpaController, informerFactory := tc.setupController(t)
	tc.runTestWithController(t, hpaController, informerFactory)
}
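
// TestScaleUp drives a scale-up from 3 to 5 replicas: 300, 500, and 700
// milli-CPU against three 1000m requests is 50% average utilization, so a 30%
// target yields ceil(3 * 50/30) = 5 desired replicas.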
func TestScaleUp(t *testing.T) {
	tc := testCase{
		minReplicas:             2,
		maxReplicas:             6,
		initialReplicas:         3,
		expectedDesiredReplicas: 5,
		CPUTarget:               30,
		verifyCPUCurrent:        true,
		reportedLevels:          []uint64{300, 500, 700},
		reportedCPURequests:     []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
		useMetricsAPI:           true,
	}
	tc.runTest(t)
}

func TestScaleUpUnreadyLessScale(t *testing.T) {
	tc := testCase{
		minReplicas:             2,
		maxReplicas:             6,
		initialReplicas:         3,
		expectedDesiredReplicas: 4,
		CPUTarget:               30,
		CPUCurrent:              60,
		verifyCPUCurrent:        true,
		reportedLevels:          []uint64{300, 500, 700},
		reportedCPURequests:     []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
		reportedPodReadiness:    []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionTrue},
		useMetricsAPI:           true,
	}
	tc.runTest(t)
}

func TestScaleUpHotCpuLessScale(t *testing.T) {
	tc := testCase{
		minReplicas:             2,
		maxReplicas:             6,
		initialReplicas:         3,
		expectedDesiredReplicas: 4,
		CPUTarget:               30,
		CPUCurrent:              60,
		verifyCPUCurrent:        true,
		reportedLevels:          []uint64{300, 500, 700},
		reportedCPURequests:     []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
		reportedPodStartTime:    []metav1.Time{hotCpuCreationTime(), coolCpuCreationTime(), coolCpuCreationTime()},
		useMetricsAPI:           true,
	}
	tc.runTest(t)
}

func TestScaleUpUnreadyNoScale(t *testing.T) {
	tc := testCase{
		minReplicas:             2,
		maxReplicas:             6,
		initialReplicas:         3,
		expectedDesiredReplicas: 3,
		CPUTarget:               30,
		CPUCurrent:              40,
		verifyCPUCurrent:        true,
		reportedLevels:          []uint64{400, 500, 700},
		reportedCPURequests:     []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
		reportedPodReadiness:    []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
		useMetricsAPI:           true,
		expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
			Type:   autoscalingv2.AbleToScale,
			Status: v1.ConditionTrue,
			Reason: "ReadyForNewScale",
		}),
	}
	tc.runTest(t)
}

func TestScaleUpHotCpuNoScale(t *testing.T) {
	tc := testCase{
		minReplicas:             2,
		maxReplicas:             6,
		initialReplicas:         3,
		expectedDesiredReplicas: 3,
		CPUTarget:               30,
		CPUCurrent:              40,
		verifyCPUCurrent:        true,
		reportedLevels:          []uint64{400, 500, 700},
		reportedCPURequests:     []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
		reportedPodReadiness:    []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
		reportedPodStartTime:    []metav1.Time{coolCpuCreationTime(), hotCpuCreationTime(), hotCpuCreationTime()},
		useMetricsAPI:           true,
		expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
			Type:   autoscalingv2.AbleToScale,
			Status: v1.ConditionTrue,
			Reason: "ReadyForNewScale",
		}),
	}
	tc.runTest(t)
}

func TestScaleUpIgnoresFailedPods(t *testing.T) {
	tc := testCase{
		minReplicas:             2,
		maxReplicas:             6,
		initialReplicas:         2,
		expectedDesiredReplicas: 4,
		CPUTarget:               30,
		CPUCurrent:              60,
		verifyCPUCurrent:        true,
		reportedLevels:          []uint64{500, 700},
		reportedCPURequests:     []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
		reportedPodReadiness:    []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
		reportedPodPhase:        []v1.PodPhase{v1.PodRunning, v1.PodRunning, v1.PodFailed, v1.PodFailed},
		useMetricsAPI:           true,
	}
	tc.runTest(t)
}

func TestScaleUpIgnoresDeletionPods(t *testing.T) {
	tc := testCase{
		minReplicas:                  2,
		maxReplicas:                  6,
		initialReplicas:              2,
		expectedDesiredReplicas:      4,
		CPUTarget:                    30,
		CPUCurrent:                   60,
		verifyCPUCurrent:             true,
		reportedLevels:               []uint64{500, 700},
		reportedCPURequests:          []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
		reportedPodReadiness:         []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
		reportedPodPhase:             []v1.PodPhase{v1.PodRunning, v1.PodRunning, v1.PodRunning, v1.PodRunning},
		reportedPodDeletionTimestamp: []bool{false, false, true, true},
		useMetricsAPI:                true,
	}
	tc.runTest(t)
}

func TestScaleUpDeployment(t *testing.T) {
	tc := testCase{
		minReplicas:             2,
		maxReplicas:             6,
		initialReplicas:         3,
		expectedDesiredReplicas: 5,
		CPUTarget:               30,
		verifyCPUCurrent:        true,
		reportedLevels:          []uint64{300, 500, 700},
		reportedCPURequests:     []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
		useMetricsAPI:           true,
		resource: &fakeResource{
			name:       "test-dep",
			apiVersion: "apps/v1",
			kind:       "Deployment",
		},
	}
	tc.runTest(t)
}
func TestScaleUpReplicaSet ( t * testing . T ) {
tc := testCase {
2018-07-19 12:42:28 +00:00
minReplicas : 2 ,
maxReplicas : 6 ,
initialReplicas : 3 ,
expectedDesiredReplicas : 5 ,
CPUTarget : 30 ,
verifyCPUCurrent : true ,
reportedLevels : [ ] uint64 { 300 , 500 , 700 } ,
reportedCPURequests : [ ] resource . Quantity { resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) } ,
useMetricsAPI : true ,
2016-03-09 00:27:13 +00:00
resource : & fakeResource {
name : "test-replicaset" ,
2018-12-19 16:18:53 +00:00
apiVersion : "apps/v1" ,
2017-10-11 18:31:04 +00:00
kind : "ReplicaSet" ,
2016-03-09 00:27:13 +00:00
} ,
}
tc . runTest ( t )
}
2016-01-29 11:20:19 +00:00
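// Reported pod metric levels are in milli-units: (20+10+30)/3 = 20 qps on average against a
// 15 qps target average value, so the expected recommendation is ceil(3 * 20/15) = 4.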
func TestScaleUpCM ( t * testing . T ) {
2018-06-28 18:28:13 +00:00
averageValue := resource . MustParse ( "15.0" )
2016-01-29 11:20:19 +00:00
tc := testCase {
2018-07-19 12:42:28 +00:00
minReplicas : 2 ,
maxReplicas : 6 ,
initialReplicas : 3 ,
expectedDesiredReplicas : 4 ,
CPUTarget : 0 ,
2016-12-02 20:18:26 +00:00
metricsTarget : [ ] autoscalingv2 . MetricSpec {
{
Type : autoscalingv2 . PodsMetricSourceType ,
Pods : & autoscalingv2 . PodsMetricSource {
2018-06-28 18:28:13 +00:00
Metric : autoscalingv2 . MetricIdentifier {
Name : "qps" ,
} ,
Target : autoscalingv2 . MetricTarget {
AverageValue : & averageValue ,
} ,
2016-12-02 20:18:26 +00:00
} ,
} ,
2016-01-29 11:20:19 +00:00
} ,
2017-02-20 06:17:16 +00:00
reportedLevels : [ ] uint64 { 20000 , 10000 , 30000 } ,
2016-01-29 11:20:19 +00:00
reportedCPURequests : [ ] resource . Quantity { resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) } ,
}
tc . runTest ( t )
}
2018-08-08 13:00:17 +00:00
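// For pods metrics the unready, hot-CPU pod still contributes its sample: the average is
// (50+10+30)/3 = 30 qps against a 15 qps target, so the scale-up is not reduced and the
// expected recommendation is ceil(3 * 30/15) = 6.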
func TestScaleUpCMUnreadyAndHotCpuNoLessScale ( t * testing . T ) {
2018-06-28 18:28:13 +00:00
averageValue := resource . MustParse ( "15.0" )
2016-09-27 18:47:52 +00:00
tc := testCase {
2018-07-19 12:42:28 +00:00
minReplicas : 2 ,
maxReplicas : 6 ,
initialReplicas : 3 ,
2018-08-08 13:00:17 +00:00
expectedDesiredReplicas : 6 ,
2018-07-19 12:42:28 +00:00
CPUTarget : 0 ,
2016-12-02 20:18:26 +00:00
metricsTarget : [ ] autoscalingv2 . MetricSpec {
{
Type : autoscalingv2 . PodsMetricSourceType ,
Pods : & autoscalingv2 . PodsMetricSource {
2018-06-28 18:28:13 +00:00
Metric : autoscalingv2 . MetricIdentifier {
Name : "qps" ,
} ,
Target : autoscalingv2 . MetricTarget {
AverageValue : & averageValue ,
} ,
2016-12-02 20:18:26 +00:00
} ,
} ,
2016-09-27 18:47:52 +00:00
} ,
2017-02-20 06:17:16 +00:00
reportedLevels : [ ] uint64 { 50000 , 10000 , 30000 } ,
2016-11-18 20:50:17 +00:00
reportedPodReadiness : [ ] v1 . ConditionStatus { v1 . ConditionTrue , v1 . ConditionTrue , v1 . ConditionFalse } ,
2018-08-08 13:00:17 +00:00
reportedPodStartTime : [ ] metav1 . Time { coolCpuCreationTime ( ) , coolCpuCreationTime ( ) , hotCpuCreationTime ( ) } ,
2016-09-27 18:47:52 +00:00
reportedCPURequests : [ ] resource . Quantity { resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) } ,
}
tc . runTest ( t )
}
2018-08-08 13:00:17 +00:00
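// The average pod metric is (50+15+30)/3, about 31.7 qps against a 15 qps target, giving a
// raw recommendation of 7, which is capped at maxReplicas (6) and reported as TooManyReplicas.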
func TestScaleUpCMUnreadyandCpuHot ( t * testing . T ) {
2018-06-28 18:28:13 +00:00
averageValue := resource . MustParse ( "15.0" )
2016-09-27 18:47:52 +00:00
tc := testCase {
2018-07-19 12:42:28 +00:00
minReplicas : 2 ,
maxReplicas : 6 ,
initialReplicas : 3 ,
2018-08-08 13:00:17 +00:00
expectedDesiredReplicas : 6 ,
2018-07-19 12:42:28 +00:00
CPUTarget : 0 ,
2016-12-02 20:18:26 +00:00
metricsTarget : [ ] autoscalingv2 . MetricSpec {
{
Type : autoscalingv2 . PodsMetricSourceType ,
Pods : & autoscalingv2 . PodsMetricSource {
2018-06-28 18:28:13 +00:00
Metric : autoscalingv2 . MetricIdentifier {
Name : "qps" ,
} ,
Target : autoscalingv2 . MetricTarget {
AverageValue : & averageValue ,
} ,
2016-12-02 20:18:26 +00:00
} ,
} ,
2016-09-27 18:47:52 +00:00
} ,
2017-02-20 06:17:16 +00:00
reportedLevels : [ ] uint64 { 50000 , 15000 , 30000 } ,
2016-11-18 20:50:17 +00:00
reportedPodReadiness : [ ] v1 . ConditionStatus { v1 . ConditionFalse , v1 . ConditionTrue , v1 . ConditionFalse } ,
2018-08-08 13:00:17 +00:00
reportedPodStartTime : [ ] metav1 . Time { hotCpuCreationTime ( ) , coolCpuCreationTime ( ) , hotCpuCreationTime ( ) } ,
2016-09-27 18:47:52 +00:00
reportedCPURequests : [ ] resource . Quantity { resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) } ,
2017-05-24 21:09:47 +00:00
expectedConditions : statusOkWithOverrides ( autoscalingv2 . HorizontalPodAutoscalerCondition {
Type : autoscalingv2 . AbleToScale ,
Status : v1 . ConditionTrue ,
2018-08-08 13:00:17 +00:00
Reason : "SucceededRescale" ,
} , autoscalingv2 . HorizontalPodAutoscalerCondition {
Type : autoscalingv2 . ScalingLimited ,
Status : v1 . ConditionTrue ,
Reason : "TooManyReplicas" ,
} ) ,
}
tc . runTest ( t )
}
func TestScaleUpHotCpuNoScaleWouldScaleDown ( t * testing . T ) {
2018-06-28 18:28:13 +00:00
averageValue := resource . MustParse ( "15.0" )
2018-08-08 13:00:17 +00:00
tc := testCase {
minReplicas : 2 ,
maxReplicas : 6 ,
initialReplicas : 3 ,
expectedDesiredReplicas : 6 ,
CPUTarget : 0 ,
metricsTarget : [ ] autoscalingv2 . MetricSpec {
{
Type : autoscalingv2 . PodsMetricSourceType ,
Pods : & autoscalingv2 . PodsMetricSource {
2018-06-28 18:28:13 +00:00
Metric : autoscalingv2 . MetricIdentifier {
Name : "qps" ,
} ,
Target : autoscalingv2 . MetricTarget {
AverageValue : & averageValue ,
} ,
2018-08-08 13:00:17 +00:00
} ,
} ,
} ,
reportedLevels : [ ] uint64 { 50000 , 15000 , 30000 } ,
reportedCPURequests : [ ] resource . Quantity { resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) } ,
reportedPodStartTime : [ ] metav1 . Time { hotCpuCreationTime ( ) , coolCpuCreationTime ( ) , hotCpuCreationTime ( ) } ,
expectedConditions : statusOkWithOverrides ( autoscalingv2 . HorizontalPodAutoscalerCondition {
Type : autoscalingv2 . AbleToScale ,
Status : v1 . ConditionTrue ,
Reason : "SucceededRescale" ,
} , autoscalingv2 . HorizontalPodAutoscalerCondition {
Type : autoscalingv2 . ScalingLimited ,
Status : v1 . ConditionTrue ,
Reason : "TooManyReplicas" ,
2017-05-24 21:09:47 +00:00
} ) ,
2016-09-27 18:47:52 +00:00
}
tc . runTest ( t )
}
2017-02-20 06:17:16 +00:00
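// The object metric reads 20 (20000m) against a target value of 15, so the expected
// recommendation is ceil(3 * 20/15) = 4.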
func TestScaleUpCMObject ( t * testing . T ) {
2018-06-28 18:28:13 +00:00
targetValue := resource . MustParse ( "15.0" )
2017-02-20 06:17:16 +00:00
tc := testCase {
2018-07-19 12:42:28 +00:00
minReplicas : 2 ,
maxReplicas : 6 ,
initialReplicas : 3 ,
expectedDesiredReplicas : 4 ,
CPUTarget : 0 ,
2017-02-20 06:17:16 +00:00
metricsTarget : [ ] autoscalingv2 . MetricSpec {
{
Type : autoscalingv2 . ObjectMetricSourceType ,
Object : & autoscalingv2 . ObjectMetricSource {
2018-06-28 18:28:13 +00:00
DescribedObject : autoscalingv2 . CrossVersionObjectReference {
2018-12-19 16:18:53 +00:00
APIVersion : "apps/v1" ,
2017-02-20 06:17:16 +00:00
Kind : "Deployment" ,
Name : "some-deployment" ,
} ,
2018-06-28 18:28:13 +00:00
Metric : autoscalingv2 . MetricIdentifier {
Name : "qps" ,
} ,
Target : autoscalingv2 . MetricTarget {
Value : & targetValue ,
} ,
2017-02-20 06:17:16 +00:00
} ,
} ,
} ,
reportedLevels : [ ] uint64 { 20000 } ,
}
tc . runTest ( t )
}
2019-01-23 20:00:05 +00:00
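// The object metric reads 40 (40000m) against a per-pod target of 10, so the expected
// recommendation is ceil(40 / 10) = 4.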
func TestScaleUpPerPodCMObject ( t * testing . T ) {
targetAverageValue := resource . MustParse ( "10.0" )
tc := testCase {
minReplicas : 2 ,
maxReplicas : 6 ,
initialReplicas : 3 ,
expectedDesiredReplicas : 4 ,
CPUTarget : 0 ,
metricsTarget : [ ] autoscalingv2 . MetricSpec {
{
Type : autoscalingv2 . ObjectMetricSourceType ,
Object : & autoscalingv2 . ObjectMetricSource {
DescribedObject : autoscalingv2 . CrossVersionObjectReference {
APIVersion : "apps/v1" ,
Kind : "Deployment" ,
Name : "some-deployment" ,
} ,
Metric : autoscalingv2 . MetricIdentifier {
Name : "qps" ,
} ,
Target : autoscalingv2 . MetricTarget {
AverageValue : & targetAverageValue ,
} ,
} ,
} ,
} ,
reportedLevels : [ ] uint64 { 40000 } ,
}
tc . runTest ( t )
}
2018-02-21 10:19:51 +00:00
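// The external metric reads 8600m against a target value of 6666m, so the expected
// recommendation is ceil(3 * 8600/6666) = 4.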
func TestScaleUpCMExternal ( t * testing . T ) {
tc := testCase {
2018-07-19 12:42:28 +00:00
minReplicas : 2 ,
maxReplicas : 6 ,
initialReplicas : 3 ,
expectedDesiredReplicas : 4 ,
2018-02-21 10:19:51 +00:00
metricsTarget : [ ] autoscalingv2 . MetricSpec {
{
Type : autoscalingv2 . ExternalMetricSourceType ,
External : & autoscalingv2 . ExternalMetricSource {
2018-06-28 18:28:13 +00:00
Metric : autoscalingv2 . MetricIdentifier {
Name : "qps" ,
Selector : & metav1 . LabelSelector { } ,
} ,
Target : autoscalingv2 . MetricTarget {
Value : resource . NewMilliQuantity ( 6666 , resource . DecimalSI ) ,
} ,
2018-02-21 10:19:51 +00:00
} ,
} ,
} ,
reportedLevels : [ ] uint64 { 8600 } ,
}
tc . runTest ( t )
}
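// The external metric reads 8600m against a per-pod target of 2222m, so the expected
// recommendation is ceil(8600 / 2222) = 4.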
func TestScaleUpPerPodCMExternal ( t * testing . T ) {
tc := testCase {
2018-07-19 12:42:28 +00:00
minReplicas : 2 ,
maxReplicas : 6 ,
initialReplicas : 3 ,
expectedDesiredReplicas : 4 ,
2018-02-21 10:19:51 +00:00
metricsTarget : [ ] autoscalingv2 . MetricSpec {
{
Type : autoscalingv2 . ExternalMetricSourceType ,
External : & autoscalingv2 . ExternalMetricSource {
2018-06-28 18:28:13 +00:00
Metric : autoscalingv2 . MetricIdentifier {
Name : "qps" ,
Selector : & metav1 . LabelSelector { } ,
} ,
Target : autoscalingv2 . MetricTarget {
AverageValue : resource . NewMilliQuantity ( 2222 , resource . DecimalSI ) ,
} ,
2018-02-21 10:19:51 +00:00
} ,
} ,
} ,
reportedLevels : [ ] uint64 { 8600 } ,
}
tc . runTest ( t )
}
2015-09-14 08:14:32 +00:00
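// Average CPU usage is (100+300+500+250+250)m over 5*1000m requested = 28% against a 50%
// target, so the expected recommendation is ceil(5 * 28/50) = 3.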
func TestScaleDown ( t * testing . T ) {
tc := testCase {
2018-07-19 12:42:28 +00:00
minReplicas : 2 ,
maxReplicas : 6 ,
initialReplicas : 5 ,
expectedDesiredReplicas : 3 ,
CPUTarget : 50 ,
verifyCPUCurrent : true ,
reportedLevels : [ ] uint64 { 100 , 300 , 500 , 250 , 250 } ,
reportedCPURequests : [ ] resource . Quantity { resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) } ,
useMetricsAPI : true ,
2018-09-19 11:11:59 +00:00
recommendations : [ ] timestampedRecommendation { } ,
}
tc . runTest ( t )
}
func TestScaleDownStabilizeInitialSize ( t * testing . T ) {
tc := testCase {
minReplicas : 2 ,
maxReplicas : 6 ,
initialReplicas : 5 ,
expectedDesiredReplicas : 5 ,
CPUTarget : 50 ,
verifyCPUCurrent : true ,
reportedLevels : [ ] uint64 { 100 , 300 , 500 , 250 , 250 } ,
reportedCPURequests : [ ] resource . Quantity { resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) } ,
useMetricsAPI : true ,
recommendations : nil ,
expectedConditions : statusOkWithOverrides ( autoscalingv2 . HorizontalPodAutoscalerCondition {
Type : autoscalingv2 . AbleToScale ,
Status : v1 . ConditionTrue ,
Reason : "ReadyForNewScale" ,
} , autoscalingv2 . HorizontalPodAutoscalerCondition {
Type : autoscalingv2 . AbleToScale ,
Status : v1 . ConditionTrue ,
Reason : "ScaleDownStabilized" ,
} ) ,
2015-08-25 17:16:47 +00:00
}
2015-09-14 08:14:32 +00:00
tc . runTest ( t )
}
2015-08-25 17:16:47 +00:00
2016-01-29 11:20:19 +00:00
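// The average pod metric is 12 qps against a 20 qps target average value, so the expected
// recommendation is ceil(5 * 12/20) = 3.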
func TestScaleDownCM ( t * testing . T ) {
2018-06-28 18:28:13 +00:00
averageValue := resource . MustParse ( "20.0" )
2016-01-29 11:20:19 +00:00
tc := testCase {
2018-07-19 12:42:28 +00:00
minReplicas : 2 ,
maxReplicas : 6 ,
initialReplicas : 5 ,
expectedDesiredReplicas : 3 ,
CPUTarget : 0 ,
2016-12-02 20:18:26 +00:00
metricsTarget : [ ] autoscalingv2 . MetricSpec {
{
Type : autoscalingv2 . PodsMetricSourceType ,
Pods : & autoscalingv2 . PodsMetricSource {
2018-06-28 18:28:13 +00:00
Metric : autoscalingv2 . MetricIdentifier {
Name : "qps" ,
} ,
Target : autoscalingv2 . MetricTarget {
AverageValue : & averageValue ,
} ,
2016-12-02 20:18:26 +00:00
} ,
} ,
} ,
2017-02-20 06:17:16 +00:00
reportedLevels : [ ] uint64 { 12000 , 12000 , 12000 , 12000 , 12000 } ,
reportedCPURequests : [ ] resource . Quantity { resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) } ,
2018-09-19 11:11:59 +00:00
recommendations : [ ] timestampedRecommendation { } ,
2017-02-20 06:17:16 +00:00
}
tc . runTest ( t )
}
func TestScaleDownCMObject ( t * testing . T ) {
2018-06-28 18:28:13 +00:00
targetValue := resource . MustParse ( "20.0" )
2017-02-20 06:17:16 +00:00
tc := testCase {
2018-07-19 12:42:28 +00:00
minReplicas : 2 ,
maxReplicas : 6 ,
initialReplicas : 5 ,
expectedDesiredReplicas : 3 ,
CPUTarget : 0 ,
2017-02-20 06:17:16 +00:00
metricsTarget : [ ] autoscalingv2 . MetricSpec {
{
Type : autoscalingv2 . ObjectMetricSourceType ,
Object : & autoscalingv2 . ObjectMetricSource {
2018-06-28 18:28:13 +00:00
DescribedObject : autoscalingv2 . CrossVersionObjectReference {
2018-12-19 16:18:53 +00:00
APIVersion : "apps/v1" ,
2017-02-20 06:17:16 +00:00
Kind : "Deployment" ,
Name : "some-deployment" ,
} ,
2018-06-28 18:28:13 +00:00
Metric : autoscalingv2 . MetricIdentifier {
Name : "qps" ,
} ,
Target : autoscalingv2 . MetricTarget {
Value : & targetValue ,
} ,
2017-02-20 06:17:16 +00:00
} ,
} ,
} ,
reportedLevels : [ ] uint64 { 12000 } ,
2016-01-29 11:20:19 +00:00
reportedCPURequests : [ ] resource . Quantity { resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) } ,
2018-09-19 11:11:59 +00:00
recommendations : [ ] timestampedRecommendation { } ,
2016-01-29 11:20:19 +00:00
}
tc . runTest ( t )
}
2019-01-23 20:00:05 +00:00
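// The object metric reads 60 (60000m) against a per-pod target of 20, so the expected
// recommendation is ceil(60 / 20) = 3.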
func TestScaleDownPerPodCMObject ( t * testing . T ) {
targetAverageValue := resource . MustParse ( "20.0" )
tc := testCase {
minReplicas : 2 ,
maxReplicas : 6 ,
initialReplicas : 5 ,
expectedDesiredReplicas : 3 ,
CPUTarget : 0 ,
metricsTarget : [ ] autoscalingv2 . MetricSpec {
{
Type : autoscalingv2 . ObjectMetricSourceType ,
Object : & autoscalingv2 . ObjectMetricSource {
DescribedObject : autoscalingv2 . CrossVersionObjectReference {
APIVersion : "apps/v1" ,
Kind : "Deployment" ,
Name : "some-deployment" ,
} ,
Metric : autoscalingv2 . MetricIdentifier {
Name : "qps" ,
} ,
Target : autoscalingv2 . MetricTarget {
AverageValue : & targetAverageValue ,
} ,
} ,
} ,
} ,
reportedLevels : [ ] uint64 { 60000 } ,
reportedCPURequests : [ ] resource . Quantity { resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) } ,
recommendations : [ ] timestampedRecommendation { } ,
}
tc . runTest ( t )
}
2018-02-21 10:19:51 +00:00
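// The external metric reads 8600m against a target value of 14400m, so the expected
// recommendation is ceil(5 * 8600/14400) = 3.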
func TestScaleDownCMExternal ( t * testing . T ) {
tc := testCase {
2018-07-19 12:42:28 +00:00
minReplicas : 2 ,
maxReplicas : 6 ,
initialReplicas : 5 ,
expectedDesiredReplicas : 3 ,
2018-02-21 10:19:51 +00:00
metricsTarget : [ ] autoscalingv2 . MetricSpec {
{
Type : autoscalingv2 . ExternalMetricSourceType ,
External : & autoscalingv2 . ExternalMetricSource {
2018-06-28 18:28:13 +00:00
Metric : autoscalingv2 . MetricIdentifier {
Name : "qps" ,
Selector : & metav1 . LabelSelector { } ,
} ,
Target : autoscalingv2 . MetricTarget {
Value : resource . NewMilliQuantity ( 14400 , resource . DecimalSI ) ,
} ,
2018-02-21 10:19:51 +00:00
} ,
} ,
} ,
2018-09-19 11:11:59 +00:00
reportedLevels : [ ] uint64 { 8600 } ,
recommendations : [ ] timestampedRecommendation { } ,
2018-02-21 10:19:51 +00:00
}
tc . runTest ( t )
}
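// The external metric reads 8600m against a per-pod target of 3000m, so the expected
// recommendation is ceil(8600 / 3000) = 3.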
func TestScaleDownPerPodCMExternal ( t * testing . T ) {
tc := testCase {
2018-07-19 12:42:28 +00:00
minReplicas : 2 ,
maxReplicas : 6 ,
initialReplicas : 5 ,
expectedDesiredReplicas : 3 ,
2018-02-21 10:19:51 +00:00
metricsTarget : [ ] autoscalingv2 . MetricSpec {
{
Type : autoscalingv2 . ExternalMetricSourceType ,
External : & autoscalingv2 . ExternalMetricSource {
2018-06-28 18:28:13 +00:00
Metric : autoscalingv2 . MetricIdentifier {
Name : "qps" ,
Selector : & metav1 . LabelSelector { } ,
} ,
Target : autoscalingv2 . MetricTarget {
AverageValue : resource . NewMilliQuantity ( 3000 , resource . DecimalSI ) ,
} ,
2018-02-21 10:19:51 +00:00
} ,
} ,
} ,
2018-09-19 11:11:59 +00:00
reportedLevels : [ ] uint64 { 8600 } ,
recommendations : [ ] timestampedRecommendation { } ,
2018-02-21 10:19:51 +00:00
}
tc . runTest ( t )
}
2018-08-08 13:00:17 +00:00
func TestScaleDownIncludeUnreadyPods ( t * testing . T ) {
2016-09-27 18:47:52 +00:00
tc := testCase {
2018-07-19 12:42:28 +00:00
minReplicas : 2 ,
maxReplicas : 6 ,
initialReplicas : 5 ,
expectedDesiredReplicas : 2 ,
CPUTarget : 50 ,
CPUCurrent : 30 ,
verifyCPUCurrent : true ,
reportedLevels : [ ] uint64 { 100 , 300 , 500 , 250 , 250 } ,
reportedCPURequests : [ ] resource . Quantity { resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) } ,
useMetricsAPI : true ,
reportedPodReadiness : [ ] v1 . ConditionStatus { v1 . ConditionTrue , v1 . ConditionTrue , v1 . ConditionTrue , v1 . ConditionFalse , v1 . ConditionFalse } ,
2018-09-19 11:11:59 +00:00
recommendations : [ ] timestampedRecommendation { } ,
2016-09-27 18:47:52 +00:00
}
tc . runTest ( t )
}
2018-08-08 13:00:17 +00:00
func TestScaleDownIgnoreHotCpuPods ( t * testing . T ) {
tc := testCase {
minReplicas : 2 ,
maxReplicas : 6 ,
initialReplicas : 5 ,
expectedDesiredReplicas : 2 ,
CPUTarget : 50 ,
CPUCurrent : 30 ,
verifyCPUCurrent : true ,
reportedLevels : [ ] uint64 { 100 , 300 , 500 , 250 , 250 } ,
reportedCPURequests : [ ] resource . Quantity { resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) } ,
useMetricsAPI : true ,
reportedPodStartTime : [ ] metav1 . Time { coolCpuCreationTime ( ) , coolCpuCreationTime ( ) , coolCpuCreationTime ( ) , hotCpuCreationTime ( ) , hotCpuCreationTime ( ) } ,
2018-09-19 11:11:59 +00:00
recommendations : [ ] timestampedRecommendation { } ,
2018-08-08 13:00:17 +00:00
}
tc . runTest ( t )
}
2018-02-20 18:14:43 +00:00
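// The two failed pods are dropped entirely; the five running pods use 1400m of 5*1000m
// requested = 28% against a 50% target, so the expected recommendation is ceil(5 * 28/50) = 3.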
func TestScaleDownIgnoresFailedPods ( t * testing . T ) {
tc := testCase {
2018-07-19 12:42:28 +00:00
minReplicas : 2 ,
maxReplicas : 6 ,
initialReplicas : 5 ,
expectedDesiredReplicas : 3 ,
CPUTarget : 50 ,
CPUCurrent : 28 ,
verifyCPUCurrent : true ,
reportedLevels : [ ] uint64 { 100 , 300 , 500 , 250 , 250 } ,
reportedCPURequests : [ ] resource . Quantity { resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) } ,
useMetricsAPI : true ,
reportedPodReadiness : [ ] v1 . ConditionStatus { v1 . ConditionTrue , v1 . ConditionTrue , v1 . ConditionTrue , v1 . ConditionTrue , v1 . ConditionTrue , v1 . ConditionFalse , v1 . ConditionFalse } ,
reportedPodPhase : [ ] v1 . PodPhase { v1 . PodRunning , v1 . PodRunning , v1 . PodRunning , v1 . PodRunning , v1 . PodRunning , v1 . PodFailed , v1 . PodFailed } ,
2018-09-19 11:11:59 +00:00
recommendations : [ ] timestampedRecommendation { } ,
2018-02-20 18:14:43 +00:00
}
tc . runTest ( t )
}
2018-08-28 16:27:47 +00:00
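// Pods with a deletion timestamp are dropped; the five remaining pods use 1400m of 5*1000m
// requested = 28% against a 50% target, so the expected recommendation is ceil(5 * 28/50) = 3.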
func TestScaleDownIgnoresDeletionPods ( t * testing . T ) {
tc := testCase {
minReplicas : 2 ,
maxReplicas : 6 ,
initialReplicas : 5 ,
expectedDesiredReplicas : 3 ,
CPUTarget : 50 ,
CPUCurrent : 28 ,
verifyCPUCurrent : true ,
reportedLevels : [ ] uint64 { 100 , 300 , 500 , 250 , 250 } ,
reportedCPURequests : [ ] resource . Quantity { resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) } ,
useMetricsAPI : true ,
reportedPodReadiness : [ ] v1 . ConditionStatus { v1 . ConditionTrue , v1 . ConditionTrue , v1 . ConditionTrue , v1 . ConditionTrue , v1 . ConditionTrue , v1 . ConditionFalse , v1 . ConditionFalse } ,
reportedPodPhase : [ ] v1 . PodPhase { v1 . PodRunning , v1 . PodRunning , v1 . PodRunning , v1 . PodRunning , v1 . PodRunning , v1 . PodRunning , v1 . PodRunning } ,
reportedPodDeletionTimestamp : [ ] bool { false , false , false , false , false , true , true } ,
2018-09-19 11:11:59 +00:00
recommendations : [ ] timestampedRecommendation { } ,
2018-08-28 16:27:47 +00:00
}
tc . runTest ( t )
}
2015-09-14 08:14:32 +00:00
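// Usage is 3060m of 3000m requested = 102% against a 100% target; the ratio falls within the
// default 10% tolerance, so no scale should occur.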
func TestTolerance ( t * testing . T ) {
tc := testCase {
2018-07-19 12:42:28 +00:00
minReplicas : 1 ,
maxReplicas : 5 ,
initialReplicas : 3 ,
expectedDesiredReplicas : 3 ,
CPUTarget : 100 ,
reportedLevels : [ ] uint64 { 1010 , 1030 , 1020 } ,
reportedCPURequests : [ ] resource . Quantity { resource . MustParse ( "0.9" ) , resource . MustParse ( "1.0" ) , resource . MustParse ( "1.1" ) } ,
useMetricsAPI : true ,
2017-05-24 21:09:47 +00:00
expectedConditions : statusOkWithOverrides ( autoscalingv2 . HorizontalPodAutoscalerCondition {
Type : autoscalingv2 . AbleToScale ,
Status : v1 . ConditionTrue ,
Reason : "ReadyForNewScale" ,
} ) ,
2016-01-29 11:20:19 +00:00
}
tc . runTest ( t )
}
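// The average pod metric is about 20.3 qps against a 20 qps target; the ratio falls within
// the default 10% tolerance, so no scale should occur.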
func TestToleranceCM ( t * testing . T ) {
2018-06-28 18:28:13 +00:00
averageValue := resource . MustParse ( "20.0" )
2016-01-29 11:20:19 +00:00
tc := testCase {
2018-07-19 12:42:28 +00:00
minReplicas : 1 ,
maxReplicas : 5 ,
initialReplicas : 3 ,
expectedDesiredReplicas : 3 ,
2016-12-02 20:18:26 +00:00
metricsTarget : [ ] autoscalingv2 . MetricSpec {
{
Type : autoscalingv2 . PodsMetricSourceType ,
Pods : & autoscalingv2 . PodsMetricSource {
2018-06-28 18:28:13 +00:00
Metric : autoscalingv2 . MetricIdentifier {
Name : "qps" ,
} ,
Target : autoscalingv2 . MetricTarget {
AverageValue : & averageValue ,
} ,
2016-12-02 20:18:26 +00:00
} ,
} ,
} ,
2017-02-20 06:17:16 +00:00
reportedLevels : [ ] uint64 { 20000 , 20001 , 21000 } ,
reportedCPURequests : [ ] resource . Quantity { resource . MustParse ( "0.9" ) , resource . MustParse ( "1.0" ) , resource . MustParse ( "1.1" ) } ,
2017-05-24 21:09:47 +00:00
expectedConditions : statusOkWithOverrides ( autoscalingv2 . HorizontalPodAutoscalerCondition {
Type : autoscalingv2 . AbleToScale ,
Status : v1 . ConditionTrue ,
Reason : "ReadyForNewScale" ,
} ) ,
2017-02-20 06:17:16 +00:00
}
tc . runTest ( t )
}
func TestToleranceCMObject ( t * testing . T ) {
2018-06-28 18:28:13 +00:00
targetValue := resource . MustParse ( "20.0" )
2017-02-20 06:17:16 +00:00
tc := testCase {
2018-07-19 12:42:28 +00:00
minReplicas : 1 ,
maxReplicas : 5 ,
initialReplicas : 3 ,
expectedDesiredReplicas : 3 ,
2017-02-20 06:17:16 +00:00
metricsTarget : [ ] autoscalingv2 . MetricSpec {
{
Type : autoscalingv2 . ObjectMetricSourceType ,
Object : & autoscalingv2 . ObjectMetricSource {
2018-06-28 18:28:13 +00:00
DescribedObject : autoscalingv2 . CrossVersionObjectReference {
2018-12-19 16:18:53 +00:00
APIVersion : "apps/v1" ,
2017-02-20 06:17:16 +00:00
Kind : "Deployment" ,
Name : "some-deployment" ,
} ,
2018-06-28 18:28:13 +00:00
Metric : autoscalingv2 . MetricIdentifier {
Name : "qps" ,
} ,
Target : autoscalingv2 . MetricTarget {
Value : & targetValue ,
} ,
2017-02-20 06:17:16 +00:00
} ,
} ,
} ,
reportedLevels : [ ] uint64 { 20050 } ,
2016-01-29 11:20:19 +00:00
reportedCPURequests : [ ] resource . Quantity { resource . MustParse ( "0.9" ) , resource . MustParse ( "1.0" ) , resource . MustParse ( "1.1" ) } ,
2017-05-24 21:09:47 +00:00
expectedConditions : statusOkWithOverrides ( autoscalingv2 . HorizontalPodAutoscalerCondition {
Type : autoscalingv2 . AbleToScale ,
Status : v1 . ConditionTrue ,
Reason : "ReadyForNewScale" ,
} ) ,
2015-08-25 17:16:47 +00:00
}
2015-09-14 08:14:32 +00:00
tc . runTest ( t )
}
2015-08-20 12:55:28 +00:00
2018-02-21 10:19:51 +00:00
func TestToleranceCMExternal ( t * testing . T ) {
tc := testCase {
2018-07-19 12:42:28 +00:00
minReplicas : 2 ,
maxReplicas : 6 ,
initialReplicas : 4 ,
expectedDesiredReplicas : 4 ,
2018-02-21 10:19:51 +00:00
metricsTarget : [ ] autoscalingv2 . MetricSpec {
{
Type : autoscalingv2 . ExternalMetricSourceType ,
External : & autoscalingv2 . ExternalMetricSource {
2018-06-28 18:28:13 +00:00
Metric : autoscalingv2 . MetricIdentifier {
Name : "qps" ,
Selector : & metav1 . LabelSelector { } ,
} ,
Target : autoscalingv2 . MetricTarget {
Value : resource . NewMilliQuantity ( 8666 , resource . DecimalSI ) ,
} ,
2018-02-21 10:19:51 +00:00
} ,
} ,
} ,
reportedLevels : [ ] uint64 { 8600 } ,
expectedConditions : statusOkWithOverrides ( autoscalingv2 . HorizontalPodAutoscalerCondition {
Type : autoscalingv2 . AbleToScale ,
Status : v1 . ConditionTrue ,
Reason : "ReadyForNewScale" ,
} ) ,
}
tc . runTest ( t )
}
2019-01-23 20:00:05 +00:00
func TestTolerancePerPodCMObject ( t * testing . T ) {
tc := testCase {
minReplicas : 2 ,
maxReplicas : 6 ,
initialReplicas : 4 ,
expectedDesiredReplicas : 4 ,
metricsTarget : [ ] autoscalingv2 . MetricSpec {
{
Type : autoscalingv2 . ObjectMetricSourceType ,
Object : & autoscalingv2 . ObjectMetricSource {
DescribedObject : autoscalingv2 . CrossVersionObjectReference {
APIVersion : "apps/v1" ,
Kind : "Deployment" ,
Name : "some-deployment" ,
} ,
Metric : autoscalingv2 . MetricIdentifier {
Name : "qps" ,
Selector : & metav1 . LabelSelector { } ,
} ,
Target : autoscalingv2 . MetricTarget {
AverageValue : resource . NewMilliQuantity ( 2200 , resource . DecimalSI ) ,
} ,
} ,
} ,
} ,
reportedLevels : [ ] uint64 { 8600 } ,
expectedConditions : statusOkWithOverrides ( autoscalingv2 . HorizontalPodAutoscalerCondition {
Type : autoscalingv2 . AbleToScale ,
Status : v1 . ConditionTrue ,
Reason : "ReadyForNewScale" ,
} ) ,
}
tc . runTest ( t )
}
2018-02-21 10:19:51 +00:00
func TestTolerancePerPodCMExternal ( t * testing . T ) {
tc := testCase {
2018-07-19 12:42:28 +00:00
minReplicas : 2 ,
maxReplicas : 6 ,
initialReplicas : 4 ,
expectedDesiredReplicas : 4 ,
2018-02-21 10:19:51 +00:00
metricsTarget : [ ] autoscalingv2 . MetricSpec {
{
Type : autoscalingv2 . ExternalMetricSourceType ,
External : & autoscalingv2 . ExternalMetricSource {
2018-06-28 18:28:13 +00:00
Metric : autoscalingv2 . MetricIdentifier {
Name : "qps" ,
Selector : & metav1 . LabelSelector { } ,
} ,
Target : autoscalingv2 . MetricTarget {
AverageValue : resource . NewMilliQuantity ( 2200 , resource . DecimalSI ) ,
} ,
2018-02-21 10:19:51 +00:00
} ,
} ,
} ,
reportedLevels : [ ] uint64 { 8600 } ,
expectedConditions : statusOkWithOverrides ( autoscalingv2 . HorizontalPodAutoscalerCondition {
Type : autoscalingv2 . AbleToScale ,
Status : v1 . ConditionTrue ,
Reason : "ReadyForNewScale" ,
} ) ,
}
tc . runTest ( t )
}
2015-09-17 12:08:39 +00:00
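// Usage is (10+95+10)m of 3000m requested, roughly 3.8% against a 90% target; the raw
// recommendation of 1 replica is clamped to minReplicas (2) and reported as TooFewReplicas.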
func TestMinReplicas ( t * testing . T ) {
2015-09-14 08:14:32 +00:00
tc := testCase {
2018-07-19 12:42:28 +00:00
minReplicas : 2 ,
maxReplicas : 5 ,
initialReplicas : 3 ,
expectedDesiredReplicas : 2 ,
CPUTarget : 90 ,
reportedLevels : [ ] uint64 { 10 , 95 , 10 } ,
reportedCPURequests : [ ] resource . Quantity { resource . MustParse ( "0.9" ) , resource . MustParse ( "1.0" ) , resource . MustParse ( "1.1" ) } ,
useMetricsAPI : true ,
2017-05-24 21:09:47 +00:00
expectedConditions : statusOkWithOverrides ( autoscalingv2 . HorizontalPodAutoscalerCondition {
Type : autoscalingv2 . ScalingLimited ,
Status : v1 . ConditionTrue ,
Reason : "TooFewReplicas" ,
} ) ,
2018-09-19 11:11:59 +00:00
recommendations : [ ] timestampedRecommendation { } ,
2015-09-14 13:08:43 +00:00
}
2015-09-14 08:14:32 +00:00
tc . runTest ( t )
}
2015-09-14 13:08:43 +00:00
2017-07-16 19:36:08 +00:00
func TestMinReplicasDesiredZero ( t * testing . T ) {
tc := testCase {
2018-07-19 12:42:28 +00:00
minReplicas : 2 ,
maxReplicas : 5 ,
initialReplicas : 3 ,
expectedDesiredReplicas : 2 ,
CPUTarget : 90 ,
reportedLevels : [ ] uint64 { 0 , 0 , 0 } ,
reportedCPURequests : [ ] resource . Quantity { resource . MustParse ( "0.9" ) , resource . MustParse ( "1.0" ) , resource . MustParse ( "1.1" ) } ,
useMetricsAPI : true ,
2017-07-16 19:36:08 +00:00
expectedConditions : statusOkWithOverrides ( autoscalingv2 . HorizontalPodAutoscalerCondition {
Type : autoscalingv2 . ScalingLimited ,
Status : v1 . ConditionTrue ,
Reason : "TooFewReplicas" ,
} ) ,
2018-09-19 11:11:59 +00:00
recommendations : [ ] timestampedRecommendation { } ,
2017-07-16 19:36:08 +00:00
}
tc . runTest ( t )
}
2016-02-12 15:26:59 +00:00
func TestZeroReplicas ( t * testing . T ) {
tc := testCase {
2018-07-19 12:42:28 +00:00
minReplicas : 3 ,
maxReplicas : 5 ,
initialReplicas : 0 ,
expectedDesiredReplicas : 0 ,
CPUTarget : 90 ,
reportedLevels : [ ] uint64 { } ,
reportedCPURequests : [ ] resource . Quantity { } ,
useMetricsAPI : true ,
2017-05-24 21:09:47 +00:00
expectedConditions : [ ] autoscalingv1 . HorizontalPodAutoscalerCondition {
{ Type : autoscalingv1 . AbleToScale , Status : v1 . ConditionTrue , Reason : "SucceededGetScale" } ,
{ Type : autoscalingv1 . ScalingActive , Status : v1 . ConditionFalse , Reason : "ScalingDisabled" } ,
} ,
2016-02-12 15:26:59 +00:00
}
tc . runTest ( t )
}
2016-02-23 14:18:49 +00:00
func TestTooFewReplicas ( t * testing . T ) {
2016-02-12 15:26:59 +00:00
tc := testCase {
2018-07-19 12:42:28 +00:00
minReplicas : 3 ,
maxReplicas : 5 ,
initialReplicas : 2 ,
expectedDesiredReplicas : 3 ,
CPUTarget : 90 ,
reportedLevels : [ ] uint64 { } ,
reportedCPURequests : [ ] resource . Quantity { } ,
useMetricsAPI : true ,
2017-05-24 21:09:47 +00:00
expectedConditions : [ ] autoscalingv1 . HorizontalPodAutoscalerCondition {
{ Type : autoscalingv1 . AbleToScale , Status : v1 . ConditionTrue , Reason : "SucceededRescale" } ,
} ,
2016-02-12 15:26:59 +00:00
}
tc . runTest ( t )
}
func TestTooManyReplicas ( t * testing . T ) {
tc := testCase {
2018-07-19 12:42:28 +00:00
minReplicas : 3 ,
maxReplicas : 5 ,
initialReplicas : 10 ,
expectedDesiredReplicas : 5 ,
CPUTarget : 90 ,
reportedLevels : [ ] uint64 { } ,
reportedCPURequests : [ ] resource . Quantity { } ,
useMetricsAPI : true ,
2017-05-24 21:09:47 +00:00
expectedConditions : [ ] autoscalingv1 . HorizontalPodAutoscalerCondition {
{ Type : autoscalingv1 . AbleToScale , Status : v1 . ConditionTrue , Reason : "SucceededRescale" } ,
} ,
2016-02-12 15:26:59 +00:00
}
tc . runTest ( t )
}
2015-09-17 12:08:39 +00:00
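// Usage is 18500m of 3000m requested, far above the 90% target; the raw recommendation
// exceeds maxReplicas (5), so it is capped at 5 and reported as TooManyReplicas.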
func TestMaxReplicas ( t * testing . T ) {
2015-09-14 08:14:32 +00:00
tc := testCase {
2018-07-19 12:42:28 +00:00
minReplicas : 2 ,
maxReplicas : 5 ,
initialReplicas : 3 ,
expectedDesiredReplicas : 5 ,
CPUTarget : 90 ,
reportedLevels : [ ] uint64 { 8000 , 9500 , 1000 } ,
reportedCPURequests : [ ] resource . Quantity { resource . MustParse ( "0.9" ) , resource . MustParse ( "1.0" ) , resource . MustParse ( "1.1" ) } ,
useMetricsAPI : true ,
2017-05-24 21:09:47 +00:00
expectedConditions : statusOkWithOverrides ( autoscalingv2 . HorizontalPodAutoscalerCondition {
Type : autoscalingv2 . ScalingLimited ,
Status : v1 . ConditionTrue ,
Reason : "TooManyReplicas" ,
} ) ,
2015-09-14 08:14:32 +00:00
}
tc . runTest ( t )
2015-08-20 12:55:28 +00:00
}
2015-09-14 08:14:32 +00:00
func TestSuperfluousMetrics ( t * testing . T ) {
tc := testCase {
2018-07-19 12:42:28 +00:00
minReplicas : 2 ,
maxReplicas : 6 ,
initialReplicas : 4 ,
expectedDesiredReplicas : 6 ,
CPUTarget : 100 ,
reportedLevels : [ ] uint64 { 4000 , 9500 , 3000 , 7000 , 3200 , 2000 } ,
reportedCPURequests : [ ] resource . Quantity { resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) } ,
useMetricsAPI : true ,
2017-05-24 21:09:47 +00:00
expectedConditions : statusOkWithOverrides ( autoscalingv2 . HorizontalPodAutoscalerCondition {
Type : autoscalingv2 . ScalingLimited ,
Status : v1 . ConditionTrue ,
Reason : "TooManyReplicas" ,
} ) ,
2015-08-26 14:17:18 +00:00
}
2015-09-14 08:14:32 +00:00
tc . runTest ( t )
}
2015-08-28 10:24:00 +00:00
2015-09-14 08:14:32 +00:00
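// Only two of the four pods report metrics; since the reported pods point to a scale-down,
// the unreported pods are assumed to use their full request, giving (400+95+1000+1000)m of
// 4000m requested, about 62% against a 100% target, and an expected recommendation of 3.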
func TestMissingMetrics ( t * testing . T ) {
tc := testCase {
2018-07-19 12:42:28 +00:00
minReplicas : 2 ,
maxReplicas : 6 ,
initialReplicas : 4 ,
expectedDesiredReplicas : 3 ,
CPUTarget : 100 ,
reportedLevels : [ ] uint64 { 400 , 95 } ,
reportedCPURequests : [ ] resource . Quantity { resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) } ,
useMetricsAPI : true ,
2018-09-19 11:11:59 +00:00
recommendations : [ ] timestampedRecommendation { } ,
2015-08-20 12:55:28 +00:00
}
2015-09-14 08:14:32 +00:00
tc . runTest ( t )
}
func TestEmptyMetrics ( t * testing . T ) {
tc := testCase {
2018-07-19 12:42:28 +00:00
minReplicas : 2 ,
maxReplicas : 6 ,
initialReplicas : 4 ,
expectedDesiredReplicas : 4 ,
CPUTarget : 100 ,
reportedLevels : [ ] uint64 { } ,
reportedCPURequests : [ ] resource . Quantity { resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) } ,
useMetricsAPI : true ,
2017-05-24 21:09:47 +00:00
expectedConditions : [ ] autoscalingv1 . HorizontalPodAutoscalerCondition {
{ Type : autoscalingv1 . AbleToScale , Status : v1 . ConditionTrue , Reason : "SucceededGetScale" } ,
{ Type : autoscalingv1 . ScalingActive , Status : v1 . ConditionFalse , Reason : "FailedGetResourceMetric" } ,
} ,
2015-08-20 12:55:28 +00:00
}
2015-09-14 08:14:32 +00:00
tc . runTest ( t )
}
2015-10-13 15:24:23 +00:00
func TestEmptyCPURequest ( t * testing . T ) {
2015-09-14 08:14:32 +00:00
tc := testCase {
2018-07-19 12:42:28 +00:00
minReplicas : 1 ,
maxReplicas : 5 ,
initialReplicas : 1 ,
expectedDesiredReplicas : 1 ,
CPUTarget : 100 ,
reportedLevels : [ ] uint64 { 200 } ,
reportedCPURequests : [ ] resource . Quantity { } ,
useMetricsAPI : true ,
2017-05-24 21:09:47 +00:00
expectedConditions : [ ] autoscalingv1 . HorizontalPodAutoscalerCondition {
{ Type : autoscalingv1 . AbleToScale , Status : v1 . ConditionTrue , Reason : "SucceededGetScale" } ,
{ Type : autoscalingv1 . ScalingActive , Status : v1 . ConditionFalse , Reason : "FailedGetResourceMetric" } ,
} ,
2015-10-13 15:24:23 +00:00
}
tc . runTest ( t )
}
func TestEventCreated ( t * testing . T ) {
tc := testCase {
2018-07-19 12:42:28 +00:00
minReplicas : 1 ,
maxReplicas : 5 ,
initialReplicas : 1 ,
expectedDesiredReplicas : 2 ,
CPUTarget : 50 ,
reportedLevels : [ ] uint64 { 200 } ,
reportedCPURequests : [ ] resource . Quantity { resource . MustParse ( "0.2" ) } ,
verifyEvents : true ,
useMetricsAPI : true ,
2015-08-25 17:16:47 +00:00
}
2015-09-14 08:14:32 +00:00
tc . runTest ( t )
}
2015-08-25 17:16:47 +00:00
2015-09-14 08:14:32 +00:00
func TestEventNotCreated ( t * testing . T ) {
tc := testCase {
2018-07-19 12:42:28 +00:00
minReplicas : 1 ,
maxReplicas : 5 ,
initialReplicas : 2 ,
expectedDesiredReplicas : 2 ,
CPUTarget : 50 ,
reportedLevels : [ ] uint64 { 200 , 200 } ,
reportedCPURequests : [ ] resource . Quantity { resource . MustParse ( "0.4" ) , resource . MustParse ( "0.4" ) } ,
verifyEvents : true ,
useMetricsAPI : true ,
2017-05-24 21:09:47 +00:00
expectedConditions : statusOkWithOverrides ( autoscalingv2 . HorizontalPodAutoscalerCondition {
Type : autoscalingv2 . AbleToScale ,
Status : v1 . ConditionTrue ,
Reason : "ReadyForNewScale" ,
} ) ,
2015-09-14 08:14:32 +00:00
}
tc . runTest ( t )
2015-08-20 12:55:28 +00:00
}
2015-10-13 15:24:23 +00:00
2016-10-17 15:14:15 +00:00
func TestMissingReports ( t * testing . T ) {
tc := testCase {
2018-07-19 12:42:28 +00:00
minReplicas : 1 ,
maxReplicas : 5 ,
initialReplicas : 4 ,
expectedDesiredReplicas : 2 ,
CPUTarget : 50 ,
reportedLevels : [ ] uint64 { 200 } ,
reportedCPURequests : [ ] resource . Quantity { resource . MustParse ( "0.2" ) } ,
useMetricsAPI : true ,
2018-09-19 11:11:59 +00:00
recommendations : [ ] timestampedRecommendation { } ,
2016-10-17 15:14:15 +00:00
}
tc . runTest ( t )
}
func TestUpscaleCap ( t * testing . T ) {
tc := testCase {
2018-07-19 12:42:28 +00:00
minReplicas : 1 ,
maxReplicas : 100 ,
initialReplicas : 3 ,
expectedDesiredReplicas : 24 ,
CPUTarget : 10 ,
reportedLevels : [ ] uint64 { 100 , 200 , 300 } ,
reportedCPURequests : [ ] resource . Quantity { resource . MustParse ( "0.1" ) , resource . MustParse ( "0.1" ) , resource . MustParse ( "0.1" ) } ,
useMetricsAPI : true ,
2017-05-24 21:09:47 +00:00
expectedConditions : statusOkWithOverrides ( autoscalingv2 . HorizontalPodAutoscalerCondition {
Type : autoscalingv2 . ScalingLimited ,
Status : v1 . ConditionTrue ,
Reason : "ScaleUpLimit" ,
} ) ,
}
tc . runTest ( t )
}
2017-10-11 01:48:34 +00:00
func TestUpscaleCapGreaterThanMaxReplicas ( t * testing . T ) {
tc := testCase {
minReplicas : 1 ,
maxReplicas : 20 ,
initialReplicas : 3 ,
2018-07-19 12:42:28 +00:00
// expectedDesiredReplicas would be 24 without maxReplicas
expectedDesiredReplicas : 20 ,
CPUTarget : 10 ,
reportedLevels : [ ] uint64 { 100 , 200 , 300 } ,
reportedCPURequests : [ ] resource . Quantity { resource . MustParse ( "0.1" ) , resource . MustParse ( "0.1" ) , resource . MustParse ( "0.1" ) } ,
useMetricsAPI : true ,
2017-10-11 01:48:34 +00:00
expectedConditions : statusOkWithOverrides ( autoscalingv2 . HorizontalPodAutoscalerCondition {
Type : autoscalingv2 . ScalingLimited ,
Status : v1 . ConditionTrue ,
Reason : "TooManyReplicas" ,
} ) ,
}
tc . runTest ( t )
}
2017-05-24 21:09:47 +00:00
func TestConditionInvalidSelectorMissing ( t * testing . T ) {
tc := testCase {
2018-07-19 12:42:28 +00:00
minReplicas : 1 ,
maxReplicas : 100 ,
initialReplicas : 3 ,
expectedDesiredReplicas : 3 ,
CPUTarget : 10 ,
reportedLevels : [ ] uint64 { 100 , 200 , 300 } ,
reportedCPURequests : [ ] resource . Quantity { resource . MustParse ( "0.1" ) , resource . MustParse ( "0.1" ) , resource . MustParse ( "0.1" ) } ,
useMetricsAPI : true ,
2017-05-24 21:09:47 +00:00
expectedConditions : [ ] autoscalingv1 . HorizontalPodAutoscalerCondition {
{
Type : autoscalingv1 . AbleToScale ,
Status : v1 . ConditionTrue ,
Reason : "SucceededGetScale" ,
} ,
{
Type : autoscalingv1 . ScalingActive ,
Status : v1 . ConditionFalse ,
Reason : "InvalidSelector" ,
} ,
} ,
}
2018-02-21 10:19:51 +00:00
_ , _ , _ , _ , testScaleClient := tc . prepareTestClient ( t )
2017-10-11 18:31:04 +00:00
tc . testScaleClient = testScaleClient
2017-05-24 21:09:47 +00:00
2017-10-11 18:31:04 +00:00
testScaleClient . PrependReactor ( "get" , "replicationcontrollers" , func ( action core . Action ) ( handled bool , ret runtime . Object , err error ) {
obj := & autoscalingv1 . Scale {
2017-05-24 21:09:47 +00:00
ObjectMeta : metav1 . ObjectMeta {
Name : tc . resource . name ,
} ,
2017-10-11 18:31:04 +00:00
Spec : autoscalingv1 . ScaleSpec {
2017-05-24 21:09:47 +00:00
Replicas : tc . initialReplicas ,
} ,
2017-10-11 18:31:04 +00:00
Status : autoscalingv1 . ScaleStatus {
2017-05-24 21:09:47 +00:00
Replicas : tc . initialReplicas ,
} ,
}
return true , obj , nil
} )
tc . runTest ( t )
}
func TestConditionInvalidSelectorUnparsable ( t * testing . T ) {
tc := testCase {
2018-07-19 12:42:28 +00:00
minReplicas : 1 ,
maxReplicas : 100 ,
initialReplicas : 3 ,
expectedDesiredReplicas : 3 ,
CPUTarget : 10 ,
reportedLevels : [ ] uint64 { 100 , 200 , 300 } ,
reportedCPURequests : [ ] resource . Quantity { resource . MustParse ( "0.1" ) , resource . MustParse ( "0.1" ) , resource . MustParse ( "0.1" ) } ,
useMetricsAPI : true ,
2017-05-24 21:09:47 +00:00
expectedConditions : [ ] autoscalingv1 . HorizontalPodAutoscalerCondition {
{
Type : autoscalingv1 . AbleToScale ,
Status : v1 . ConditionTrue ,
Reason : "SucceededGetScale" ,
} ,
{
Type : autoscalingv1 . ScalingActive ,
Status : v1 . ConditionFalse ,
Reason : "InvalidSelector" ,
} ,
} ,
}
2018-02-21 18:05:26 +00:00
_ , _ , _ , _ , testScaleClient := tc . prepareTestClient ( t )
2017-10-11 18:31:04 +00:00
tc . testScaleClient = testScaleClient
2017-05-24 21:09:47 +00:00
2017-10-11 18:31:04 +00:00
testScaleClient . PrependReactor ( "get" , "replicationcontrollers" , func ( action core . Action ) ( handled bool , ret runtime . Object , err error ) {
obj := & autoscalingv1 . Scale {
2017-05-24 21:09:47 +00:00
ObjectMeta : metav1 . ObjectMeta {
Name : tc . resource . name ,
} ,
2017-10-11 18:31:04 +00:00
Spec : autoscalingv1 . ScaleSpec {
2017-05-24 21:09:47 +00:00
Replicas : tc . initialReplicas ,
} ,
2017-10-11 18:31:04 +00:00
Status : autoscalingv1 . ScaleStatus {
Replicas : tc . initialReplicas ,
Selector : "cheddar cheese" ,
2017-05-24 21:09:47 +00:00
} ,
}
return true , obj , nil
} )
tc . runTest ( t )
}
func TestConditionFailedGetMetrics ( t * testing . T ) {
2018-06-28 18:28:13 +00:00
targetValue := resource . MustParse ( "15.0" )
averageValue := resource . MustParse ( "15.0" )
2017-05-24 21:09:47 +00:00
metricsTargets := map [ string ] [ ] autoscalingv2 . MetricSpec {
"FailedGetResourceMetric" : nil ,
"FailedGetPodsMetric" : {
{
Type : autoscalingv2 . PodsMetricSourceType ,
Pods : & autoscalingv2 . PodsMetricSource {
2018-06-28 18:28:13 +00:00
Metric : autoscalingv2 . MetricIdentifier {
Name : "qps" ,
} ,
Target : autoscalingv2 . MetricTarget {
AverageValue : & averageValue ,
} ,
2017-05-24 21:09:47 +00:00
} ,
} ,
} ,
"FailedGetObjectMetric" : {
{
Type : autoscalingv2 . ObjectMetricSourceType ,
Object : & autoscalingv2 . ObjectMetricSource {
2018-06-28 18:28:13 +00:00
DescribedObject : autoscalingv2 . CrossVersionObjectReference {
2018-12-19 16:18:53 +00:00
APIVersion : "apps/v1" ,
2017-05-24 21:09:47 +00:00
Kind : "Deployment" ,
Name : "some-deployment" ,
} ,
2018-06-28 18:28:13 +00:00
Metric : autoscalingv2 . MetricIdentifier {
Name : "qps" ,
} ,
Target : autoscalingv2 . MetricTarget {
Value : & targetValue ,
} ,
2017-05-24 21:09:47 +00:00
} ,
} ,
} ,
2018-03-19 14:22:36 +00:00
"FailedGetExternalMetric" : {
{
Type : autoscalingv2 . ExternalMetricSourceType ,
External : & autoscalingv2 . ExternalMetricSource {
2018-06-28 18:28:13 +00:00
Metric : autoscalingv2 . MetricIdentifier {
Name : "qps" ,
Selector : & metav1 . LabelSelector { } ,
} ,
Target : autoscalingv2 . MetricTarget {
Value : resource . NewMilliQuantity ( 300 , resource . DecimalSI ) ,
} ,
2018-03-19 14:22:36 +00:00
} ,
} ,
} ,
2017-05-24 21:09:47 +00:00
}
for reason , specs := range metricsTargets {
tc := testCase {
2018-07-19 12:42:28 +00:00
minReplicas : 1 ,
maxReplicas : 100 ,
initialReplicas : 3 ,
expectedDesiredReplicas : 3 ,
CPUTarget : 10 ,
reportedLevels : [ ] uint64 { 100 , 200 , 300 } ,
reportedCPURequests : [ ] resource . Quantity { resource . MustParse ( "0.1" ) , resource . MustParse ( "0.1" ) , resource . MustParse ( "0.1" ) } ,
useMetricsAPI : true ,
2017-05-24 21:09:47 +00:00
}
2018-03-19 14:22:36 +00:00
_ , testMetricsClient , testCMClient , testEMClient , _ := tc . prepareTestClient ( t )
2017-05-24 21:09:47 +00:00
tc . testMetricsClient = testMetricsClient
tc . testCMClient = testCMClient
2018-03-19 14:22:36 +00:00
tc . testEMClient = testEMClient
2017-05-24 21:09:47 +00:00
testMetricsClient . PrependReactor ( "list" , "pods" , func ( action core . Action ) ( handled bool , ret runtime . Object , err error ) {
2017-09-09 18:53:34 +00:00
return true , & metricsapi . PodMetricsList { } , fmt . Errorf ( "something went wrong" )
2017-05-24 21:09:47 +00:00
} )
testCMClient . PrependReactor ( "get" , "*" , func ( action core . Action ) ( handled bool , ret runtime . Object , err error ) {
2017-09-09 18:53:34 +00:00
return true , & cmapi . MetricValueList { } , fmt . Errorf ( "something went wrong" )
} )
testEMClient . PrependReactor ( "list" , "*" , func ( action core . Action ) ( handled bool , ret runtime . Object , err error ) {
return true , & emapi . ExternalMetricValueList { } , fmt . Errorf ( "something went wrong" )
} )
tc . expectedConditions = [ ] autoscalingv1 . HorizontalPodAutoscalerCondition {
{ Type : autoscalingv1 . AbleToScale , Status : v1 . ConditionTrue , Reason : "SucceededGetScale" } ,
{ Type : autoscalingv1 . ScalingActive , Status : v1 . ConditionFalse , Reason : reason } ,
}
if specs != nil {
tc . CPUTarget = 0
} else {
tc . CPUTarget = 10
}
tc . metricsTarget = specs
tc . runTest ( t )
}
}
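
// TestConditionInvalidSourceType verifies that an unrecognized metric source type
// results in ScalingActive=False with reason InvalidMetricSourceType and no scaling.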
func TestConditionInvalidSourceType ( t * testing . T ) {
tc := testCase {
minReplicas : 2 ,
maxReplicas : 6 ,
initialReplicas : 3 ,
expectedDesiredReplicas : 3 ,
CPUTarget : 0 ,
metricsTarget : [ ] autoscalingv2 . MetricSpec {
{
Type : "CheddarCheese" ,
} ,
} ,
reportedLevels : [ ] uint64 { 20000 } ,
expectedConditions : [ ] autoscalingv1 . HorizontalPodAutoscalerCondition {
{
Type : autoscalingv1 . AbleToScale ,
Status : v1 . ConditionTrue ,
Reason : "SucceededGetScale" ,
} ,
{
Type : autoscalingv1 . ScalingActive ,
Status : v1 . ConditionFalse ,
Reason : "InvalidMetricSourceType" ,
} ,
} ,
}
tc . runTest ( t )
}
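
// TestConditionFailedGetScale verifies that an error while fetching the scale
// subresource sets AbleToScale=False with reason FailedGetScale.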
func TestConditionFailedGetScale ( t * testing . T ) {
tc := testCase {
minReplicas : 1 ,
maxReplicas : 100 ,
initialReplicas : 3 ,
expectedDesiredReplicas : 3 ,
CPUTarget : 10 ,
reportedLevels : [ ] uint64 { 100 , 200 , 300 } ,
reportedCPURequests : [ ] resource . Quantity { resource . MustParse ( "0.1" ) , resource . MustParse ( "0.1" ) , resource . MustParse ( "0.1" ) } ,
useMetricsAPI : true ,
expectedConditions : [ ] autoscalingv1 . HorizontalPodAutoscalerCondition {
{
Type : autoscalingv1 . AbleToScale ,
Status : v1 . ConditionFalse ,
Reason : "FailedGetScale" ,
} ,
} ,
}
_ , _ , _ , _ , testScaleClient := tc . prepareTestClient ( t )
tc . testScaleClient = testScaleClient
testScaleClient . PrependReactor ( "get" , "replicationcontrollers" , func ( action core . Action ) ( handled bool , ret runtime . Object , err error ) {
return true , & autoscalingv1 . Scale { } , fmt . Errorf ( "something went wrong" )
} )
tc . runTest ( t )
}
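
// TestConditionFailedUpdateScale verifies that an error while updating the scale
// subresource sets AbleToScale=False with reason FailedUpdateScale.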
func TestConditionFailedUpdateScale ( t * testing . T ) {
tc := testCase {
minReplicas : 1 ,
maxReplicas : 5 ,
initialReplicas : 3 ,
expectedDesiredReplicas : 3 ,
CPUTarget : 100 ,
reportedLevels : [ ] uint64 { 150 , 150 , 150 } ,
reportedCPURequests : [ ] resource . Quantity { resource . MustParse ( "0.1" ) , resource . MustParse ( "0.1" ) , resource . MustParse ( "0.1" ) } ,
useMetricsAPI : true ,
expectedConditions : statusOkWithOverrides ( autoscalingv2 . HorizontalPodAutoscalerCondition {
Type : autoscalingv2 . AbleToScale ,
Status : v1 . ConditionFalse ,
Reason : "FailedUpdateScale" ,
} ) ,
}
_ , _ , _ , _ , testScaleClient := tc . prepareTestClient ( t )
tc . testScaleClient = testScaleClient
testScaleClient . PrependReactor ( "update" , "replicationcontrollers" , func ( action core . Action ) ( handled bool , ret runtime . Object , err error ) {
return true , & autoscalingv1 . Scale { } , fmt . Errorf ( "something went wrong" )
} )
tc . runTest ( t )
}
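
// NoTestBackoffUpscale deliberately lacks the Test prefix, so the Go test runner
// skips it; the backoff-on-upscale scenario is kept here but not executed.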
func NoTestBackoffUpscale ( t * testing . T ) {
time := metav1 . Time { Time : time . Now ( ) }
tc := testCase {
minReplicas : 1 ,
maxReplicas : 5 ,
initialReplicas : 3 ,
expectedDesiredReplicas : 3 ,
CPUTarget : 100 ,
reportedLevels : [ ] uint64 { 150 , 150 , 150 } ,
reportedCPURequests : [ ] resource . Quantity { resource . MustParse ( "0.1" ) , resource . MustParse ( "0.1" ) , resource . MustParse ( "0.1" ) } ,
useMetricsAPI : true ,
lastScaleTime : & time ,
expectedConditions : statusOkWithOverrides ( autoscalingv2 . HorizontalPodAutoscalerCondition {
Type : autoscalingv2 . AbleToScale ,
Status : v1 . ConditionTrue ,
Reason : "ReadyForNewScale" ,
} , autoscalingv2 . HorizontalPodAutoscalerCondition {
Type : autoscalingv2 . AbleToScale ,
Status : v1 . ConditionTrue ,
Reason : "SucceededRescale" ,
} ) ,
}
tc . runTest ( t )
}
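
// TestNoBackoffUpscaleCM verifies that a recent lastScaleTime does not hold back
// an upscale driven purely by a pods (custom) metric.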
func TestNoBackoffUpscaleCM ( t * testing . T ) {
averageValue := resource . MustParse ( "15.0" )
time := metav1 . Time { Time : time . Now ( ) }
tc := testCase {
minReplicas : 1 ,
maxReplicas : 5 ,
initialReplicas : 3 ,
expectedDesiredReplicas : 4 ,
CPUTarget : 0 ,
metricsTarget : [ ] autoscalingv2 . MetricSpec {
{
Type : autoscalingv2 . PodsMetricSourceType ,
Pods : & autoscalingv2 . PodsMetricSource {
Metric : autoscalingv2 . MetricIdentifier {
Name : "qps" ,
} ,
Target : autoscalingv2 . MetricTarget {
AverageValue : & averageValue ,
} ,
} ,
} ,
} ,
reportedLevels : [ ] uint64 { 20000 , 10000 , 30000 } ,
reportedCPURequests : [ ] resource . Quantity { resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) } ,
//useMetricsAPI: true,
lastScaleTime : & time ,
expectedConditions : statusOkWithOverrides ( autoscalingv2 . HorizontalPodAutoscalerCondition {
Type : autoscalingv2 . AbleToScale ,
Status : v1 . ConditionTrue ,
Reason : "ReadyForNewScale" ,
} , autoscalingv2 . HorizontalPodAutoscalerCondition {
Type : autoscalingv2 . AbleToScale ,
Status : v1 . ConditionTrue ,
Reason : "SucceededRescale" ,
} , autoscalingv2 . HorizontalPodAutoscalerCondition {
Type : autoscalingv2 . ScalingLimited ,
Status : v1 . ConditionFalse ,
Reason : "DesiredWithinRange" ,
} ) ,
}
tc . runTest ( t )
}
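
// TestNoBackoffUpscaleCMNoBackoffCpu verifies that with both a pods metric and a
// CPU target, a recent lastScaleTime does not block scaling up, and the result is
// capped at maxReplicas (ScalingLimited=True, reason TooManyReplicas).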
func TestNoBackoffUpscaleCMNoBackoffCpu ( t * testing . T ) {
averageValue := resource . MustParse ( "15.0" )
time := metav1 . Time { Time : time . Now ( ) }
tc := testCase {
minReplicas : 1 ,
maxReplicas : 5 ,
initialReplicas : 3 ,
expectedDesiredReplicas : 5 ,
CPUTarget : 10 ,
metricsTarget : [ ] autoscalingv2 . MetricSpec {
{
Type : autoscalingv2 . PodsMetricSourceType ,
Pods : & autoscalingv2 . PodsMetricSource {
Metric : autoscalingv2 . MetricIdentifier {
Name : "qps" ,
} ,
Target : autoscalingv2 . MetricTarget {
AverageValue : & averageValue ,
} ,
} ,
} ,
} ,
reportedLevels : [ ] uint64 { 20000 , 10000 , 30000 } ,
reportedCPURequests : [ ] resource . Quantity { resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) } ,
useMetricsAPI : true ,
lastScaleTime : & time ,
expectedConditions : statusOkWithOverrides ( autoscalingv2 . HorizontalPodAutoscalerCondition {
Type : autoscalingv2 . AbleToScale ,
Status : v1 . ConditionTrue ,
Reason : "ReadyForNewScale" ,
} , autoscalingv2 . HorizontalPodAutoscalerCondition {
Type : autoscalingv2 . AbleToScale ,
Status : v1 . ConditionTrue ,
Reason : "SucceededRescale" ,
} , autoscalingv2 . HorizontalPodAutoscalerCondition {
Type : autoscalingv2 . ScalingLimited ,
Status : v1 . ConditionTrue ,
Reason : "TooManyReplicas" ,
} ) ,
}
tc . runTest ( t )
}
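
// TestStabilizeDownscale verifies that a downscale is damped by the stabilization
// window: a recent higher recommendation keeps the replica count at 4 and
// AbleToScale reports reason ScaleDownStabilized.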
func TestStabilizeDownscale ( t * testing . T ) {
tc := testCase {
minReplicas : 1 ,
maxReplicas : 5 ,
initialReplicas : 4 ,
expectedDesiredReplicas : 4 ,
CPUTarget : 100 ,
reportedLevels : [ ] uint64 { 50 , 50 , 50 } ,
reportedCPURequests : [ ] resource . Quantity { resource . MustParse ( "0.1" ) , resource . MustParse ( "0.1" ) , resource . MustParse ( "0.1" ) } ,
useMetricsAPI : true ,
expectedConditions : statusOkWithOverrides ( autoscalingv2 . HorizontalPodAutoscalerCondition {
Type : autoscalingv2 . AbleToScale ,
Status : v1 . ConditionTrue ,
Reason : "ReadyForNewScale" ,
} , autoscalingv2 . HorizontalPodAutoscalerCondition {
Type : autoscalingv2 . AbleToScale ,
Status : v1 . ConditionTrue ,
Reason : "ScaleDownStabilized" ,
} ) ,
recommendations : [ ] timestampedRecommendation {
{ 10 , time . Now ( ) . Add ( - 10 * time . Minute ) } ,
{ 4 , time . Now ( ) . Add ( - 1 * time . Minute ) } ,
} ,
}
tc . runTest ( t )
}
// TestComputedToleranceAlgImplementation is a regression test that back-calculates
// a minimal downscaling percentage from a small increase in pod utilization,
// calibrated against the tolerance value.
func TestComputedToleranceAlgImplementation ( t * testing . T ) {
startPods := int32 ( 10 )
// 150 mCPU per pod.
totalUsedCPUOfAllPods := uint64 ( startPods * 150 )
// Each pod starts out asking for 2X what is really needed.
// This means we will have a 50% ratio of used/requested
totalRequestedCPUOfAllPods := int32 ( 2 * totalUsedCPUOfAllPods )
requestedToUsed := float64 ( totalRequestedCPUOfAllPods / int32 ( totalUsedCPUOfAllPods ) )
// Spread the requested amount over 10 pods. We can add some jitter later in reportedLevels.
perPodRequested := totalRequestedCPUOfAllPods / startPods
// Force a minimal scaling event by satisfying (tolerance < 1 - resourcesUsedRatio).
target := math . Abs ( 1 / ( requestedToUsed * ( 1 - defaultTestingTolerance ) ) ) + .01
finalCPUPercentTarget := int32 ( target * 100 )
resourcesUsedRatio := float64 ( totalUsedCPUOfAllPods ) / float64 ( float64 ( totalRequestedCPUOfAllPods ) * target )
// i.e. resourcesUsedRatio * startPods -> scaled-down expectation.
finalPods := int32 ( math . Ceil ( resourcesUsedRatio * float64 ( startPods ) ) )
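// Worked numbers for the first case, assuming defaultTestingTolerance is 0.1 (its
// value elsewhere in this package): used = 1500m across 10 pods, requested = 3000m,
// so requestedToUsed = 2 and target ~ 1/(2*0.9) + 0.01 ~ 0.566, i.e. a 56% CPU target.
// Then resourcesUsedRatio ~ 1500/(3000*0.566) ~ 0.88, so finalPods = ceil(0.88*10) = 9.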
// To breach the tolerance, the utilization ratio must differ from 1 by more than the tolerance value.
tc1 := testCase {
minReplicas : 0 ,
maxReplicas : 1000 ,
initialReplicas : startPods ,
expectedDesiredReplicas : finalPods ,
CPUTarget : finalCPUPercentTarget ,
reportedLevels : [ ] uint64 {
totalUsedCPUOfAllPods / 10 ,
totalUsedCPUOfAllPods / 10 ,
totalUsedCPUOfAllPods / 10 ,
totalUsedCPUOfAllPods / 10 ,
totalUsedCPUOfAllPods / 10 ,
totalUsedCPUOfAllPods / 10 ,
totalUsedCPUOfAllPods / 10 ,
totalUsedCPUOfAllPods / 10 ,
totalUsedCPUOfAllPods / 10 ,
totalUsedCPUOfAllPods / 10 ,
} ,
reportedCPURequests : [ ] resource . Quantity {
resource . MustParse ( fmt . Sprint ( perPodRequested + 100 ) + "m" ) ,
resource . MustParse ( fmt . Sprint ( perPodRequested - 100 ) + "m" ) ,
resource . MustParse ( fmt . Sprint ( perPodRequested + 10 ) + "m" ) ,
resource . MustParse ( fmt . Sprint ( perPodRequested - 10 ) + "m" ) ,
resource . MustParse ( fmt . Sprint ( perPodRequested + 2 ) + "m" ) ,
resource . MustParse ( fmt . Sprint ( perPodRequested - 2 ) + "m" ) ,
resource . MustParse ( fmt . Sprint ( perPodRequested + 1 ) + "m" ) ,
resource . MustParse ( fmt . Sprint ( perPodRequested - 1 ) + "m" ) ,
resource . MustParse ( fmt . Sprint ( perPodRequested ) + "m" ) ,
resource . MustParse ( fmt . Sprint ( perPodRequested ) + "m" ) ,
} ,
useMetricsAPI : true ,
recommendations : [ ] timestampedRecommendation { } ,
}
tc1 . runTest ( t )
target = math . Abs ( 1 / ( requestedToUsed * ( 1 - defaultTestingTolerance ) ) ) + .004
finalCPUPercentTarget = int32 ( target * 100 )
tc2 := testCase {
minReplicas : 0 ,
maxReplicas : 1000 ,
initialReplicas : startPods ,
expectedDesiredReplicas : startPods ,
CPUTarget : finalCPUPercentTarget ,
reportedLevels : [ ] uint64 {
totalUsedCPUOfAllPods / 10 ,
totalUsedCPUOfAllPods / 10 ,
totalUsedCPUOfAllPods / 10 ,
totalUsedCPUOfAllPods / 10 ,
totalUsedCPUOfAllPods / 10 ,
totalUsedCPUOfAllPods / 10 ,
totalUsedCPUOfAllPods / 10 ,
totalUsedCPUOfAllPods / 10 ,
totalUsedCPUOfAllPods / 10 ,
totalUsedCPUOfAllPods / 10 ,
} ,
reportedCPURequests : [ ] resource . Quantity {
resource . MustParse ( fmt . Sprint ( perPodRequested + 100 ) + "m" ) ,
resource . MustParse ( fmt . Sprint ( perPodRequested - 100 ) + "m" ) ,
resource . MustParse ( fmt . Sprint ( perPodRequested + 10 ) + "m" ) ,
resource . MustParse ( fmt . Sprint ( perPodRequested - 10 ) + "m" ) ,
resource . MustParse ( fmt . Sprint ( perPodRequested + 2 ) + "m" ) ,
resource . MustParse ( fmt . Sprint ( perPodRequested - 2 ) + "m" ) ,
resource . MustParse ( fmt . Sprint ( perPodRequested + 1 ) + "m" ) ,
resource . MustParse ( fmt . Sprint ( perPodRequested - 1 ) + "m" ) ,
resource . MustParse ( fmt . Sprint ( perPodRequested ) + "m" ) ,
resource . MustParse ( fmt . Sprint ( perPodRequested ) + "m" ) ,
} ,
useMetricsAPI : true ,
recommendations : [ ] timestampedRecommendation { } ,
expectedConditions : statusOkWithOverrides ( autoscalingv2 . HorizontalPodAutoscalerCondition {
Type : autoscalingv2 . AbleToScale ,
Status : v1 . ConditionTrue ,
Reason : "ReadyForNewScale" ,
} ) ,
}
tc2 . runTest ( t )
}
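
// TestScaleUpRCImmediately verifies that scaling up to minReplicas happens
// immediately when the current replica count is below minReplicas, even with a
// recent lastScaleTime.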
func TestScaleUpRCImmediately ( t * testing . T ) {
time := metav1 . Time { Time : time . Now ( ) }
tc := testCase {
minReplicas : 2 ,
maxReplicas : 6 ,
initialReplicas : 1 ,
expectedDesiredReplicas : 2 ,
verifyCPUCurrent : false ,
reportedLevels : [ ] uint64 { 0 , 0 , 0 , 0 } ,
reportedCPURequests : [ ] resource . Quantity { resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) } ,
useMetricsAPI : true ,
lastScaleTime : & time ,
expectedConditions : [ ] autoscalingv1 . HorizontalPodAutoscalerCondition {
{ Type : autoscalingv1 . AbleToScale , Status : v1 . ConditionTrue , Reason : "SucceededRescale" } ,
} ,
}
tc . runTest ( t )
}
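
// TestScaleDownRCImmediately verifies that scaling down to maxReplicas happens
// immediately when the current replica count exceeds maxReplicas, even with a
// recent lastScaleTime.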
func TestScaleDownRCImmediately ( t * testing . T ) {
time := metav1 . Time { Time : time . Now ( ) }
tc := testCase {
minReplicas : 2 ,
maxReplicas : 5 ,
initialReplicas : 6 ,
expectedDesiredReplicas : 5 ,
CPUTarget : 50 ,
reportedLevels : [ ] uint64 { 8000 , 9500 , 1000 } ,
reportedCPURequests : [ ] resource . Quantity { resource . MustParse ( "0.9" ) , resource . MustParse ( "1.0" ) , resource . MustParse ( "1.1" ) } ,
useMetricsAPI : true ,
lastScaleTime : & time ,
expectedConditions : [ ] autoscalingv1 . HorizontalPodAutoscalerCondition {
{ Type : autoscalingv1 . AbleToScale , Status : v1 . ConditionTrue , Reason : "SucceededRescale" } ,
} ,
}
tc . runTest ( t )
}
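
// TestAvoidUnnecessaryUpdates verifies that the controller does not write a status
// update when the stored HPA already matches the computed status; any update
// attempt fails the test via the update reactor below.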
func TestAvoidUnnecessaryUpdates ( t * testing . T ) {
now := metav1 . Time { Time : time . Now ( ) . Add ( - time . Hour ) }
tc := testCase {
minReplicas : 2 ,
maxReplicas : 6 ,
initialReplicas : 2 ,
expectedDesiredReplicas : 2 ,
CPUTarget : 30 ,
CPUCurrent : 40 ,
verifyCPUCurrent : true ,
reportedLevels : [ ] uint64 { 400 , 500 , 700 } ,
reportedCPURequests : [ ] resource . Quantity { resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) , resource . MustParse ( "1.0" ) } ,
reportedPodStartTime : [ ] metav1 . Time { coolCpuCreationTime ( ) , hotCpuCreationTime ( ) , hotCpuCreationTime ( ) } ,
useMetricsAPI : true ,
lastScaleTime : & now ,
recommendations : [ ] timestampedRecommendation { } ,
}
testClient , _ , _ , _ , _ := tc . prepareTestClient ( t )
tc . testClient = testClient
testClient . PrependReactor ( "list" , "horizontalpodautoscalers" , func ( action core . Action ) ( handled bool , ret runtime . Object , err error ) {
tc . Lock ( )
defer tc . Unlock ( )
// fake out the verification logic and mark that we're done processing
go func ( ) {
// wait a tick and then mark that we're finished (otherwise, we have no
// way to indicate that we're finished, because the function decides not to do anything)
time . Sleep ( 1 * time . Second )
tc . statusUpdated = true
tc . processed <- "test-hpa"
} ( )
quantity := resource . MustParse ( "400m" )
obj := & autoscalingv2 . HorizontalPodAutoscalerList {
Items : [ ] autoscalingv2 . HorizontalPodAutoscaler {
{
ObjectMeta : metav1 . ObjectMeta {
Name : "test-hpa" ,
Namespace : "test-namespace" ,
SelfLink : "experimental/v1/namespaces/test-namespace/horizontalpodautoscalers/test-hpa" ,
} ,
Spec : autoscalingv2 . HorizontalPodAutoscalerSpec {
ScaleTargetRef : autoscalingv2 . CrossVersionObjectReference {
Kind : "ReplicationController" ,
Name : "test-rc" ,
APIVersion : "v1" ,
} ,
MinReplicas : & tc . minReplicas ,
MaxReplicas : tc . maxReplicas ,
} ,
Status : autoscalingv2 . HorizontalPodAutoscalerStatus {
CurrentReplicas : tc . initialReplicas ,
DesiredReplicas : tc . initialReplicas ,
LastScaleTime : tc . lastScaleTime ,
CurrentMetrics : [ ] autoscalingv2 . MetricStatus {
{
Type : autoscalingv2 . ResourceMetricSourceType ,
Resource : & autoscalingv2 . ResourceMetricStatus {
Name : v1 . ResourceCPU ,
Current : autoscalingv2 . MetricValueStatus {
AverageValue : & quantity ,
AverageUtilization : & tc . CPUCurrent ,
} ,
} ,
} ,
} ,
Conditions : [ ] autoscalingv2 . HorizontalPodAutoscalerCondition {
{
Type : autoscalingv2 . AbleToScale ,
Status : v1 . ConditionTrue ,
LastTransitionTime : * tc . lastScaleTime ,
Reason : "ReadyForNewScale" ,
Message : "recommended size matches current size" ,
} ,
{
Type : autoscalingv2 . ScalingActive ,
Status : v1 . ConditionTrue ,
LastTransitionTime : * tc . lastScaleTime ,
Reason : "ValidMetricFound" ,
Message : "the HPA was able to successfully calculate a replica count from cpu resource utilization (percentage of request)" ,
} ,
{
Type : autoscalingv2 . ScalingLimited ,
Status : v1 . ConditionTrue ,
LastTransitionTime : * tc . lastScaleTime ,
Reason : "TooFewReplicas" ,
Message : "the desired replica count is more than the maximum replica count" ,
} ,
} ,
} ,
} ,
} ,
}
// and... convert to autoscaling v1 to return the right type
objv1 , err := unsafeConvertToVersionVia ( obj , autoscalingv1 . SchemeGroupVersion )
if err != nil {
return true , nil , err
}
return true , objv1 , nil
} )
testClient . PrependReactor ( "update" , "horizontalpodautoscalers" , func ( action core . Action ) ( handled bool , ret runtime . Object , err error ) {
tc . Lock ( )
defer tc . Unlock ( )
assert . Fail ( t , "should not have attempted to update the HPA when nothing changed" )
// mark that we've processed this HPA
tc . processed <- ""
return true , nil , fmt . Errorf ( "unexpected call" )
} )
controller , informerFactory := tc . setupController ( t )
tc . runTestWithController ( t , controller , informerFactory )
}
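
// TestConvertDesiredReplicasWithRules table-tests how a prenormalized desired
// replica count is clamped to the HPA's min/max bounds and the scale-up limit,
// checking both the converted value and the reported condition.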
func TestConvertDesiredReplicasWithRules ( t * testing . T ) {
conversionTestCases := [ ] struct {
currentReplicas int32
expectedDesiredReplicas int32
hpaMinReplicas int32
hpaMaxReplicas int32
expectedConvertedDesiredReplicas int32
expectedCondition string
annotation string
} {
{
currentReplicas : 5 ,
expectedDesiredReplicas : 7 ,
hpaMinReplicas : 3 ,
hpaMaxReplicas : 8 ,
expectedConvertedDesiredReplicas : 7 ,
expectedCondition : "DesiredWithinRange" ,
annotation : "prenormalized desired replicas within range" ,
} ,
{
currentReplicas : 3 ,
expectedDesiredReplicas : 1 ,
hpaMinReplicas : 2 ,
hpaMaxReplicas : 8 ,
expectedConvertedDesiredReplicas : 2 ,
expectedCondition : "TooFewReplicas" ,
annotation : "prenormalized desired replicas < minReplicas" ,
} ,
{
currentReplicas : 1 ,
expectedDesiredReplicas : 0 ,
hpaMinReplicas : 0 ,
hpaMaxReplicas : 10 ,
expectedConvertedDesiredReplicas : 1 ,
expectedCondition : "TooFewReplicas" ,
annotation : "1 is minLimit because hpaMinReplicas < 1" ,
} ,
{
currentReplicas : 20 ,
expectedDesiredReplicas : 1000 ,
hpaMinReplicas : 1 ,
hpaMaxReplicas : 10 ,
expectedConvertedDesiredReplicas : 10 ,
expectedCondition : "TooManyReplicas" ,
annotation : "maxReplicas is the limit because maxReplicas < scaleUpLimit" ,
} ,
{
currentReplicas : 3 ,
expectedDesiredReplicas : 1000 ,
hpaMinReplicas : 1 ,
hpaMaxReplicas : 2000 ,
expectedConvertedDesiredReplicas : calculateScaleUpLimit ( 3 ) ,
expectedCondition : "ScaleUpLimit" ,
annotation : "scaleUpLimit is the limit because scaleUpLimit < maxReplicas" ,
} ,
}
for _ , ctc := range conversionTestCases {
actualConvertedDesiredReplicas , actualCondition , _ := convertDesiredReplicasWithRules (
ctc . currentReplicas , ctc . expectedDesiredReplicas , ctc . hpaMinReplicas , ctc . hpaMaxReplicas ,
)
assert . Equal ( t , ctc . expectedConvertedDesiredReplicas , actualConvertedDesiredReplicas , ctc . annotation )
assert . Equal ( t , ctc . expectedCondition , actualCondition , ctc . annotation )
}
}
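
// The cases above exercise a clamping rule that can be summarized as: the desired
// count is limited to [max(hpaMinReplicas, 1), min(hpaMaxReplicas, scaleUpLimit)].
// clampSketch below is an illustrative, hypothetical helper that restates that rule;
// it is not the controller's implementation (that lives in convertDesiredReplicasWithRules).
func clampSketch(desired, hpaMin, hpaMax, scaleUpLimit int32) int32 {
	minAllowed := hpaMin
	if minAllowed < 1 {
		minAllowed = 1 // an HPA never scales a target below one replica
	}
	maxAllowed := hpaMax
	if scaleUpLimit < maxAllowed {
		maxAllowed = scaleUpLimit // the scale-up limit can be tighter than maxReplicas
	}
	switch {
	case desired < minAllowed:
		return minAllowed
	case desired > maxAllowed:
		return maxAllowed
	default:
		return desired
	}
}

// TestNormalizeDesiredReplicas verifies stabilizeRecommendation: the result is the
// maximum of the proposed replica count and any recommendation still inside the
// stabilization window, and it also checks the resulting length of the recommendation log.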
func TestNormalizeDesiredReplicas ( t * testing . T ) {
tests := [ ] struct {
name string
key string
recommendations [ ] timestampedRecommendation
prenormalizedDesiredReplicas int32
expectedStabilizedReplicas int32
expectedLogLength int
} {
{
"empty log" ,
"" ,
[ ] timestampedRecommendation { } ,
5 ,
5 ,
1 ,
} ,
{
"stabilize" ,
"" ,
[ ] timestampedRecommendation {
{ 4 , time . Now ( ) . Add ( - 2 * time . Minute ) } ,
{ 5 , time . Now ( ) . Add ( - 1 * time . Minute ) } ,
} ,
3 ,
5 ,
3 ,
} ,
{
"no stabilize" ,
"" ,
[ ] timestampedRecommendation {
{ 1 , time . Now ( ) . Add ( - 2 * time . Minute ) } ,
{ 2 , time . Now ( ) . Add ( - 1 * time . Minute ) } ,
} ,
3 ,
3 ,
3 ,
} ,
{
"no stabilize - old recommendations" ,
"" ,
[ ] timestampedRecommendation {
{ 10 , time . Now ( ) . Add ( - 10 * time . Minute ) } ,
{ 9 , time . Now ( ) . Add ( - 9 * time . Minute ) } ,
} ,
3 ,
3 ,
2 ,
} ,
{
"stabilize - old recommendations" ,
"" ,
[ ] timestampedRecommendation {
{ 10 , time . Now ( ) . Add ( - 10 * time . Minute ) } ,
{ 4 , time . Now ( ) . Add ( - 1 * time . Minute ) } ,
{ 5 , time . Now ( ) . Add ( - 2 * time . Minute ) } ,
{ 9 , time . Now ( ) . Add ( - 9 * time . Minute ) } ,
} ,
3 ,
5 ,
4 ,
} ,
}
for _ , tc := range tests {
hc := HorizontalController {
downscaleStabilisationWindow : 5 * time . Minute ,
recommendations : map [ string ] [ ] timestampedRecommendation {
tc . key : tc . recommendations ,
} ,
}
r := hc . stabilizeRecommendation ( tc . key , tc . prenormalizedDesiredReplicas )
if r != tc . expectedStabilizedReplicas {
t . Errorf ( "[%s] got %d stabilized replicas, expected %d" , tc . name , r , tc . expectedStabilizedReplicas )
}
if len ( hc . recommendations [ tc . key ] ) != tc . expectedLogLength {
t . Errorf ( "[%s] after stabilization recommendations log has %d entries, expected %d" , tc . name , len ( hc . recommendations [ tc . key ] ) , tc . expectedLogLength )
}
}
}
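
// A minimal sketch of the downscale-stabilization rule exercised by
// TestStabilizeDownscale and TestNormalizeDesiredReplicas: within the stabilization
// window, the controller never recommends fewer replicas than the highest recent
// recommendation. The type and function below are hypothetical and written only for
// illustration; the real logic is HorizontalController.stabilizeRecommendation.
type sketchRecommendation struct {
	replicas int32
	observed time.Time
}

func stabilizedReplicasSketch(history []sketchRecommendation, proposed int32, window time.Duration) int32 {
	cutoff := time.Now().Add(-window)
	result := proposed
	for _, rec := range history {
		// only recommendations still inside the window may hold the replica count up
		if rec.observed.After(cutoff) && rec.replicas > result {
			result = rec.replicas
		}
	}
	return result
}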
// TODO: add more tests