/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package podautoscaler
2015-08-17 12:18:26 +00:00
import (
"fmt"
2015-09-07 10:25:04 +00:00
"math"
2015-08-17 12:18:26 +00:00
"time"
"github.com/golang/glog"
2017-06-22 18:24:23 +00:00
autoscalingv1 "k8s.io/api/autoscaling/v1"
2018-06-28 18:28:13 +00:00
autoscalingv2 "k8s.io/api/autoscaling/v2beta2"
2017-06-22 18:24:23 +00:00
"k8s.io/api/core/v1"
2017-06-06 19:57:05 +00:00
apiequality "k8s.io/apimachinery/pkg/api/equality"
2017-03-08 07:02:34 +00:00
"k8s.io/apimachinery/pkg/api/errors"
2017-10-11 18:31:04 +00:00
apimeta "k8s.io/apimachinery/pkg/api/meta"
2017-01-25 13:13:07 +00:00
"k8s.io/apimachinery/pkg/api/resource"
2017-01-11 14:09:48 +00:00
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
2016-12-02 20:18:26 +00:00
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
2017-01-11 14:09:48 +00:00
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
2017-03-08 07:02:34 +00:00
"k8s.io/apimachinery/pkg/util/wait"
2017-06-23 20:56:37 +00:00
autoscalinginformers "k8s.io/client-go/informers/autoscaling/v1"
2018-09-04 18:16:48 +00:00
coreinformers "k8s.io/client-go/informers/core/v1"
2017-07-10 17:54:48 +00:00
"k8s.io/client-go/kubernetes/scheme"
2017-06-23 20:56:37 +00:00
autoscalingclient "k8s.io/client-go/kubernetes/typed/autoscaling/v1"
2017-01-30 18:39:54 +00:00
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
2017-06-23 20:56:37 +00:00
autoscalinglisters "k8s.io/client-go/listers/autoscaling/v1"
2018-09-04 18:16:48 +00:00
corelisters "k8s.io/client-go/listers/core/v1"
2017-10-11 18:31:04 +00:00
scaleclient "k8s.io/client-go/scale"
2017-01-24 14:11:51 +00:00
"k8s.io/client-go/tools/cache"
2017-01-30 18:39:54 +00:00
"k8s.io/client-go/tools/record"
2017-03-08 07:02:34 +00:00
"k8s.io/client-go/util/workqueue"
2017-10-16 11:41:50 +00:00
"k8s.io/kubernetes/pkg/api/legacyscheme"
2017-03-08 07:02:34 +00:00
"k8s.io/kubernetes/pkg/controller"
2018-09-04 18:16:48 +00:00
metricsclient "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
2015-08-20 12:55:28 +00:00
)
var (
	// scaleUpLimitFactor and scaleUpLimitMinimum presumably bound how far a
	// single scale-up step may go relative to the current replica count
	// (factor of current, with a floor of scaleUpLimitMinimum) — the
	// consuming code is outside this view; confirm against calculateScaleUpLimit.
	scaleUpLimitFactor  = 2.0
	scaleUpLimitMinimum = 4.0
)
// timestampedRecommendation pairs a replica-count recommendation with the
// time it was produced. Entries are stored per HPA in
// HorizontalController.recommendations for downscale stabilization.
type timestampedRecommendation struct {
	recommendation int32     // recommended replica count
	timestamp      time.Time // when the recommendation was computed
}
// HorizontalController is responsible for the synchronizing HPA objects stored
// in the system with the actual deployments/replication controllers they
// control.
type HorizontalController struct {
	// scaleNamespacer accesses the scale subresource of scale targets.
	scaleNamespacer scaleclient.ScalesGetter
	// hpaNamespacer accesses HorizontalPodAutoscaler objects (used for status updates).
	hpaNamespacer autoscalingclient.HorizontalPodAutoscalersGetter
	// mapper resolves a scale target's GroupKind into REST mappings.
	mapper apimeta.RESTMapper

	// replicaCalc computes desired replica counts from observed metrics.
	replicaCalc   *ReplicaCalculator
	eventRecorder record.EventRecorder

	// downscaleStabilisationWindow is the duration over which past
	// recommendations are retained (see recommendations below), presumably to
	// damp scale-downs — confirm against the stabilization logic.
	downscaleStabilisationWindow time.Duration

	// hpaLister is able to list/get HPAs from the shared cache from the informer passed in to
	// NewHorizontalController.
	hpaLister       autoscalinglisters.HorizontalPodAutoscalerLister
	hpaListerSynced cache.InformerSynced

	// podLister is able to list/get Pods from the shared cache from the informer passed in to
	// NewHorizontalController.
	podLister       corelisters.PodLister
	podListerSynced cache.InformerSynced

	// queue holds the keys of controllers that need to be synced.
	queue workqueue.RateLimitingInterface

	// Latest unstabilized recommendations for each autoscaler, keyed by the
	// HPA's "namespace/name" queue key.
	recommendations map[string][]timestampedRecommendation
}
2017-09-25 13:25:05 +00:00
// NewHorizontalController creates a new HorizontalController.
2017-02-09 19:59:19 +00:00
func NewHorizontalController (
evtNamespacer v1core . EventsGetter ,
2017-10-11 18:31:04 +00:00
scaleNamespacer scaleclient . ScalesGetter ,
2016-12-02 20:18:26 +00:00
hpaNamespacer autoscalingclient . HorizontalPodAutoscalersGetter ,
2017-10-11 18:31:04 +00:00
mapper apimeta . RESTMapper ,
2018-09-04 18:16:48 +00:00
metricsClient metricsclient . MetricsClient ,
2017-02-09 19:59:19 +00:00
hpaInformer autoscalinginformers . HorizontalPodAutoscalerInformer ,
2018-09-04 18:16:48 +00:00
podInformer coreinformers . PodInformer ,
2017-02-09 19:59:19 +00:00
resyncPeriod time . Duration ,
2018-08-31 07:32:01 +00:00
downscaleStabilisationWindow time . Duration ,
2018-09-04 18:16:48 +00:00
tolerance float64 ,
cpuInitializationPeriod ,
delayOfInitialReadinessStatus time . Duration ,
2017-02-18 10:32:38 +00:00
2017-02-09 19:59:19 +00:00
) * HorizontalController {
broadcaster := record . NewBroadcaster ( )
2017-08-26 09:34:38 +00:00
broadcaster . StartLogging ( glog . Infof )
2017-02-09 19:59:19 +00:00
broadcaster . StartRecordingToSink ( & v1core . EventSinkImpl { Interface : evtNamespacer . Events ( "" ) } )
2017-07-15 05:25:54 +00:00
recorder := broadcaster . NewRecorder ( scheme . Scheme , v1 . EventSource { Component : "horizontal-pod-autoscaler" } )
2017-02-09 19:59:19 +00:00
2017-03-08 07:02:34 +00:00
hpaController := & HorizontalController {
2018-08-31 07:32:01 +00:00
eventRecorder : recorder ,
scaleNamespacer : scaleNamespacer ,
hpaNamespacer : hpaNamespacer ,
downscaleStabilisationWindow : downscaleStabilisationWindow ,
queue : workqueue . NewNamedRateLimitingQueue ( NewDefaultHPARateLimiter ( resyncPeriod ) , "horizontalpodautoscaler" ) ,
mapper : mapper ,
recommendations : map [ string ] [ ] timestampedRecommendation { } ,
2017-02-09 19:59:19 +00:00
}
hpaInformer . Informer ( ) . AddEventHandlerWithResyncPeriod (
2016-09-14 18:35:38 +00:00
cache . ResourceEventHandlerFuncs {
2017-03-08 07:02:34 +00:00
AddFunc : hpaController . enqueueHPA ,
UpdateFunc : hpaController . updateHPA ,
DeleteFunc : hpaController . deleteHPA ,
2016-03-02 08:29:17 +00:00
} ,
2017-02-09 19:59:19 +00:00
resyncPeriod ,
2016-03-02 08:29:17 +00:00
)
2017-03-08 07:02:34 +00:00
hpaController . hpaLister = hpaInformer . Lister ( )
hpaController . hpaListerSynced = hpaInformer . Informer ( ) . HasSynced
2016-03-02 08:29:17 +00:00
2018-09-04 18:16:48 +00:00
hpaController . podLister = podInformer . Lister ( )
hpaController . podListerSynced = podInformer . Informer ( ) . HasSynced
replicaCalc := NewReplicaCalculator (
metricsClient ,
hpaController . podLister ,
tolerance ,
cpuInitializationPeriod ,
delayOfInitialReadinessStatus ,
)
hpaController . replicaCalc = replicaCalc
2017-03-08 07:02:34 +00:00
return hpaController
2015-08-17 12:18:26 +00:00
}
// Run begins watching and syncing.
// It blocks until stopCh is closed; crashes in the worker are handled and the
// work queue is shut down on exit.
func (a *HorizontalController) Run(stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()
	defer a.queue.ShutDown()

	glog.Infof("Starting HPA controller")
	defer glog.Infof("Shutting down HPA controller")

	// Do not process any work until the HPA and pod caches have synced.
	if !controller.WaitForCacheSync("HPA", stopCh, a.hpaListerSynced, a.podListerSynced) {
		return
	}

	// start a single worker (we may wish to start more in the future)
	go wait.Until(a.worker, time.Second, stopCh)

	<-stopCh
}
// updateHPA re-enqueues an HPA when it changes.
// obj could be an *v1.HorizontalPodAutoscaler, or a DeletionFinalStateUnknown marker item.
func (a *HorizontalController) updateHPA(old, cur interface{}) {
	a.enqueueHPA(cur)
}
// obj could be an *v1.HorizontalPodAutoscaler, or a DeletionFinalStateUnknown marker item.
func ( a * HorizontalController ) enqueueHPA ( obj interface { } ) {
key , err := controller . KeyFunc ( obj )
if err != nil {
utilruntime . HandleError ( fmt . Errorf ( "couldn't get key for object %+v: %v" , obj , err ) )
return
}
// always add rate-limitted so we don't fetch metrics more that once per resync interval
a . queue . AddRateLimited ( key )
}
// deleteHPA drops rate-limiter state for a deleted HPA so a future HPA with
// the same key starts with a clean slate.
func (a *HorizontalController) deleteHPA(obj interface{}) {
	key, err := controller.KeyFunc(obj)
	if err != nil {
		utilruntime.HandleError(fmt.Errorf("couldn't get key for object %+v: %v", obj, err))
		return
	}

	// TODO: could we leak if we fail to get the key?
	a.queue.Forget(key)
}
// worker processes queue items until the queue is shut down.
func (a *HorizontalController) worker() {
	for a.processNextWorkItem() {
	}
	glog.Infof("horizontal pod autoscaler controller worker shutting down")
}
// processNextWorkItem reconciles one queued HPA key.
// It returns false only when the queue has been shut down; errors are
// reported and the key is re-queued with rate limiting.
func (a *HorizontalController) processNextWorkItem() bool {
	key, quit := a.queue.Get()
	if quit {
		return false
	}
	defer a.queue.Done(key)

	err := a.reconcileKey(key.(string))
	if err == nil {
		// don't "forget" here because we want to only process a given HPA once per resync interval
		return true
	}

	a.queue.AddRateLimited(key)
	utilruntime.HandleError(err)

	return true
}
2018-07-04 14:20:09 +00:00
// computeReplicasForMetrics computes the desired number of replicas for the metric specifications listed in the HPA,
// returning the maximum of the computed replica counts, a description of the associated metric, and the statuses of
// all metrics computed.
2017-10-11 18:31:04 +00:00
func ( a * HorizontalController ) computeReplicasForMetrics ( hpa * autoscalingv2 . HorizontalPodAutoscaler , scale * autoscalingv1 . Scale ,
2016-12-02 20:18:26 +00:00
metricSpecs [ ] autoscalingv2 . MetricSpec ) ( replicas int32 , metric string , statuses [ ] autoscalingv2 . MetricStatus , timestamp time . Time , err error ) {
2016-01-29 11:20:19 +00:00
2016-07-11 06:42:51 +00:00
currentReplicas := scale . Status . Replicas
2016-12-02 20:18:26 +00:00
statuses = make ( [ ] autoscalingv2 . MetricStatus , len ( metricSpecs ) )
2016-01-29 11:20:19 +00:00
2016-12-02 20:18:26 +00:00
for i , metricSpec := range metricSpecs {
2017-10-11 18:31:04 +00:00
if scale . Status . Selector == "" {
2016-03-09 00:27:13 +00:00
errMsg := "selector is required"
2016-11-18 20:50:17 +00:00
a . eventRecorder . Event ( hpa , v1 . EventTypeWarning , "SelectorRequired" , errMsg )
2017-05-24 21:09:47 +00:00
setCondition ( hpa , autoscalingv2 . ScalingActive , v1 . ConditionFalse , "InvalidSelector" , "the HPA target's scale is missing a selector" )
2016-12-02 20:18:26 +00:00
return 0 , "" , nil , time . Time { } , fmt . Errorf ( errMsg )
2016-03-09 00:27:13 +00:00
}
2017-10-11 18:31:04 +00:00
selector , err := labels . Parse ( scale . Status . Selector )
2016-03-09 00:27:13 +00:00
if err != nil {
2016-12-02 20:18:26 +00:00
errMsg := fmt . Sprintf ( "couldn't convert selector into a corresponding internal selector object: %v" , err )
2016-11-18 20:50:17 +00:00
a . eventRecorder . Event ( hpa , v1 . EventTypeWarning , "InvalidSelector" , errMsg )
2017-05-24 21:09:47 +00:00
setCondition ( hpa , autoscalingv2 . ScalingActive , v1 . ConditionFalse , "InvalidSelector" , errMsg )
2016-12-02 20:18:26 +00:00
return 0 , "" , nil , time . Time { } , fmt . Errorf ( errMsg )
2016-03-09 00:27:13 +00:00
}
2016-12-02 20:18:26 +00:00
var replicaCountProposal int32
var timestampProposal time . Time
var metricNameProposal string
switch metricSpec . Type {
case autoscalingv2 . ObjectMetricSourceType :
2018-06-28 18:28:13 +00:00
metricSelector , err := metav1 . LabelSelectorAsSelector ( metricSpec . Object . Metric . Selector )
if err != nil {
a . eventRecorder . Event ( hpa , v1 . EventTypeWarning , "FailedGetObjectMetric" , err . Error ( ) )
setCondition ( hpa , autoscalingv2 . ScalingActive , v1 . ConditionFalse , "FailedGetObjectMetric" , "the HPA was unable to compute the replica count: %v" , err )
return 0 , "" , nil , time . Time { } , fmt . Errorf ( "failed to get object metric value: %v" , err )
}
replicaCountProposal , timestampProposal , metricNameProposal , err = a . computeStatusForObjectMetric ( currentReplicas , metricSpec , hpa , selector , & statuses [ i ] , metricSelector )
2016-12-02 20:18:26 +00:00
if err != nil {
return 0 , "" , nil , time . Time { } , fmt . Errorf ( "failed to get object metric value: %v" , err )
}
case autoscalingv2 . PodsMetricSourceType :
2018-06-28 18:28:13 +00:00
metricSelector , err := metav1 . LabelSelectorAsSelector ( metricSpec . Pods . Metric . Selector )
if err != nil {
a . eventRecorder . Event ( hpa , v1 . EventTypeWarning , "FailedGetPodsMetric" , err . Error ( ) )
setCondition ( hpa , autoscalingv2 . ScalingActive , v1 . ConditionFalse , "FailedGetPodsMetric" , "the HPA was unable to compute the replica count: %v" , err )
return 0 , "" , nil , time . Time { } , fmt . Errorf ( "failed to get pods metric value: %v" , err )
}
replicaCountProposal , timestampProposal , metricNameProposal , err = a . computeStatusForPodsMetric ( currentReplicas , metricSpec , hpa , selector , & statuses [ i ] , metricSelector )
2016-12-02 20:18:26 +00:00
if err != nil {
2018-07-04 14:20:09 +00:00
return 0 , "" , nil , time . Time { } , fmt . Errorf ( "failed to get object metric value: %v" , err )
2016-12-02 20:18:26 +00:00
}
case autoscalingv2 . ResourceMetricSourceType :
2018-07-04 14:20:09 +00:00
replicaCountProposal , timestampProposal , metricNameProposal , err = a . computeStatusForResourceMetric ( currentReplicas , metricSpec , hpa , selector , & statuses [ i ] )
if err != nil {
return 0 , "" , nil , time . Time { } , err
2016-12-02 20:18:26 +00:00
}
2018-02-21 10:19:51 +00:00
case autoscalingv2 . ExternalMetricSourceType :
2018-07-04 14:20:09 +00:00
replicaCountProposal , timestampProposal , metricNameProposal , err = a . computeStatusForExternalMetric ( currentReplicas , metricSpec , hpa , selector , & statuses [ i ] )
if err != nil {
return 0 , "" , nil , time . Time { } , err
2018-02-21 10:19:51 +00:00
}
2016-12-02 20:18:26 +00:00
default :
errMsg := fmt . Sprintf ( "unknown metric source type %q" , string ( metricSpec . Type ) )
a . eventRecorder . Event ( hpa , v1 . EventTypeWarning , "InvalidMetricSourceType" , errMsg )
2017-05-24 21:09:47 +00:00
setCondition ( hpa , autoscalingv2 . ScalingActive , v1 . ConditionFalse , "InvalidMetricSourceType" , "the HPA was unable to compute the replica count: %s" , errMsg )
2016-12-02 20:18:26 +00:00
return 0 , "" , nil , time . Time { } , fmt . Errorf ( errMsg )
2016-01-29 11:20:19 +00:00
}
2016-12-02 20:18:26 +00:00
if replicas == 0 || replicaCountProposal > replicas {
HPA: Consider unready pods and missing metrics
Currently, the HPA considers unready pods the same as ready pods when
looking at their CPU and custom metric usage. However, pods frequently
use extra CPU during initialization, so we want to consider them
separately.
This commit causes the HPA to consider unready pods as having 0 CPU
usage when scaling up, and ignores them when scaling down. If, when
scaling up, factoring the unready pods as having 0 CPU would cause a
downscale instead, we simply choose not to scale. Otherwise, we simply
scale up at the reduced amount caculated by factoring the pods in at
zero CPU usage.
The effect is that unready pods cause the autoscaler to be a bit more
conservative -- large increases in CPU usage can still cause scales,
even with unready pods in the mix, but will not cause the scale factors
to be as large, in anticipation of the new pods later becoming ready and
handling load.
Similarly, if there are pods for which no metrics have been retrieved,
these pods are treated as having 100% of the requested metric when
scaling down, and 0% when scaling up. As above, this cannot change the
direction of the scale.
This commit also changes the HPA to ignore superfluous metrics -- as
long as metrics for all ready pods are present, the HPA we make scaling
decisions. Currently, this only works for CPU. For custom metrics, we
cannot identify which metrics go to which pods if we get superfluous
metrics, so we abort the scale.
2016-09-27 18:47:52 +00:00
timestamp = timestampProposal
2016-01-29 11:20:19 +00:00
replicas = replicaCountProposal
2016-12-02 20:18:26 +00:00
metric = metricNameProposal
2016-01-29 11:20:19 +00:00
}
2016-11-08 12:11:08 +00:00
}
2018-01-29 18:01:43 +00:00
setCondition ( hpa , autoscalingv2 . ScalingActive , v1 . ConditionTrue , "ValidMetricFound" , "the HPA was able to successfully calculate a replica count from %s" , metric )
2016-12-02 20:18:26 +00:00
return replicas , metric , statuses , timestamp , nil
2016-01-29 11:20:19 +00:00
}
2017-03-08 07:02:34 +00:00
func ( a * HorizontalController ) reconcileKey ( key string ) error {
namespace , name , err := cache . SplitMetaNamespaceKey ( key )
if err != nil {
return err
}
hpa , err := a . hpaLister . HorizontalPodAutoscalers ( namespace ) . Get ( name )
if errors . IsNotFound ( err ) {
2018-08-07 02:00:31 +00:00
glog . Infof ( "Horizontal Pod Autoscaler %s has been deleted in %s" , name , namespace )
2018-08-31 07:32:01 +00:00
delete ( a . recommendations , key )
2017-03-08 07:02:34 +00:00
return nil
}
2018-08-31 07:32:01 +00:00
return a . reconcileAutoscaler ( hpa , key )
2017-03-08 07:02:34 +00:00
}
// computeStatusForObjectMetric computes the desired number of replicas for the specified metric of type ObjectMetricSourceType.
// It writes the observed metric value into *status and returns the replica
// proposal, the metric timestamp, and a human-readable metric description.
func (a *HorizontalController) computeStatusForObjectMetric(currentReplicas int32, metricSpec autoscalingv2.MetricSpec, hpa *autoscalingv2.HorizontalPodAutoscaler, selector labels.Selector, status *autoscalingv2.MetricStatus, metricSelector labels.Selector) (int32, time.Time, string, error) {
	replicaCountProposal, utilizationProposal, timestampProposal, err := a.replicaCalc.GetObjectMetricReplicas(currentReplicas, metricSpec.Object.Target.Value.MilliValue(), metricSpec.Object.Metric.Name, hpa.Namespace, &metricSpec.Object.DescribedObject, selector, metricSelector)
	if err != nil {
		// Surface the failure on the HPA itself before returning it.
		a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetObjectMetric", err.Error())
		setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetObjectMetric", "the HPA was unable to compute the replica count: %v", err)
		return 0, timestampProposal, "", err
	}
	*status = autoscalingv2.MetricStatus{
		Type: autoscalingv2.ObjectMetricSourceType,
		Object: &autoscalingv2.ObjectMetricStatus{
			DescribedObject: metricSpec.Object.DescribedObject,
			Metric: autoscalingv2.MetricIdentifier{
				Name:     metricSpec.Object.Metric.Name,
				Selector: metricSpec.Object.Metric.Selector,
			},
			Current: autoscalingv2.MetricValueStatus{
				Value: resource.NewMilliQuantity(utilizationProposal, resource.DecimalSI),
			},
		},
	}
	return replicaCountProposal, timestampProposal, fmt.Sprintf("%s metric %s", metricSpec.Object.DescribedObject.Kind, metricSpec.Object.Metric.Name), nil
}
// computeStatusForPodsMetric computes the desired number of replicas for the specified metric of type PodsMetricSourceType.
// It writes the observed per-pod average value into *status and returns the
// replica proposal, the metric timestamp, and a human-readable description.
func (a *HorizontalController) computeStatusForPodsMetric(currentReplicas int32, metricSpec autoscalingv2.MetricSpec, hpa *autoscalingv2.HorizontalPodAutoscaler, selector labels.Selector, status *autoscalingv2.MetricStatus, metricSelector labels.Selector) (int32, time.Time, string, error) {
	replicaCountProposal, utilizationProposal, timestampProposal, err := a.replicaCalc.GetMetricReplicas(currentReplicas, metricSpec.Pods.Target.AverageValue.MilliValue(), metricSpec.Pods.Metric.Name, hpa.Namespace, selector, metricSelector)
	if err != nil {
		// Surface the failure on the HPA itself before returning it.
		a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetPodsMetric", err.Error())
		setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetPodsMetric", "the HPA was unable to compute the replica count: %v", err)
		return 0, timestampProposal, "", err
	}
	*status = autoscalingv2.MetricStatus{
		Type: autoscalingv2.PodsMetricSourceType,
		Pods: &autoscalingv2.PodsMetricStatus{
			Metric: autoscalingv2.MetricIdentifier{
				Name:     metricSpec.Pods.Metric.Name,
				Selector: metricSpec.Pods.Metric.Selector,
			},
			Current: autoscalingv2.MetricValueStatus{
				AverageValue: resource.NewMilliQuantity(utilizationProposal, resource.DecimalSI),
			},
		},
	}

	return replicaCountProposal, timestampProposal, fmt.Sprintf("pods metric %s", metricSpec.Pods.Metric.Name), nil
}
// computeStatusForResourceMetric computes the desired number of replicas for the specified metric of type ResourceMetricSourceType.
func ( a * HorizontalController ) computeStatusForResourceMetric ( currentReplicas int32 , metricSpec autoscalingv2 . MetricSpec , hpa * autoscalingv2 . HorizontalPodAutoscaler , selector labels . Selector , status * autoscalingv2 . MetricStatus ) ( int32 , time . Time , string , error ) {
2018-06-28 18:28:13 +00:00
if metricSpec . Resource . Target . AverageValue != nil {
2018-07-04 14:20:09 +00:00
var rawProposal int64
2018-06-28 18:28:13 +00:00
replicaCountProposal , rawProposal , timestampProposal , err := a . replicaCalc . GetRawResourceReplicas ( currentReplicas , metricSpec . Resource . Target . AverageValue . MilliValue ( ) , metricSpec . Resource . Name , hpa . Namespace , selector )
2018-07-04 14:20:09 +00:00
if err != nil {
a . eventRecorder . Event ( hpa , v1 . EventTypeWarning , "FailedGetResourceMetric" , err . Error ( ) )
setCondition ( hpa , autoscalingv2 . ScalingActive , v1 . ConditionFalse , "FailedGetResourceMetric" , "the HPA was unable to compute the replica count: %v" , err )
return 0 , time . Time { } , "" , fmt . Errorf ( "failed to get %s utilization: %v" , metricSpec . Resource . Name , err )
}
metricNameProposal := fmt . Sprintf ( "%s resource" , metricSpec . Resource . Name )
status = & autoscalingv2 . MetricStatus {
Type : autoscalingv2 . ResourceMetricSourceType ,
Resource : & autoscalingv2 . ResourceMetricStatus {
2018-06-28 18:28:13 +00:00
Name : metricSpec . Resource . Name ,
Current : autoscalingv2 . MetricValueStatus {
AverageValue : resource . NewMilliQuantity ( rawProposal , resource . DecimalSI ) ,
} ,
2018-07-04 14:20:09 +00:00
} ,
}
return replicaCountProposal , timestampProposal , metricNameProposal , nil
} else {
2018-06-28 18:28:13 +00:00
if metricSpec . Resource . Target . AverageUtilization == nil {
2018-07-04 14:20:09 +00:00
errMsg := "invalid resource metric source: neither a utilization target nor a value target was set"
a . eventRecorder . Event ( hpa , v1 . EventTypeWarning , "FailedGetResourceMetric" , errMsg )
setCondition ( hpa , autoscalingv2 . ScalingActive , v1 . ConditionFalse , "FailedGetResourceMetric" , "the HPA was unable to compute the replica count: %s" , errMsg )
return 0 , time . Time { } , "" , fmt . Errorf ( errMsg )
}
2018-06-28 18:28:13 +00:00
targetUtilization := * metricSpec . Resource . Target . AverageUtilization
2018-07-04 14:20:09 +00:00
var percentageProposal int32
var rawProposal int64
replicaCountProposal , percentageProposal , rawProposal , timestampProposal , err := a . replicaCalc . GetResourceReplicas ( currentReplicas , targetUtilization , metricSpec . Resource . Name , hpa . Namespace , selector )
if err != nil {
a . eventRecorder . Event ( hpa , v1 . EventTypeWarning , "FailedGetResourceMetric" , err . Error ( ) )
setCondition ( hpa , autoscalingv2 . ScalingActive , v1 . ConditionFalse , "FailedGetResourceMetric" , "the HPA was unable to compute the replica count: %v" , err )
return 0 , time . Time { } , "" , fmt . Errorf ( "failed to get %s utilization: %v" , metricSpec . Resource . Name , err )
}
metricNameProposal := fmt . Sprintf ( "%s resource utilization (percentage of request)" , metricSpec . Resource . Name )
* status = autoscalingv2 . MetricStatus {
Type : autoscalingv2 . ResourceMetricSourceType ,
Resource : & autoscalingv2 . ResourceMetricStatus {
Name : metricSpec . Resource . Name ,
2018-06-28 18:28:13 +00:00
Current : autoscalingv2 . MetricValueStatus {
AverageUtilization : & percentageProposal ,
AverageValue : resource . NewMilliQuantity ( rawProposal , resource . DecimalSI ) ,
} ,
2018-07-04 14:20:09 +00:00
} ,
}
return replicaCountProposal , timestampProposal , metricNameProposal , nil
}
}
// computeStatusForExternalMetric computes the desired number of replicas for the specified metric of type ExternalMetricSourceType.
// A per-pod AverageValue target is checked first, then a total Value target;
// if neither is set the metric source is invalid. The observed value is
// written into *status.
func (a *HorizontalController) computeStatusForExternalMetric(currentReplicas int32, metricSpec autoscalingv2.MetricSpec, hpa *autoscalingv2.HorizontalPodAutoscaler, selector labels.Selector, status *autoscalingv2.MetricStatus) (int32, time.Time, string, error) {
	if metricSpec.External.Target.AverageValue != nil {
		replicaCountProposal, utilizationProposal, timestampProposal, err := a.replicaCalc.GetExternalPerPodMetricReplicas(currentReplicas, metricSpec.External.Target.AverageValue.MilliValue(), metricSpec.External.Metric.Name, hpa.Namespace, metricSpec.External.Metric.Selector)
		if err != nil {
			a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetExternalMetric", err.Error())
			setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetExternalMetric", "the HPA was unable to compute the replica count: %v", err)
			return 0, time.Time{}, "", fmt.Errorf("failed to get %s external metric: %v", metricSpec.External.Metric.Name, err)
		}
		*status = autoscalingv2.MetricStatus{
			Type: autoscalingv2.ExternalMetricSourceType,
			External: &autoscalingv2.ExternalMetricStatus{
				Metric: autoscalingv2.MetricIdentifier{
					Name:     metricSpec.External.Metric.Name,
					Selector: metricSpec.External.Metric.Selector,
				},
				Current: autoscalingv2.MetricValueStatus{
					AverageValue: resource.NewMilliQuantity(utilizationProposal, resource.DecimalSI),
				},
			},
		}
		return replicaCountProposal, timestampProposal, fmt.Sprintf("external metric %s(%+v)", metricSpec.External.Metric.Name, metricSpec.External.Metric.Selector), nil
	}
	if metricSpec.External.Target.Value != nil {
		replicaCountProposal, utilizationProposal, timestampProposal, err := a.replicaCalc.GetExternalMetricReplicas(currentReplicas, metricSpec.External.Target.Value.MilliValue(), metricSpec.External.Metric.Name, hpa.Namespace, metricSpec.External.Metric.Selector, selector)
		if err != nil {
			a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetExternalMetric", err.Error())
			setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetExternalMetric", "the HPA was unable to compute the replica count: %v", err)
			return 0, time.Time{}, "", fmt.Errorf("failed to get external metric %s: %v", metricSpec.External.Metric.Name, err)
		}
		*status = autoscalingv2.MetricStatus{
			Type: autoscalingv2.ExternalMetricSourceType,
			External: &autoscalingv2.ExternalMetricStatus{
				Metric: autoscalingv2.MetricIdentifier{
					Name:     metricSpec.External.Metric.Name,
					Selector: metricSpec.External.Metric.Selector,
				},
				Current: autoscalingv2.MetricValueStatus{
					Value: resource.NewMilliQuantity(utilizationProposal, resource.DecimalSI),
				},
			},
		}
		return replicaCountProposal, timestampProposal, fmt.Sprintf("external metric %s(%+v)", metricSpec.External.Metric.Name, metricSpec.External.Metric.Selector), nil
	}
	errMsg := "invalid external metric source: neither a value target nor an average value target was set"
	a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetExternalMetric", errMsg)
	setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetExternalMetric", "the HPA was unable to compute the replica count: %s", errMsg)
	return 0, time.Time{}, "", fmt.Errorf(errMsg)
}
2018-08-31 07:32:01 +00:00
// reconcileAutoscaler fetches the HPA's scale target via its scale
// subresource, computes a desired replica count (either from boundary rules
// or from the configured metrics), updates the target's scale if the count
// changed, and writes the resulting status/conditions back to the HPA.
// key identifies this HPA in the controller's recommendation history.
func (a *HorizontalController) reconcileAutoscaler(hpav1Shared *autoscalingv1.HorizontalPodAutoscaler, key string) error {
	// make a copy so that we never mutate the shared informer cache (conversion can mutate the object)
	hpav1 := hpav1Shared.DeepCopy()
	// then, convert to autoscaling/v2, which makes our lives easier when calculating metrics
	hpaRaw, err := unsafeConvertToVersionVia(hpav1, autoscalingv2.SchemeGroupVersion)
	if err != nil {
		a.eventRecorder.Event(hpav1, v1.EventTypeWarning, "FailedConvertHPA", err.Error())
		return fmt.Errorf("failed to convert the given HPA to %s: %v", autoscalingv2.SchemeGroupVersion.String(), err)
	}
	hpa := hpaRaw.(*autoscalingv2.HorizontalPodAutoscaler)
	// snapshot the status as it was before reconciliation so that
	// updateStatusIfNeeded can skip the API write when nothing changed
	hpaStatusOriginal := hpa.Status.DeepCopy()

	reference := fmt.Sprintf("%s/%s/%s", hpa.Spec.ScaleTargetRef.Kind, hpa.Namespace, hpa.Spec.ScaleTargetRef.Name)

	// resolve the scale target's group/kind so we can find its REST mapping
	targetGV, err := schema.ParseGroupVersion(hpa.Spec.ScaleTargetRef.APIVersion)
	if err != nil {
		a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetScale", err.Error())
		setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionFalse, "FailedGetScale", "the HPA controller was unable to get the target's current scale: %v", err)
		a.updateStatusIfNeeded(hpaStatusOriginal, hpa)
		return fmt.Errorf("invalid API version in scale target reference: %v", err)
	}

	targetGK := schema.GroupKind{
		Group: targetGV.Group,
		Kind:  hpa.Spec.ScaleTargetRef.Kind,
	}

	mappings, err := a.mapper.RESTMappings(targetGK)
	if err != nil {
		a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetScale", err.Error())
		setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionFalse, "FailedGetScale", "the HPA controller was unable to get the target's current scale: %v", err)
		a.updateStatusIfNeeded(hpaStatusOriginal, hpa)
		return fmt.Errorf("unable to determine resource for scale target reference: %v", err)
	}

	// try each mapping in turn until one yields a working scale subresource
	scale, targetGR, err := a.scaleForResourceMappings(hpa.Namespace, hpa.Spec.ScaleTargetRef.Name, mappings)
	if err != nil {
		a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetScale", err.Error())
		setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionFalse, "FailedGetScale", "the HPA controller was unable to get the target's current scale: %v", err)
		a.updateStatusIfNeeded(hpaStatusOriginal, hpa)
		return fmt.Errorf("failed to query scale subresource for %s: %v", reference, err)
	}
	setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, "SucceededGetScale", "the HPA controller was able to get the target's current scale")
	currentReplicas := scale.Status.Replicas

	var metricStatuses []autoscalingv2.MetricStatus
	metricDesiredReplicas := int32(0)
	metricName := ""
	metricTimestamp := time.Time{}

	desiredReplicas := int32(0)
	rescaleReason := ""
	timestamp := time.Now()

	rescale := true

	if scale.Spec.Replicas == 0 {
		// Autoscaling is disabled for this resource
		desiredReplicas = 0
		rescale = false
		setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "ScalingDisabled", "scaling is disabled since the replica count of the target is zero")
	} else if currentReplicas > hpa.Spec.MaxReplicas {
		// out-of-band scale above max: clamp down without consulting metrics
		rescaleReason = "Current number of replicas above Spec.MaxReplicas"
		desiredReplicas = hpa.Spec.MaxReplicas
	} else if hpa.Spec.MinReplicas != nil && currentReplicas < *hpa.Spec.MinReplicas {
		// out-of-band scale below min: clamp up without consulting metrics
		rescaleReason = "Current number of replicas below Spec.MinReplicas"
		desiredReplicas = *hpa.Spec.MinReplicas
	} else if currentReplicas == 0 {
		rescaleReason = "Current number of replicas must be greater than 0"
		desiredReplicas = 1
	} else {
		// current count is within [min, max]: compute the desired count from metrics
		metricDesiredReplicas, metricName, metricStatuses, metricTimestamp, err = a.computeReplicasForMetrics(hpa, scale, hpa.Spec.Metrics)
		if err != nil {
			// record the current replica count before bailing, so the status
			// write reflects what we observed even though scaling failed
			a.setCurrentReplicasInStatus(hpa, currentReplicas)
			if err := a.updateStatusIfNeeded(hpaStatusOriginal, hpa); err != nil {
				utilruntime.HandleError(err)
			}
			a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedComputeMetricsReplicas", err.Error())
			return fmt.Errorf("failed to compute desired number of replicas based on listed metrics for %s: %v", reference, err)
		}

		// NOTE(review): this logs `timestamp` (time.Now captured above), not
		// `metricTimestamp` from the metric computation — confirm which was intended
		glog.V(4).Infof("proposing %v desired replicas (based on %s from %s) for %s", metricDesiredReplicas, metricName, timestamp, reference)

		rescaleMetric := ""
		if metricDesiredReplicas > desiredReplicas {
			desiredReplicas = metricDesiredReplicas
			timestamp = metricTimestamp
			rescaleMetric = metricName
		}
		if desiredReplicas > currentReplicas {
			rescaleReason = fmt.Sprintf("%s above target", rescaleMetric)
		}
		if desiredReplicas < currentReplicas {
			rescaleReason = "All metrics below target"
		}
		// apply downscale stabilization plus min/max and scale-up-rate limits
		desiredReplicas = a.normalizeDesiredReplicas(hpa, key, currentReplicas, desiredReplicas)
		rescale = desiredReplicas != currentReplicas
	}

	if rescale {
		scale.Spec.Replicas = desiredReplicas
		_, err = a.scaleNamespacer.Scales(hpa.Namespace).Update(targetGR, scale)
		if err != nil {
			a.eventRecorder.Eventf(hpa, v1.EventTypeWarning, "FailedRescale", "New size: %d; reason: %s; error: %v", desiredReplicas, rescaleReason, err.Error())
			setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionFalse, "FailedUpdateScale", "the HPA controller was unable to update the target scale: %v", err)
			a.setCurrentReplicasInStatus(hpa, currentReplicas)
			if err := a.updateStatusIfNeeded(hpaStatusOriginal, hpa); err != nil {
				utilruntime.HandleError(err)
			}
			return fmt.Errorf("failed to rescale %s: %v", reference, err)
		}
		setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, "SucceededRescale", "the HPA controller was able to update the target scale to %d", desiredReplicas)
		a.eventRecorder.Eventf(hpa, v1.EventTypeNormal, "SuccessfulRescale", "New size: %d; reason: %s", desiredReplicas, rescaleReason)
		glog.Infof("Successful rescale of %s, old size: %d, new size: %d, reason: %s",
			hpa.Name, currentReplicas, desiredReplicas, rescaleReason)
	} else {
		glog.V(4).Infof("decided not to scale %s to %v (last scale time was %s)", reference, desiredReplicas, hpa.Status.LastScaleTime)
		desiredReplicas = currentReplicas
	}

	a.setStatus(hpa, currentReplicas, desiredReplicas, metricStatuses, rescale)
	return a.updateStatusIfNeeded(hpaStatusOriginal, hpa)
}
2018-08-31 07:32:01 +00:00
// stabilizeRecommendation:
// - replaces old recommendation with the newest recommendation,
// - returns max of recommendations that are not older than downscaleStabilisationWindow.
func (a *HorizontalController) stabilizeRecommendation(key string, prenormalizedDesiredReplicas int32) int32 {
	result := prenormalizedDesiredReplicas
	staleIndex := -1
	cutoff := time.Now().Add(-a.downscaleStabilisationWindow)

	for i, rec := range a.recommendations[key] {
		switch {
		case rec.timestamp.Before(cutoff):
			// expired sample: remember its slot so the new sample can reuse it
			staleIndex = i
		case rec.recommendation > result:
			result = rec.recommendation
		}
	}

	newSample := timestampedRecommendation{prenormalizedDesiredReplicas, time.Now()}
	if staleIndex >= 0 {
		a.recommendations[key][staleIndex] = newSample
	} else {
		a.recommendations[key] = append(a.recommendations[key], newSample)
	}
	return result
}
2017-10-19 13:30:23 +00:00
// normalizeDesiredReplicas takes the metrics desired replicas value and normalizes it based on the appropriate conditions (i.e. < maxReplicas, >
// minReplicas, etc...)
2018-08-31 07:32:01 +00:00
func ( a * HorizontalController ) normalizeDesiredReplicas ( hpa * autoscalingv2 . HorizontalPodAutoscaler , key string , currentReplicas int32 , prenormalizedDesiredReplicas int32 ) int32 {
stabilizedRecommendation := a . stabilizeRecommendation ( key , prenormalizedDesiredReplicas )
if stabilizedRecommendation != prenormalizedDesiredReplicas {
setCondition ( hpa , autoscalingv2 . AbleToScale , v1 . ConditionTrue , "ScaleDownStabilized" , "recent recommendations were higher than current one, applying the highest recent recommendation" )
} else {
setCondition ( hpa , autoscalingv2 . AbleToScale , v1 . ConditionTrue , "ReadyForNewScale" , "recommended size matches current size" )
}
2017-10-19 13:30:23 +00:00
var minReplicas int32
if hpa . Spec . MinReplicas != nil {
minReplicas = * hpa . Spec . MinReplicas
} else {
minReplicas = 0
}
2018-08-31 07:32:01 +00:00
desiredReplicas , condition , reason := convertDesiredReplicasWithRules ( currentReplicas , stabilizedRecommendation , minReplicas , hpa . Spec . MaxReplicas )
2017-10-19 13:30:23 +00:00
2018-08-31 07:32:01 +00:00
if desiredReplicas == stabilizedRecommendation {
2017-10-19 13:30:23 +00:00
setCondition ( hpa , autoscalingv2 . ScalingLimited , v1 . ConditionFalse , condition , reason )
} else {
setCondition ( hpa , autoscalingv2 . ScalingLimited , v1 . ConditionTrue , condition , reason )
}
return desiredReplicas
}
// convertDesiredReplicasWithRules performs the actual normalization, without depending on `HorizontalController` or `HorizontalPodAutoscaler`.
// It clamps desiredReplicas to [max(hpaMinReplicas, 1), min(hpaMaxReplicas, scaleUpLimit)]
// and returns the resulting replica count together with a condition name and a
// human-readable reason explaining whether (and why) the recommendation was limited.
func convertDesiredReplicasWithRules(currentReplicas, desiredReplicas, hpaMinReplicas, hpaMaxReplicas int32) (int32, string, string) {
	var minimumAllowedReplicas int32
	var minimumLimitingReason string
	if hpaMinReplicas == 0 {
		// never scale an enabled target below one replica
		minimumAllowedReplicas = 1
		minimumLimitingReason = "the desired replica count is zero"
	} else {
		minimumAllowedReplicas = hpaMinReplicas
		minimumLimitingReason = "the desired replica count is less than the minimum replica count"
	}

	// Do not upscale too much to prevent incorrect rapid increase of the number of master replicas caused by
	// bogus CPU usage report from heapster/kubelet (like in issue #32304).
	var maximumAllowedReplicas int32
	var maximumLimitingCondition string
	var maximumLimitingReason string
	scaleUpLimit := calculateScaleUpLimit(currentReplicas)
	if hpaMaxReplicas > scaleUpLimit {
		maximumAllowedReplicas = scaleUpLimit
		maximumLimitingCondition = "ScaleUpLimit"
		maximumLimitingReason = "the desired replica count is increasing faster than the maximum scale rate"
	} else {
		maximumAllowedReplicas = hpaMaxReplicas
		maximumLimitingCondition = "TooManyReplicas"
		maximumLimitingReason = "the desired replica count is more than the maximum replica count"
	}

	if desiredReplicas < minimumAllowedReplicas {
		// BUGFIX: previously a single shared reason variable was unconditionally
		// overwritten by the maximum branch above, so a recommendation raised to
		// the minimum was reported with a maximum-related reason string.
		return minimumAllowedReplicas, "TooFewReplicas", minimumLimitingReason
	}
	if desiredReplicas > maximumAllowedReplicas {
		return maximumAllowedReplicas, maximumLimitingCondition, maximumLimitingReason
	}
	return desiredReplicas, "DesiredWithinRange", "the desired count is within the acceptable range"
}
// calculateScaleUpLimit returns the largest replica count a single reconcile
// may scale up to: scaleUpLimitFactor times the current count, floored at
// scaleUpLimitMinimum.
func calculateScaleUpLimit(currentReplicas int32) int32 {
	scaled := scaleUpLimitFactor * float64(currentReplicas)
	return int32(math.Max(scaled, scaleUpLimitMinimum))
}
2017-10-11 18:31:04 +00:00
// scaleForResourceMappings attempts to fetch the scale for the
// resource with the given name and namespace, trying each RESTMapping
// in turn until a working one is found. If none work, the first error
// is returned. It returns both the scale, as well as the group-resource from
// the working mapping.
func ( a * HorizontalController ) scaleForResourceMappings ( namespace , name string , mappings [ ] * apimeta . RESTMapping ) ( * autoscalingv1 . Scale , schema . GroupResource , error ) {
var firstErr error
for i , mapping := range mappings {
2018-05-01 17:02:44 +00:00
targetGR := mapping . Resource . GroupResource ( )
2017-10-11 18:31:04 +00:00
scale , err := a . scaleNamespacer . Scales ( namespace ) . Get ( targetGR , name )
if err == nil {
return scale , targetGR , nil
}
// if this is the first error, remember it,
// then go on and try other mappings until we find a good one
if i == 0 {
firstErr = err
}
}
// make sure we handle an empty set of mappings
if firstErr == nil {
firstErr = fmt . Errorf ( "unrecognized resource" )
}
return nil , schema . GroupResource { } , firstErr
}
2017-06-06 19:57:05 +00:00
// setCurrentReplicasInStatus sets the current replica count in the status of the HPA.
func ( a * HorizontalController ) setCurrentReplicasInStatus ( hpa * autoscalingv2 . HorizontalPodAutoscaler , currentReplicas int32 ) {
a . setStatus ( hpa , currentReplicas , hpa . Status . DesiredReplicas , hpa . Status . CurrentMetrics , false )
2016-02-23 12:05:07 +00:00
}
2017-06-06 19:57:05 +00:00
// setStatus recreates the status of the given HPA, updating the current and
// desired replicas, as well as the metric statuses
func ( a * HorizontalController ) setStatus ( hpa * autoscalingv2 . HorizontalPodAutoscaler , currentReplicas , desiredReplicas int32 , metricStatuses [ ] autoscalingv2 . MetricStatus , rescale bool ) {
2016-12-02 20:18:26 +00:00
hpa . Status = autoscalingv2 . HorizontalPodAutoscalerStatus {
CurrentReplicas : currentReplicas ,
DesiredReplicas : desiredReplicas ,
LastScaleTime : hpa . Status . LastScaleTime ,
CurrentMetrics : metricStatuses ,
2017-05-24 21:09:47 +00:00
Conditions : hpa . Status . Conditions ,
2016-01-29 11:20:19 +00:00
}
2015-09-14 13:08:43 +00:00
if rescale {
2016-12-03 18:57:26 +00:00
now := metav1 . NewTime ( time . Now ( ) )
2015-10-13 15:24:23 +00:00
hpa . Status . LastScaleTime = & now
2015-09-14 13:08:43 +00:00
}
2017-06-06 19:57:05 +00:00
}
2015-09-14 13:08:43 +00:00
2017-06-06 19:57:05 +00:00
// updateStatusIfNeeded calls updateStatus only if the status of the new HPA is not the same as the old status
func ( a * HorizontalController ) updateStatusIfNeeded ( oldStatus * autoscalingv2 . HorizontalPodAutoscalerStatus , newHPA * autoscalingv2 . HorizontalPodAutoscaler ) error {
// skip a write if we wouldn't need to update
if apiequality . Semantic . DeepEqual ( oldStatus , & newHPA . Status ) {
return nil
}
return a . updateStatus ( newHPA )
2017-05-24 21:09:47 +00:00
}
2017-06-06 19:57:05 +00:00
// updateStatus actually does the update request for the status of the given HPA
func ( a * HorizontalController ) updateStatus ( hpa * autoscalingv2 . HorizontalPodAutoscaler ) error {
2016-12-02 20:18:26 +00:00
// convert back to autoscalingv1
2017-09-25 13:43:04 +00:00
hpaRaw , err := unsafeConvertToVersionVia ( hpa , autoscalingv1 . SchemeGroupVersion )
2016-12-02 20:18:26 +00:00
if err != nil {
a . eventRecorder . Event ( hpa , v1 . EventTypeWarning , "FailedConvertHPA" , err . Error ( ) )
return fmt . Errorf ( "failed to convert the given HPA to %s: %v" , autoscalingv2 . SchemeGroupVersion . String ( ) , err )
}
hpav1 := hpaRaw . ( * autoscalingv1 . HorizontalPodAutoscaler )
_ , err = a . hpaNamespacer . HorizontalPodAutoscalers ( hpav1 . Namespace ) . UpdateStatus ( hpav1 )
2015-09-14 13:08:43 +00:00
if err != nil {
2016-11-18 20:50:17 +00:00
a . eventRecorder . Event ( hpa , v1 . EventTypeWarning , "FailedUpdateStatus" , err . Error ( ) )
2015-09-14 13:08:43 +00:00
return fmt . Errorf ( "failed to update status for %s: %v" , hpa . Name , err )
}
2016-03-02 08:29:17 +00:00
glog . V ( 2 ) . Infof ( "Successfully updated status for %s" , hpa . Name )
2015-08-17 12:18:26 +00:00
return nil
}
2017-05-24 21:09:47 +00:00
2017-10-16 14:28:16 +00:00
// unsafeConvertToVersionVia is like Scheme.UnsafeConvertToVersion, but it does so via an internal version first.
2017-09-25 13:43:04 +00:00
// We use it since working with v2alpha1 is convenient here, but we want to use the v1 client (and
// can't just use the internal version). Note that conversion mutates the object, so you need to deepcopy
// *before* you call this if the input object came out of a shared cache.
func unsafeConvertToVersionVia ( obj runtime . Object , externalVersion schema . GroupVersion ) ( runtime . Object , error ) {
2017-10-16 11:41:50 +00:00
objInt , err := legacyscheme . Scheme . UnsafeConvertToVersion ( obj , schema . GroupVersion { Group : externalVersion . Group , Version : runtime . APIVersionInternal } )
2017-09-25 13:43:04 +00:00
if err != nil {
return nil , fmt . Errorf ( "failed to convert the given object to the internal version: %v" , err )
}
2017-10-16 11:41:50 +00:00
objExt , err := legacyscheme . Scheme . UnsafeConvertToVersion ( objInt , externalVersion )
2017-09-25 13:43:04 +00:00
if err != nil {
return nil , fmt . Errorf ( "failed to convert the given object back to the external version: %v" , err )
}
return objExt , err
}
2017-05-24 21:09:47 +00:00
// setCondition sets the specific condition type on the given HPA to the specified value with the given reason
// and message. The message and args are treated like a format string. The condition will be added if it is
// not present.
func setCondition(hpa *autoscalingv2.HorizontalPodAutoscaler, conditionType autoscalingv2.HorizontalPodAutoscalerConditionType, status v1.ConditionStatus, reason, message string, args ...interface{}) {
	updated := setConditionInList(hpa.Status.Conditions, conditionType, status, reason, message, args...)
	hpa.Status.Conditions = updated
}
// setConditionInList sets the specific condition type on the given HPA to the specified value with the given
// reason and message. The message and args are treated like a format string. The condition will be added if
// it is not present. The new list will be returned.
func setConditionInList(inputList []autoscalingv2.HorizontalPodAutoscalerCondition, conditionType autoscalingv2.HorizontalPodAutoscalerConditionType, status v1.ConditionStatus, reason, message string, args ...interface{}) []autoscalingv2.HorizontalPodAutoscalerCondition {
	resList := inputList

	// locate an existing condition of this type, if any
	idx := -1
	for i := range resList {
		if resList[i].Type == conditionType {
			idx = i
			break
		}
	}
	if idx < 0 {
		// not present: append a fresh entry and point at it
		resList = append(resList, autoscalingv2.HorizontalPodAutoscalerCondition{
			Type: conditionType,
		})
		idx = len(resList) - 1
	}

	cond := &resList[idx]
	// only bump the transition time when the status value actually flips
	if cond.Status != status {
		cond.LastTransitionTime = metav1.Now()
	}
	cond.Status = status
	cond.Reason = reason
	cond.Message = fmt.Sprintf(message, args...)

	return resList
}