/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package podautoscaler
import (
	"context"
	"fmt"
	"math"
	"time"

	autoscalingv1 "k8s.io/api/autoscaling/v1"
	autoscalingv2 "k8s.io/api/autoscaling/v2beta2"
	v1 "k8s.io/api/core/v1"
	apiequality "k8s.io/apimachinery/pkg/api/equality"
	"k8s.io/apimachinery/pkg/api/errors"
	apimeta "k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/wait"
	autoscalinginformers "k8s.io/client-go/informers/autoscaling/v1"
	coreinformers "k8s.io/client-go/informers/core/v1"
	"k8s.io/client-go/kubernetes/scheme"
	autoscalingclient "k8s.io/client-go/kubernetes/typed/autoscaling/v1"
	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
	autoscalinglisters "k8s.io/client-go/listers/autoscaling/v1"
	corelisters "k8s.io/client-go/listers/core/v1"
	scaleclient "k8s.io/client-go/scale"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/record"
	"k8s.io/client-go/util/workqueue"
	"k8s.io/klog/v2"
	"k8s.io/kubernetes/pkg/api/legacyscheme"
	"k8s.io/kubernetes/pkg/controller"
	metricsclient "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
)

var (
	scaleUpLimitFactor  = 2.0
	scaleUpLimitMinimum = 4.0
)

type timestampedRecommendation struct {
	recommendation int32
	timestamp      time.Time
}

type timestampedScaleEvent struct {
	replicaChange int32 // positive for scaleUp, negative for scaleDown
	timestamp     time.Time
	outdated      bool
}

// HorizontalController is responsible for synchronizing the HPA objects stored
// in the system with the actual deployments/replication controllers they
// control.
type HorizontalController struct {
	scaleNamespacer scaleclient.ScalesGetter
	hpaNamespacer   autoscalingclient.HorizontalPodAutoscalersGetter
	mapper          apimeta.RESTMapper

	replicaCalc   *ReplicaCalculator
	eventRecorder record.EventRecorder

	downscaleStabilisationWindow time.Duration

	// hpaLister is able to list/get HPAs from the shared cache from the informer passed in to
	// NewHorizontalController.
	hpaLister       autoscalinglisters.HorizontalPodAutoscalerLister
	hpaListerSynced cache.InformerSynced

	// podLister is able to list/get Pods from the shared cache from the informer passed in to
	// NewHorizontalController.
	podLister       corelisters.PodLister
	podListerSynced cache.InformerSynced

	// Controllers that need to be synced
	queue workqueue.RateLimitingInterface

	// Latest unstabilized recommendations for each autoscaler.
	recommendations map[string][]timestampedRecommendation

	// Latest autoscaler events
	scaleUpEvents   map[string][]timestampedScaleEvent
	scaleDownEvents map[string][]timestampedScaleEvent
}

// NewHorizontalController creates a new HorizontalController.
func NewHorizontalController(
	evtNamespacer v1core.EventsGetter,
	scaleNamespacer scaleclient.ScalesGetter,
	hpaNamespacer autoscalingclient.HorizontalPodAutoscalersGetter,
	mapper apimeta.RESTMapper,
	metricsClient metricsclient.MetricsClient,
	hpaInformer autoscalinginformers.HorizontalPodAutoscalerInformer,
	podInformer coreinformers.PodInformer,
	resyncPeriod time.Duration,
	downscaleStabilisationWindow time.Duration,
	tolerance float64,
	cpuInitializationPeriod,
	delayOfInitialReadinessStatus time.Duration,
) *HorizontalController {
	broadcaster := record.NewBroadcaster()
	broadcaster.StartStructuredLogging(0)
	broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: evtNamespacer.Events("")})
	recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "horizontal-pod-autoscaler"})

	hpaController := &HorizontalController{
		eventRecorder:                recorder,
		scaleNamespacer:              scaleNamespacer,
		hpaNamespacer:                hpaNamespacer,
		downscaleStabilisationWindow: downscaleStabilisationWindow,
		queue:                        workqueue.NewNamedRateLimitingQueue(NewDefaultHPARateLimiter(resyncPeriod), "horizontalpodautoscaler"),
		mapper:                       mapper,
		recommendations:              map[string][]timestampedRecommendation{},
		scaleUpEvents:                map[string][]timestampedScaleEvent{},
		scaleDownEvents:              map[string][]timestampedScaleEvent{},
	}

	hpaInformer.Informer().AddEventHandlerWithResyncPeriod(
		cache.ResourceEventHandlerFuncs{
			AddFunc:    hpaController.enqueueHPA,
			UpdateFunc: hpaController.updateHPA,
			DeleteFunc: hpaController.deleteHPA,
		},
		resyncPeriod,
	)
	hpaController.hpaLister = hpaInformer.Lister()
	hpaController.hpaListerSynced = hpaInformer.Informer().HasSynced

	hpaController.podLister = podInformer.Lister()
	hpaController.podListerSynced = podInformer.Informer().HasSynced

	replicaCalc := NewReplicaCalculator(
		metricsClient,
		hpaController.podLister,
		tolerance,
		cpuInitializationPeriod,
		delayOfInitialReadinessStatus,
	)
	hpaController.replicaCalc = replicaCalc

	return hpaController
}

// Run begins watching and syncing.
func (a *HorizontalController) Run(stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()
	defer a.queue.ShutDown()

	klog.Infof("Starting HPA controller")
	defer klog.Infof("Shutting down HPA controller")

	if !cache.WaitForNamedCacheSync("HPA", stopCh, a.hpaListerSynced, a.podListerSynced) {
		return
	}

	// start a single worker (we may wish to start more in the future)
	go wait.Until(a.worker, time.Second, stopCh)

	<-stopCh
}

// obj could be an *v1.HorizontalPodAutoscaler, or a DeletionFinalStateUnknown marker item.
func (a *HorizontalController) updateHPA(old, cur interface{}) {
	a.enqueueHPA(cur)
}

// obj could be an *v1.HorizontalPodAutoscaler, or a DeletionFinalStateUnknown marker item.
func (a *HorizontalController) enqueueHPA(obj interface{}) {
	key, err := controller.KeyFunc(obj)
	if err != nil {
		utilruntime.HandleError(fmt.Errorf("couldn't get key for object %+v: %v", obj, err))
		return
	}

	// Requests are always added to the queue with a resyncPeriod delay. If there's already
	// a request for the HPA in the queue then a new request is always dropped. Requests spend
	// the resync interval in the queue so HPAs are processed every resync interval.
	a.queue.AddRateLimited(key)
}

func (a *HorizontalController) deleteHPA(obj interface{}) {
	key, err := controller.KeyFunc(obj)
	if err != nil {
		utilruntime.HandleError(fmt.Errorf("couldn't get key for object %+v: %v", obj, err))
		return
	}

	// TODO: could we leak if we fail to get the key?
	a.queue.Forget(key)
}
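
// worker processes items from the work queue until the queue is shut down.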
func (a *HorizontalController) worker() {
	for a.processNextWorkItem() {
	}
	klog.Infof("horizontal pod autoscaler controller worker shutting down")
}

func (a *HorizontalController) processNextWorkItem() bool {
	key, quit := a.queue.Get()
	if quit {
		return false
	}
	defer a.queue.Done(key)

	deleted, err := a.reconcileKey(key.(string))
	if err != nil {
		utilruntime.HandleError(err)
	}
	// Requests are always added to the queue with a resyncPeriod delay. If there's already a
	// request for the HPA in the queue then a new request is always dropped. Requests spend
	// resyncPeriod in the queue so HPAs are processed every resyncPeriod.
	// A request is added here just in case the last resync didn't insert a request into the
	// queue. This happens quite often because there is a race condition between adding requests
	// after resyncPeriod and removing them from the queue. A request can be added by resync
	// before the previous request is removed from the queue. If we didn't add a request here,
	// then in that case one request would be dropped and the HPA would only be processed after
	// 2 x resyncPeriod.
	if !deleted {
		a.queue.AddRateLimited(key)
	}

	return true
}

// computeReplicasForMetrics computes the desired number of replicas for the metric specifications listed in the HPA,
// returning the maximum of the computed replica counts, a description of the associated metric, and the statuses of
// all metrics computed.
func (a *HorizontalController) computeReplicasForMetrics(hpa *autoscalingv2.HorizontalPodAutoscaler, scale *autoscalingv1.Scale,
	metricSpecs []autoscalingv2.MetricSpec) (replicas int32, metric string, statuses []autoscalingv2.MetricStatus, timestamp time.Time, err error) {

	if scale.Status.Selector == "" {
		errMsg := "selector is required"
		a.eventRecorder.Event(hpa, v1.EventTypeWarning, "SelectorRequired", errMsg)
		setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "InvalidSelector", "the HPA target's scale is missing a selector")
		return 0, "", nil, time.Time{}, fmt.Errorf(errMsg)
	}

	selector, err := labels.Parse(scale.Status.Selector)
	if err != nil {
		errMsg := fmt.Sprintf("couldn't convert selector into a corresponding internal selector object: %v", err)
		a.eventRecorder.Event(hpa, v1.EventTypeWarning, "InvalidSelector", errMsg)
		setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "InvalidSelector", errMsg)
		return 0, "", nil, time.Time{}, fmt.Errorf(errMsg)
	}

	specReplicas := scale.Spec.Replicas
	statusReplicas := scale.Status.Replicas
	statuses = make([]autoscalingv2.MetricStatus, len(metricSpecs))

	invalidMetricsCount := 0
	var invalidMetricError error
	var invalidMetricCondition autoscalingv2.HorizontalPodAutoscalerCondition

	for i, metricSpec := range metricSpecs {
		replicaCountProposal, metricNameProposal, timestampProposal, condition, err := a.computeReplicasForMetric(hpa, metricSpec, specReplicas, statusReplicas, selector, &statuses[i])

		if err != nil {
			if invalidMetricsCount <= 0 {
				invalidMetricCondition = condition
				invalidMetricError = err
			}
			invalidMetricsCount++
		}
		if err == nil && (replicas == 0 || replicaCountProposal > replicas) {
			timestamp = timestampProposal
			replicas = replicaCountProposal
			metric = metricNameProposal
		}
	}

	// If all metrics are invalid, or some are invalid and we would scale down,
	// return an error and set the condition of the HPA based on the first invalid metric.
	// Otherwise set the condition as scaling active as we're going to scale.
	if invalidMetricsCount >= len(metricSpecs) || (invalidMetricsCount > 0 && replicas < specReplicas) {
		setCondition(hpa, invalidMetricCondition.Type, invalidMetricCondition.Status, invalidMetricCondition.Reason, invalidMetricCondition.Message)
		return 0, "", statuses, time.Time{}, fmt.Errorf("invalid metrics (%v invalid out of %v), first error is: %v", invalidMetricsCount, len(metricSpecs), invalidMetricError)
	}
	setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionTrue, "ValidMetricFound", "the HPA was able to successfully calculate a replica count from %s", metric)
	return replicas, metric, statuses, timestamp, nil
}

// computeReplicasForMetric computes the desired number of replicas for a specific HPA and metric specification,
// returning the metric status and a proposed condition to be set on the HPA object.
func (a *HorizontalController) computeReplicasForMetric(hpa *autoscalingv2.HorizontalPodAutoscaler, spec autoscalingv2.MetricSpec,
	specReplicas, statusReplicas int32, selector labels.Selector, status *autoscalingv2.MetricStatus) (replicaCountProposal int32, metricNameProposal string,
	timestampProposal time.Time, condition autoscalingv2.HorizontalPodAutoscalerCondition, err error) {

	switch spec.Type {
	case autoscalingv2.ObjectMetricSourceType:
		metricSelector, err := metav1.LabelSelectorAsSelector(spec.Object.Metric.Selector)
		if err != nil {
			condition := a.getUnableComputeReplicaCountCondition(hpa, "FailedGetObjectMetric", err)
			return 0, "", time.Time{}, condition, fmt.Errorf("failed to get object metric value: %v", err)
		}
		replicaCountProposal, timestampProposal, metricNameProposal, condition, err = a.computeStatusForObjectMetric(specReplicas, statusReplicas, spec, hpa, selector, status, metricSelector)
		if err != nil {
			return 0, "", time.Time{}, condition, fmt.Errorf("failed to get object metric value: %v", err)
		}
	case autoscalingv2.PodsMetricSourceType:
		metricSelector, err := metav1.LabelSelectorAsSelector(spec.Pods.Metric.Selector)
		if err != nil {
			condition := a.getUnableComputeReplicaCountCondition(hpa, "FailedGetPodsMetric", err)
			return 0, "", time.Time{}, condition, fmt.Errorf("failed to get pods metric value: %v", err)
		}
		replicaCountProposal, timestampProposal, metricNameProposal, condition, err = a.computeStatusForPodsMetric(specReplicas, spec, hpa, selector, status, metricSelector)
		if err != nil {
			return 0, "", time.Time{}, condition, fmt.Errorf("failed to get pods metric value: %v", err)
		}
	case autoscalingv2.ResourceMetricSourceType:
		replicaCountProposal, timestampProposal, metricNameProposal, condition, err = a.computeStatusForResourceMetric(specReplicas, spec, hpa, selector, status)
		if err != nil {
			return 0, "", time.Time{}, condition, err
		}
	case autoscalingv2.ContainerResourceMetricSourceType:
		replicaCountProposal, timestampProposal, metricNameProposal, condition, err = a.computeStatusForContainerResourceMetric(specReplicas, spec, hpa, selector, status)
		if err != nil {
			return 0, "", time.Time{}, condition, err
		}
	case autoscalingv2.ExternalMetricSourceType:
		replicaCountProposal, timestampProposal, metricNameProposal, condition, err = a.computeStatusForExternalMetric(specReplicas, statusReplicas, spec, hpa, selector, status)
		if err != nil {
			return 0, "", time.Time{}, condition, err
		}
	default:
		errMsg := fmt.Sprintf("unknown metric source type %q", string(spec.Type))
		err = fmt.Errorf(errMsg)
		condition := a.getUnableComputeReplicaCountCondition(hpa, "InvalidMetricSourceType", err)
		return 0, "", time.Time{}, condition, err
	}

	return replicaCountProposal, metricNameProposal, timestampProposal, autoscalingv2.HorizontalPodAutoscalerCondition{}, nil
}

func (a *HorizontalController) reconcileKey(key string) (deleted bool, err error) {
	namespace, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		return true, err
	}

	hpa, err := a.hpaLister.HorizontalPodAutoscalers(namespace).Get(name)
	if errors.IsNotFound(err) {
		klog.Infof("Horizontal Pod Autoscaler %s has been deleted in %s", name, namespace)
		delete(a.recommendations, key)
		delete(a.scaleUpEvents, key)
		delete(a.scaleDownEvents, key)
		return true, nil
	}
	if err != nil {
		return false, err
	}

	return false, a.reconcileAutoscaler(hpa, key)
}

// computeStatusForObjectMetric computes the desired number of replicas for the specified metric of type ObjectMetricSourceType.
func (a *HorizontalController) computeStatusForObjectMetric(specReplicas, statusReplicas int32, metricSpec autoscalingv2.MetricSpec, hpa *autoscalingv2.HorizontalPodAutoscaler, selector labels.Selector, status *autoscalingv2.MetricStatus, metricSelector labels.Selector) (replicas int32, timestamp time.Time, metricName string, condition autoscalingv2.HorizontalPodAutoscalerCondition, err error) {
	if metricSpec.Object.Target.Type == autoscalingv2.ValueMetricType {
		replicaCountProposal, utilizationProposal, timestampProposal, err := a.replicaCalc.GetObjectMetricReplicas(specReplicas, metricSpec.Object.Target.Value.MilliValue(), metricSpec.Object.Metric.Name, hpa.Namespace, &metricSpec.Object.DescribedObject, selector, metricSelector)
		if err != nil {
			condition := a.getUnableComputeReplicaCountCondition(hpa, "FailedGetObjectMetric", err)
			return 0, timestampProposal, "", condition, err
		}
		*status = autoscalingv2.MetricStatus{
			Type: autoscalingv2.ObjectMetricSourceType,
			Object: &autoscalingv2.ObjectMetricStatus{
				DescribedObject: metricSpec.Object.DescribedObject,
				Metric: autoscalingv2.MetricIdentifier{
					Name:     metricSpec.Object.Metric.Name,
					Selector: metricSpec.Object.Metric.Selector,
				},
				Current: autoscalingv2.MetricValueStatus{
					Value: resource.NewMilliQuantity(utilizationProposal, resource.DecimalSI),
				},
			},
		}
		return replicaCountProposal, timestampProposal, fmt.Sprintf("%s metric %s", metricSpec.Object.DescribedObject.Kind, metricSpec.Object.Metric.Name), autoscalingv2.HorizontalPodAutoscalerCondition{}, nil
	} else if metricSpec.Object.Target.Type == autoscalingv2.AverageValueMetricType {
		replicaCountProposal, utilizationProposal, timestampProposal, err := a.replicaCalc.GetObjectPerPodMetricReplicas(statusReplicas, metricSpec.Object.Target.AverageValue.MilliValue(), metricSpec.Object.Metric.Name, hpa.Namespace, &metricSpec.Object.DescribedObject, metricSelector)
		if err != nil {
			condition := a.getUnableComputeReplicaCountCondition(hpa, "FailedGetObjectMetric", err)
			return 0, time.Time{}, "", condition, fmt.Errorf("failed to get %s object metric: %v", metricSpec.Object.Metric.Name, err)
		}
		*status = autoscalingv2.MetricStatus{
			Type: autoscalingv2.ObjectMetricSourceType,
			Object: &autoscalingv2.ObjectMetricStatus{
				Metric: autoscalingv2.MetricIdentifier{
					Name:     metricSpec.Object.Metric.Name,
					Selector: metricSpec.Object.Metric.Selector,
				},
				Current: autoscalingv2.MetricValueStatus{
					AverageValue: resource.NewMilliQuantity(utilizationProposal, resource.DecimalSI),
				},
			},
		}
		return replicaCountProposal, timestampProposal, fmt.Sprintf("%s metric %s", metricSpec.Object.DescribedObject.Kind, metricSpec.Object.Metric.Name), autoscalingv2.HorizontalPodAutoscalerCondition{}, nil
	}
	errMsg := "invalid object metric source: neither a value target nor an average value target was set"
	err = fmt.Errorf(errMsg)
	condition = a.getUnableComputeReplicaCountCondition(hpa, "FailedGetObjectMetric", err)
	return 0, time.Time{}, "", condition, err
}

// computeStatusForPodsMetric computes the desired number of replicas for the specified metric of type PodsMetricSourceType.
func (a *HorizontalController) computeStatusForPodsMetric(currentReplicas int32, metricSpec autoscalingv2.MetricSpec, hpa *autoscalingv2.HorizontalPodAutoscaler, selector labels.Selector, status *autoscalingv2.MetricStatus, metricSelector labels.Selector) (replicaCountProposal int32, timestampProposal time.Time, metricNameProposal string, condition autoscalingv2.HorizontalPodAutoscalerCondition, err error) {
	replicaCountProposal, utilizationProposal, timestampProposal, err := a.replicaCalc.GetMetricReplicas(currentReplicas, metricSpec.Pods.Target.AverageValue.MilliValue(), metricSpec.Pods.Metric.Name, hpa.Namespace, selector, metricSelector)
	if err != nil {
		condition = a.getUnableComputeReplicaCountCondition(hpa, "FailedGetPodsMetric", err)
		return 0, timestampProposal, "", condition, err
	}
	*status = autoscalingv2.MetricStatus{
		Type: autoscalingv2.PodsMetricSourceType,
		Pods: &autoscalingv2.PodsMetricStatus{
			Metric: autoscalingv2.MetricIdentifier{
				Name:     metricSpec.Pods.Metric.Name,
				Selector: metricSpec.Pods.Metric.Selector,
			},
			Current: autoscalingv2.MetricValueStatus{
				AverageValue: resource.NewMilliQuantity(utilizationProposal, resource.DecimalSI),
			},
		},
	}

	return replicaCountProposal, timestampProposal, fmt.Sprintf("pods metric %s", metricSpec.Pods.Metric.Name), autoscalingv2.HorizontalPodAutoscalerCondition{}, nil
}
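
// computeStatusForResourceMetricGeneric is the shared helper behind computeStatusForResourceMetric
// and computeStatusForContainerResourceMetric: it computes the proposed replica count and metric
// value status for a resource metric target expressed either as an average value or as an average
// utilization (percentage of request).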
func (a *HorizontalController) computeStatusForResourceMetricGeneric(currentReplicas int32, target autoscalingv2.MetricTarget,
	resourceName v1.ResourceName, namespace string, container string, selector labels.Selector) (replicaCountProposal int32,
	metricStatus *autoscalingv2.MetricValueStatus, timestampProposal time.Time, metricNameProposal string,
	condition autoscalingv2.HorizontalPodAutoscalerCondition, err error) {
	if target.AverageValue != nil {
		var rawProposal int64
		replicaCountProposal, rawProposal, timestampProposal, err := a.replicaCalc.GetRawResourceReplicas(currentReplicas, target.AverageValue.MilliValue(), resourceName, namespace, selector, container)
		if err != nil {
			return 0, nil, time.Time{}, "", condition, fmt.Errorf("failed to get %s utilization: %v", resourceName, err)
		}
		metricNameProposal = fmt.Sprintf("%s resource", resourceName.String())
		status := autoscalingv2.MetricValueStatus{
			AverageValue: resource.NewMilliQuantity(rawProposal, resource.DecimalSI),
		}
		return replicaCountProposal, &status, timestampProposal, metricNameProposal, autoscalingv2.HorizontalPodAutoscalerCondition{}, nil
	}

	if target.AverageUtilization == nil {
		errMsg := "invalid resource metric source: neither a utilization target nor a value target was set"
		return 0, nil, time.Time{}, "", condition, fmt.Errorf(errMsg)
	}

	targetUtilization := *target.AverageUtilization
	replicaCountProposal, percentageProposal, rawProposal, timestampProposal, err := a.replicaCalc.GetResourceReplicas(currentReplicas, targetUtilization, resourceName, namespace, selector, container)
	if err != nil {
		return 0, nil, time.Time{}, "", condition, fmt.Errorf("failed to get %s utilization: %v", resourceName, err)
	}

	metricNameProposal = fmt.Sprintf("%s resource utilization (percentage of request)", resourceName)
	status := autoscalingv2.MetricValueStatus{
		AverageUtilization: &percentageProposal,
		AverageValue:       resource.NewMilliQuantity(rawProposal, resource.DecimalSI),
	}
	return replicaCountProposal, &status, timestampProposal, metricNameProposal, autoscalingv2.HorizontalPodAutoscalerCondition{}, nil
}

// computeStatusForResourceMetric computes the desired number of replicas for the specified metric of type ResourceMetricSourceType.
func (a *HorizontalController) computeStatusForResourceMetric(currentReplicas int32, metricSpec autoscalingv2.MetricSpec, hpa *autoscalingv2.HorizontalPodAutoscaler,
	selector labels.Selector, status *autoscalingv2.MetricStatus) (replicaCountProposal int32, timestampProposal time.Time,
	metricNameProposal string, condition autoscalingv2.HorizontalPodAutoscalerCondition, err error) {
	replicaCountProposal, metricValueStatus, timestampProposal, metricNameProposal, condition, err := a.computeStatusForResourceMetricGeneric(currentReplicas, metricSpec.Resource.Target, metricSpec.Resource.Name, hpa.Namespace, "", selector)
	if err != nil {
		condition = a.getUnableComputeReplicaCountCondition(hpa, "FailedGetResourceMetric", err)
		return replicaCountProposal, timestampProposal, metricNameProposal, condition, err
	}
	*status = autoscalingv2.MetricStatus{
		Type: autoscalingv2.ResourceMetricSourceType,
		Resource: &autoscalingv2.ResourceMetricStatus{
			Name:    metricSpec.Resource.Name,
			Current: *metricValueStatus,
		},
	}
	return replicaCountProposal, timestampProposal, metricNameProposal, condition, nil
}

// computeStatusForContainerResourceMetric computes the desired number of replicas for the specified metric of type ContainerResourceMetricSourceType.
func (a *HorizontalController) computeStatusForContainerResourceMetric(currentReplicas int32, metricSpec autoscalingv2.MetricSpec, hpa *autoscalingv2.HorizontalPodAutoscaler,
	selector labels.Selector, status *autoscalingv2.MetricStatus) (replicaCountProposal int32, timestampProposal time.Time,
	metricNameProposal string, condition autoscalingv2.HorizontalPodAutoscalerCondition, err error) {
	replicaCountProposal, metricValueStatus, timestampProposal, metricNameProposal, condition, err := a.computeStatusForResourceMetricGeneric(currentReplicas, metricSpec.ContainerResource.Target, metricSpec.ContainerResource.Name, hpa.Namespace, metricSpec.ContainerResource.Container, selector)
	if err != nil {
		condition = a.getUnableComputeReplicaCountCondition(hpa, "FailedGetContainerResourceMetric", err)
		return replicaCountProposal, timestampProposal, metricNameProposal, condition, err
	}
	*status = autoscalingv2.MetricStatus{
		Type: autoscalingv2.ContainerResourceMetricSourceType,
		ContainerResource: &autoscalingv2.ContainerResourceMetricStatus{
			Name:      metricSpec.ContainerResource.Name,
			Container: metricSpec.ContainerResource.Container,
			Current:   *metricValueStatus,
		},
	}
	return replicaCountProposal, timestampProposal, metricNameProposal, condition, nil
}

// computeStatusForExternalMetric computes the desired number of replicas for the specified metric of type ExternalMetricSourceType.
func (a *HorizontalController) computeStatusForExternalMetric(specReplicas, statusReplicas int32, metricSpec autoscalingv2.MetricSpec, hpa *autoscalingv2.HorizontalPodAutoscaler, selector labels.Selector, status *autoscalingv2.MetricStatus) (replicaCountProposal int32, timestampProposal time.Time, metricNameProposal string, condition autoscalingv2.HorizontalPodAutoscalerCondition, err error) {
	if metricSpec.External.Target.AverageValue != nil {
		replicaCountProposal, utilizationProposal, timestampProposal, err := a.replicaCalc.GetExternalPerPodMetricReplicas(statusReplicas, metricSpec.External.Target.AverageValue.MilliValue(), metricSpec.External.Metric.Name, hpa.Namespace, metricSpec.External.Metric.Selector)
		if err != nil {
			condition = a.getUnableComputeReplicaCountCondition(hpa, "FailedGetExternalMetric", err)
			return 0, time.Time{}, "", condition, fmt.Errorf("failed to get %s external metric: %v", metricSpec.External.Metric.Name, err)
		}
		*status = autoscalingv2.MetricStatus{
			Type: autoscalingv2.ExternalMetricSourceType,
			External: &autoscalingv2.ExternalMetricStatus{
				Metric: autoscalingv2.MetricIdentifier{
					Name:     metricSpec.External.Metric.Name,
					Selector: metricSpec.External.Metric.Selector,
				},
				Current: autoscalingv2.MetricValueStatus{
					AverageValue: resource.NewMilliQuantity(utilizationProposal, resource.DecimalSI),
				},
			},
		}
		return replicaCountProposal, timestampProposal, fmt.Sprintf("external metric %s(%+v)", metricSpec.External.Metric.Name, metricSpec.External.Metric.Selector), autoscalingv2.HorizontalPodAutoscalerCondition{}, nil
	}
	if metricSpec.External.Target.Value != nil {
		replicaCountProposal, utilizationProposal, timestampProposal, err := a.replicaCalc.GetExternalMetricReplicas(specReplicas, metricSpec.External.Target.Value.MilliValue(), metricSpec.External.Metric.Name, hpa.Namespace, metricSpec.External.Metric.Selector, selector)
		if err != nil {
			condition = a.getUnableComputeReplicaCountCondition(hpa, "FailedGetExternalMetric", err)
			return 0, time.Time{}, "", condition, fmt.Errorf("failed to get external metric %s: %v", metricSpec.External.Metric.Name, err)
		}
		*status = autoscalingv2.MetricStatus{
			Type: autoscalingv2.ExternalMetricSourceType,
			External: &autoscalingv2.ExternalMetricStatus{
				Metric: autoscalingv2.MetricIdentifier{
					Name:     metricSpec.External.Metric.Name,
					Selector: metricSpec.External.Metric.Selector,
				},
				Current: autoscalingv2.MetricValueStatus{
					Value: resource.NewMilliQuantity(utilizationProposal, resource.DecimalSI),
				},
			},
		}
		return replicaCountProposal, timestampProposal, fmt.Sprintf("external metric %s(%+v)", metricSpec.External.Metric.Name, metricSpec.External.Metric.Selector), autoscalingv2.HorizontalPodAutoscalerCondition{}, nil
	}
	errMsg := "invalid external metric source: neither a value target nor an average value target was set"
	err = fmt.Errorf(errMsg)
	condition = a.getUnableComputeReplicaCountCondition(hpa, "FailedGetExternalMetric", err)
	return 0, time.Time{}, "", condition, fmt.Errorf(errMsg)
}

func (a *HorizontalController) recordInitialRecommendation(currentReplicas int32, key string) {
	if a.recommendations[key] == nil {
		a.recommendations[key] = []timestampedRecommendation{{currentReplicas, time.Now()}}
	}
}
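
// reconcileAutoscaler is the reconciliation step for a single HPA object: it fetches the target's
// scale subresource, computes the desired replica count from the configured metrics, normalizes it
// against the min/max bounds and any behavior constraints, and then updates the scale and the HPA
// status accordingly.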
func (a *HorizontalController) reconcileAutoscaler(hpav1Shared *autoscalingv1.HorizontalPodAutoscaler, key string) error {
	// make a copy so that we never mutate the shared informer cache (conversion can mutate the object)
	hpav1 := hpav1Shared.DeepCopy()
	// then, convert to autoscaling/v2, which makes our lives easier when calculating metrics
	hpaRaw, err := unsafeConvertToVersionVia(hpav1, autoscalingv2.SchemeGroupVersion)
	if err != nil {
		a.eventRecorder.Event(hpav1, v1.EventTypeWarning, "FailedConvertHPA", err.Error())
		return fmt.Errorf("failed to convert the given HPA to %s: %v", autoscalingv2.SchemeGroupVersion.String(), err)
	}
	hpa := hpaRaw.(*autoscalingv2.HorizontalPodAutoscaler)
	hpaStatusOriginal := hpa.Status.DeepCopy()

	reference := fmt.Sprintf("%s/%s/%s", hpa.Spec.ScaleTargetRef.Kind, hpa.Namespace, hpa.Spec.ScaleTargetRef.Name)

	targetGV, err := schema.ParseGroupVersion(hpa.Spec.ScaleTargetRef.APIVersion)
	if err != nil {
		a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetScale", err.Error())
		setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionFalse, "FailedGetScale", "the HPA controller was unable to get the target's current scale: %v", err)
		a.updateStatusIfNeeded(hpaStatusOriginal, hpa)
		return fmt.Errorf("invalid API version in scale target reference: %v", err)
	}

	targetGK := schema.GroupKind{
		Group: targetGV.Group,
		Kind:  hpa.Spec.ScaleTargetRef.Kind,
	}

	mappings, err := a.mapper.RESTMappings(targetGK)
	if err != nil {
		a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetScale", err.Error())
		setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionFalse, "FailedGetScale", "the HPA controller was unable to get the target's current scale: %v", err)
		a.updateStatusIfNeeded(hpaStatusOriginal, hpa)
		return fmt.Errorf("unable to determine resource for scale target reference: %v", err)
	}

	scale, targetGR, err := a.scaleForResourceMappings(hpa.Namespace, hpa.Spec.ScaleTargetRef.Name, mappings)
	if err != nil {
		a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetScale", err.Error())
		setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionFalse, "FailedGetScale", "the HPA controller was unable to get the target's current scale: %v", err)
		a.updateStatusIfNeeded(hpaStatusOriginal, hpa)
		return fmt.Errorf("failed to query scale subresource for %s: %v", reference, err)
	}
	setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, "SucceededGetScale", "the HPA controller was able to get the target's current scale")
	currentReplicas := scale.Spec.Replicas
	a.recordInitialRecommendation(currentReplicas, key)

	var (
		metricStatuses        []autoscalingv2.MetricStatus
		metricDesiredReplicas int32
		metricName            string
	)

	desiredReplicas := int32(0)
	rescaleReason := ""

	var minReplicas int32

	if hpa.Spec.MinReplicas != nil {
		minReplicas = *hpa.Spec.MinReplicas
	} else {
		// Default value
		minReplicas = 1
	}

	rescale := true

	if scale.Spec.Replicas == 0 && minReplicas != 0 {
		// Autoscaling is disabled for this resource
		desiredReplicas = 0
		rescale = false
		setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "ScalingDisabled", "scaling is disabled since the replica count of the target is zero")
	} else if currentReplicas > hpa.Spec.MaxReplicas {
		rescaleReason = "Current number of replicas above Spec.MaxReplicas"
		desiredReplicas = hpa.Spec.MaxReplicas
	} else if currentReplicas < minReplicas {
		rescaleReason = "Current number of replicas below Spec.MinReplicas"
		desiredReplicas = minReplicas
	} else {
		var metricTimestamp time.Time
		metricDesiredReplicas, metricName, metricStatuses, metricTimestamp, err = a.computeReplicasForMetrics(hpa, scale, hpa.Spec.Metrics)
		if err != nil {
			a.setCurrentReplicasInStatus(hpa, currentReplicas)
			if err := a.updateStatusIfNeeded(hpaStatusOriginal, hpa); err != nil {
				utilruntime.HandleError(err)
			}
			a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedComputeMetricsReplicas", err.Error())
			return fmt.Errorf("failed to compute desired number of replicas based on listed metrics for %s: %v", reference, err)
		}

		klog.V(4).Infof("proposing %v desired replicas (based on %s from %s) for %s", metricDesiredReplicas, metricName, metricTimestamp, reference)

		rescaleMetric := ""
		if metricDesiredReplicas > desiredReplicas {
			desiredReplicas = metricDesiredReplicas
			rescaleMetric = metricName
		}
		if desiredReplicas > currentReplicas {
			rescaleReason = fmt.Sprintf("%s above target", rescaleMetric)
		}
		if desiredReplicas < currentReplicas {
			rescaleReason = "All metrics below target"
		}
		if hpa.Spec.Behavior == nil {
			desiredReplicas = a.normalizeDesiredReplicas(hpa, key, currentReplicas, desiredReplicas, minReplicas)
		} else {
			desiredReplicas = a.normalizeDesiredReplicasWithBehaviors(hpa, key, currentReplicas, desiredReplicas, minReplicas)
		}
		rescale = desiredReplicas != currentReplicas
	}

	if rescale {
		scale.Spec.Replicas = desiredReplicas
		_, err = a.scaleNamespacer.Scales(hpa.Namespace).Update(context.TODO(), targetGR, scale, metav1.UpdateOptions{})
		if err != nil {
			a.eventRecorder.Eventf(hpa, v1.EventTypeWarning, "FailedRescale", "New size: %d; reason: %s; error: %v", desiredReplicas, rescaleReason, err.Error())
			setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionFalse, "FailedUpdateScale", "the HPA controller was unable to update the target scale: %v", err)
			a.setCurrentReplicasInStatus(hpa, currentReplicas)
			if err := a.updateStatusIfNeeded(hpaStatusOriginal, hpa); err != nil {
				utilruntime.HandleError(err)
			}
			return fmt.Errorf("failed to rescale %s: %v", reference, err)
		}
		setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, "SucceededRescale", "the HPA controller was able to update the target scale to %d", desiredReplicas)
		a.eventRecorder.Eventf(hpa, v1.EventTypeNormal, "SuccessfulRescale", "New size: %d; reason: %s", desiredReplicas, rescaleReason)
		a.storeScaleEvent(hpa.Spec.Behavior, key, currentReplicas, desiredReplicas)
		klog.Infof("Successful rescale of %s, old size: %d, new size: %d, reason: %s",
			hpa.Name, currentReplicas, desiredReplicas, rescaleReason)
	} else {
		klog.V(4).Infof("decided not to scale %s to %v (last scale time was %s)", reference, desiredReplicas, hpa.Status.LastScaleTime)
		desiredReplicas = currentReplicas
	}

	a.setStatus(hpa, currentReplicas, desiredReplicas, metricStatuses, rescale)
	return a.updateStatusIfNeeded(hpaStatusOriginal, hpa)
}

// stabilizeRecommendation:
// - replaces old recommendation with the newest recommendation,
// - returns max of recommendations that are not older than downscaleStabilisationWindow.
func (a *HorizontalController) stabilizeRecommendation(key string, prenormalizedDesiredReplicas int32) int32 {
	maxRecommendation := prenormalizedDesiredReplicas
	foundOldSample := false
	oldSampleIndex := 0
	cutoff := time.Now().Add(-a.downscaleStabilisationWindow)

	for i, rec := range a.recommendations[key] {
		if rec.timestamp.Before(cutoff) {
			foundOldSample = true
			oldSampleIndex = i
		} else if rec.recommendation > maxRecommendation {
			maxRecommendation = rec.recommendation
		}
	}
	if foundOldSample {
		a.recommendations[key][oldSampleIndex] = timestampedRecommendation{prenormalizedDesiredReplicas, time.Now()}
	} else {
		a.recommendations[key] = append(a.recommendations[key], timestampedRecommendation{prenormalizedDesiredReplicas, time.Now()})
	}
	return maxRecommendation
}

// normalizeDesiredReplicas takes the metrics desired replicas value and normalizes it based on the appropriate conditions (i.e. < maxReplicas, >
// minReplicas, etc...)
func (a *HorizontalController) normalizeDesiredReplicas(hpa *autoscalingv2.HorizontalPodAutoscaler, key string, currentReplicas int32, prenormalizedDesiredReplicas int32, minReplicas int32) int32 {
	stabilizedRecommendation := a.stabilizeRecommendation(key, prenormalizedDesiredReplicas)
	if stabilizedRecommendation != prenormalizedDesiredReplicas {
		setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, "ScaleDownStabilized", "recent recommendations were higher than current one, applying the highest recent recommendation")
	} else {
		setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, "ReadyForNewScale", "recommended size matches current size")
	}

	desiredReplicas, condition, reason := convertDesiredReplicasWithRules(currentReplicas, stabilizedRecommendation, minReplicas, hpa.Spec.MaxReplicas)

	if desiredReplicas == stabilizedRecommendation {
		setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionFalse, condition, reason)
	} else {
		setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionTrue, condition, reason)
	}

	return desiredReplicas
}

// NormalizationArg is used to pass all needed information between functions as one structure
type NormalizationArg struct {
	Key               string
	ScaleUpBehavior   *autoscalingv2.HPAScalingRules
	ScaleDownBehavior *autoscalingv2.HPAScalingRules
	MinReplicas       int32
	MaxReplicas       int32
	CurrentReplicas   int32
	DesiredReplicas   int32
}

// normalizeDesiredReplicasWithBehaviors takes the metrics desired replicas value and normalizes it:
// 1. Apply the basic conditions (i.e. < maxReplicas, > minReplicas, etc...)
// 2. Apply the scale up/down limits from the hpaSpec.Behaviors (i.e. add no more than 4 pods)
// 3. Apply the constraints period (i.e. add no more than 4 pods per minute)
// 4. Apply the stabilization (i.e. add no more than 4 pods per minute, and pick the smallest recommendation during last 5 minutes)
func (a *HorizontalController) normalizeDesiredReplicasWithBehaviors(hpa *autoscalingv2.HorizontalPodAutoscaler, key string, currentReplicas, prenormalizedDesiredReplicas, minReplicas int32) int32 {
	a.maybeInitScaleDownStabilizationWindow(hpa)
	normalizationArg := NormalizationArg{
		Key:               key,
		ScaleUpBehavior:   hpa.Spec.Behavior.ScaleUp,
		ScaleDownBehavior: hpa.Spec.Behavior.ScaleDown,
		MinReplicas:       minReplicas,
		MaxReplicas:       hpa.Spec.MaxReplicas,
		CurrentReplicas:   currentReplicas,
		DesiredReplicas:   prenormalizedDesiredReplicas}
	stabilizedRecommendation, reason, message := a.stabilizeRecommendationWithBehaviors(normalizationArg)
	normalizationArg.DesiredReplicas = stabilizedRecommendation
	if stabilizedRecommendation != prenormalizedDesiredReplicas {
		// "ScaleUpStabilized" || "ScaleDownStabilized"
		setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, reason, message)
	} else {
		setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, "ReadyForNewScale", "recommended size matches current size")
	}
	desiredReplicas, reason, message := a.convertDesiredReplicasWithBehaviorRate(normalizationArg)
	if desiredReplicas == stabilizedRecommendation {
		setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionFalse, reason, message)
	} else {
		setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionTrue, reason, message)
	}
	return desiredReplicas
}
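
// maybeInitScaleDownStabilizationWindow defaults ScaleDown.StabilizationWindowSeconds to the
// controller-wide downscaleStabilisationWindow when the HPA specifies scale-down behavior but
// leaves the window unset.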
func (a *HorizontalController) maybeInitScaleDownStabilizationWindow(hpa *autoscalingv2.HorizontalPodAutoscaler) {
	behavior := hpa.Spec.Behavior
	if behavior != nil && behavior.ScaleDown != nil && behavior.ScaleDown.StabilizationWindowSeconds == nil {
		stabilizationWindowSeconds := (int32)(a.downscaleStabilisationWindow.Seconds())
		hpa.Spec.Behavior.ScaleDown.StabilizationWindowSeconds = &stabilizationWindowSeconds
	}
}

// getReplicasChangePerPeriod returns the sum of the replica changes that occurred within the given period (in seconds).
func getReplicasChangePerPeriod(periodSeconds int32, scaleEvents []timestampedScaleEvent) int32 {
	period := time.Second * time.Duration(periodSeconds)
	cutoff := time.Now().Add(-period)
	var replicas int32

	for _, rec := range scaleEvents {
		if rec.timestamp.After(cutoff) {
			replicas += rec.replicaChange
		}
	}
	return replicas
}
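
// getUnableComputeReplicaCountCondition records a warning event on the HPA and returns a
// ScalingActive=False condition with the given reason, wrapping the underlying error in the
// condition message.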
func (a *HorizontalController) getUnableComputeReplicaCountCondition(hpa runtime.Object, reason string, err error) (condition autoscalingv2.HorizontalPodAutoscalerCondition) {
	a.eventRecorder.Event(hpa, v1.EventTypeWarning, reason, err.Error())
	return autoscalingv2.HorizontalPodAutoscalerCondition{
		Type:    autoscalingv2.ScalingActive,
		Status:  v1.ConditionFalse,
		Reason:  reason,
		Message: fmt.Sprintf("the HPA was unable to compute the replica count: %v", err),
	}
}

// storeScaleEvent stores (adds or replaces outdated) scale event.
// outdated events to be replaced were marked as outdated in the `markScaleEventsOutdated` function
func (a *HorizontalController) storeScaleEvent(behavior *autoscalingv2.HorizontalPodAutoscalerBehavior, key string, prevReplicas, newReplicas int32) {
	if behavior == nil {
		return // we should not store any event as they will not be used
	}
	var oldSampleIndex int
	var longestPolicyPeriod int32
	foundOldSample := false
	if newReplicas > prevReplicas {
		longestPolicyPeriod = getLongestPolicyPeriod(behavior.ScaleUp)
		markScaleEventsOutdated(a.scaleUpEvents[key], longestPolicyPeriod)
		replicaChange := newReplicas - prevReplicas
		for i, event := range a.scaleUpEvents[key] {
			if event.outdated {
				foundOldSample = true
				oldSampleIndex = i
			}
		}
		newEvent := timestampedScaleEvent{replicaChange, time.Now(), false}
		if foundOldSample {
			a.scaleUpEvents[key][oldSampleIndex] = newEvent
		} else {
			a.scaleUpEvents[key] = append(a.scaleUpEvents[key], newEvent)
		}
	} else {
		longestPolicyPeriod = getLongestPolicyPeriod(behavior.ScaleDown)
		markScaleEventsOutdated(a.scaleDownEvents[key], longestPolicyPeriod)
		replicaChange := prevReplicas - newReplicas
		for i, event := range a.scaleDownEvents[key] {
			if event.outdated {
				foundOldSample = true
				oldSampleIndex = i
			}
		}
		newEvent := timestampedScaleEvent{replicaChange, time.Now(), false}
		if foundOldSample {
			a.scaleDownEvents[key][oldSampleIndex] = newEvent
		} else {
			a.scaleDownEvents[key] = append(a.scaleDownEvents[key], newEvent)
		}
	}
}

// stabilizeRecommendationWithBehaviors:
// - replaces old recommendation with the newest recommendation,
// - returns {max,min} of recommendations that are not older than constraints.Scale{Up,Down}.DelaySeconds
func (a *HorizontalController) stabilizeRecommendationWithBehaviors(args NormalizationArg) (int32, string, string) {
	now := time.Now()

	foundOldSample := false
	oldSampleIndex := 0

	upRecommendation := args.DesiredReplicas
	upDelaySeconds := *args.ScaleUpBehavior.StabilizationWindowSeconds
	upCutoff := now.Add(-time.Second * time.Duration(upDelaySeconds))

	downRecommendation := args.DesiredReplicas
	downDelaySeconds := *args.ScaleDownBehavior.StabilizationWindowSeconds
	downCutoff := now.Add(-time.Second * time.Duration(downDelaySeconds))

	// Calculate the upper and lower stabilization limits.
	for i, rec := range a.recommendations[args.Key] {
		if rec.timestamp.After(upCutoff) {
			upRecommendation = min(rec.recommendation, upRecommendation)
		}
		if rec.timestamp.After(downCutoff) {
			downRecommendation = max(rec.recommendation, downRecommendation)
		}
		if rec.timestamp.Before(upCutoff) && rec.timestamp.Before(downCutoff) {
			foundOldSample = true
			oldSampleIndex = i
		}
	}

	// Bring the recommendation to within the upper and lower limits (stabilize).
	recommendation := args.CurrentReplicas
	if recommendation < upRecommendation {
		recommendation = upRecommendation
	}
	if recommendation > downRecommendation {
		recommendation = downRecommendation
	}

	// Record the unstabilized recommendation.
	if foundOldSample {
		a.recommendations[args.Key][oldSampleIndex] = timestampedRecommendation{args.DesiredReplicas, time.Now()}
	} else {
		a.recommendations[args.Key] = append(a.recommendations[args.Key], timestampedRecommendation{args.DesiredReplicas, time.Now()})
	}

	// Determine a human-friendly message.
	var reason, message string
	if args.DesiredReplicas >= args.CurrentReplicas {
		reason = "ScaleUpStabilized"
		message = "recent recommendations were lower than current one, applying the lowest recent recommendation"
	} else {
		reason = "ScaleDownStabilized"
		message = "recent recommendations were higher than current one, applying the highest recent recommendation"
	}
	return recommendation, reason, message
}

// convertDesiredReplicasWithBehaviorRate performs the actual normalization, given the constraint rate.
// It doesn't consider the stabilization window; that is handled separately.
func (a *HorizontalController) convertDesiredReplicasWithBehaviorRate(args NormalizationArg) (int32, string, string) {
	var possibleLimitingReason, possibleLimitingMessage string

	if args.DesiredReplicas > args.CurrentReplicas {
		scaleUpLimit := calculateScaleUpLimitWithScalingRules(args.CurrentReplicas, a.scaleUpEvents[args.Key], args.ScaleUpBehavior)
		if scaleUpLimit < args.CurrentReplicas {
			// We shouldn't scale up further until the scaleUpEvents are cleaned up
			scaleUpLimit = args.CurrentReplicas
		}
		maximumAllowedReplicas := args.MaxReplicas
		if maximumAllowedReplicas > scaleUpLimit {
			maximumAllowedReplicas = scaleUpLimit
			possibleLimitingReason = "ScaleUpLimit"
			possibleLimitingMessage = "the desired replica count is increasing faster than the maximum scale rate"
		} else {
			possibleLimitingReason = "TooManyReplicas"
			possibleLimitingMessage = "the desired replica count is more than the maximum replica count"
		}
		if args.DesiredReplicas > maximumAllowedReplicas {
			return maximumAllowedReplicas, possibleLimitingReason, possibleLimitingMessage
		}
	} else if args.DesiredReplicas < args.CurrentReplicas {
		scaleDownLimit := calculateScaleDownLimitWithBehaviors(args.CurrentReplicas, a.scaleDownEvents[args.Key], args.ScaleDownBehavior)
		if scaleDownLimit > args.CurrentReplicas {
			// We shouldn't scale down further until the scaleDownEvents are cleaned up
			scaleDownLimit = args.CurrentReplicas
		}
		minimumAllowedReplicas := args.MinReplicas
		if minimumAllowedReplicas < scaleDownLimit {
			minimumAllowedReplicas = scaleDownLimit
			possibleLimitingReason = "ScaleDownLimit"
			possibleLimitingMessage = "the desired replica count is decreasing faster than the maximum scale rate"
		} else {
			possibleLimitingMessage = "the desired replica count is less than the minimum replica count"
			possibleLimitingReason = "TooFewReplicas"
		}
		if args.DesiredReplicas < minimumAllowedReplicas {
			return minimumAllowedReplicas, possibleLimitingReason, possibleLimitingMessage
		}
	}
	return args.DesiredReplicas, "DesiredWithinRange", "the desired count is within the acceptable range"
}

// convertDesiredReplicasWithRules performs the actual normalization, without depending on `HorizontalController` or `HorizontalPodAutoscaler`
func convertDesiredReplicasWithRules(currentReplicas, desiredReplicas, hpaMinReplicas, hpaMaxReplicas int32) (int32, string, string) {
	var minimumAllowedReplicas int32
	var maximumAllowedReplicas int32

	var possibleLimitingCondition string
	var possibleLimitingReason string

	minimumAllowedReplicas = hpaMinReplicas

	// Do not scale up too much to prevent an incorrect rapid increase of the number of master replicas caused by
	// bogus CPU usage report from heapster/kubelet (like in issue #32304).
	scaleUpLimit := calculateScaleUpLimit(currentReplicas)

	if hpaMaxReplicas > scaleUpLimit {
		maximumAllowedReplicas = scaleUpLimit
		possibleLimitingCondition = "ScaleUpLimit"
		possibleLimitingReason = "the desired replica count is increasing faster than the maximum scale rate"
	} else {
		maximumAllowedReplicas = hpaMaxReplicas
		possibleLimitingCondition = "TooManyReplicas"
		possibleLimitingReason = "the desired replica count is more than the maximum replica count"
	}

	if desiredReplicas < minimumAllowedReplicas {
		possibleLimitingCondition = "TooFewReplicas"
		possibleLimitingReason = "the desired replica count is less than the minimum replica count"

		return minimumAllowedReplicas, possibleLimitingCondition, possibleLimitingReason
	} else if desiredReplicas > maximumAllowedReplicas {
		return maximumAllowedReplicas, possibleLimitingCondition, possibleLimitingReason
	}

	return desiredReplicas, "DesiredWithinRange", "the desired count is within the acceptable range"
}

func calculateScaleUpLimit(currentReplicas int32) int32 {
	return int32(math.Max(scaleUpLimitFactor*float64(currentReplicas), scaleUpLimitMinimum))
}

// markScaleEventsOutdated sets the 'outdated=true' flag for all scale events that are older than
// the longest policy period and are therefore no longer needed.
func markScaleEventsOutdated(scaleEvents []timestampedScaleEvent, longestPolicyPeriod int32) {
	period := time.Second * time.Duration(longestPolicyPeriod)
	cutoff := time.Now().Add(-period)
	for i, event := range scaleEvents {
		if event.timestamp.Before(cutoff) {
			// outdated scale events are marked for later reuse
			scaleEvents[i].outdated = true
		}
	}
}
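
// getLongestPolicyPeriod returns the longest PeriodSeconds among the policies in the given HPAScalingRules.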
func getLongestPolicyPeriod(scalingRules *autoscalingv2.HPAScalingRules) int32 {
	var longestPolicyPeriod int32
	for _, policy := range scalingRules.Policies {
		if policy.PeriodSeconds > longestPolicyPeriod {
			longestPolicyPeriod = policy.PeriodSeconds
		}
	}
	return longestPolicyPeriod
}
// calculateScaleUpLimitWithScalingRules returns the maximum number of pods that could be added for the given HPAScalingRules
func calculateScaleUpLimitWithScalingRules(currentReplicas int32, scaleEvents []timestampedScaleEvent, scalingRules *autoscalingv2.HPAScalingRules) int32 {
	var result int32
	var proposed int32
	var selectPolicyFn func(int32, int32) int32
	if *scalingRules.SelectPolicy == autoscalingv2.DisabledPolicySelect {
		return currentReplicas // Scaling is disabled
	} else if *scalingRules.SelectPolicy == autoscalingv2.MinPolicySelect {
		result = math.MaxInt32
		selectPolicyFn = min // For scaling up, the lowest change ('min' policy) produces a minimum value
	} else {
		result = math.MinInt32
		selectPolicyFn = max // Use the default policy otherwise to produce the highest possible change
	}
	for _, policy := range scalingRules.Policies {
		replicasAddedInCurrentPeriod := getReplicasChangePerPeriod(policy.PeriodSeconds, scaleEvents)
		periodStartReplicas := currentReplicas - replicasAddedInCurrentPeriod
		if policy.Type == autoscalingv2.PodsScalingPolicy {
			proposed = periodStartReplicas + policy.Value
		} else if policy.Type == autoscalingv2.PercentScalingPolicy {
			// the proposal has to be rounded up; otherwise the proposed change might not
			// increase the replica count, causing the target to never scale up
			proposed = int32(math.Ceil(float64(periodStartReplicas) * (1 + float64(policy.Value)/100)))
		}
		result = selectPolicyFn(result, proposed)
	}
	return result
}
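
// Worked example (illustrative only), assuming the default (Max) select policy,
// currentReplicas=10 and no recent scale events: a pods policy with Value=4
// proposes 10+4=14, a percent policy with Value=100 proposes ceil(10*2)=20, and
// selectPolicyFn=max picks 20 as the scale-up limit.
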
// calculateScaleDownLimitWithBehaviors returns the maximum number of pods that could be deleted for the given HPAScalingRules
func calculateScaleDownLimitWithBehaviors(currentReplicas int32, scaleEvents []timestampedScaleEvent, scalingRules *autoscalingv2.HPAScalingRules) int32 {
	var result int32
	var proposed int32
	var selectPolicyFn func(int32, int32) int32
	if *scalingRules.SelectPolicy == autoscalingv2.DisabledPolicySelect {
		return currentReplicas // Scaling is disabled
	} else if *scalingRules.SelectPolicy == autoscalingv2.MinPolicySelect {
		result = math.MinInt32
		selectPolicyFn = max // For scaling down, the lowest change ('min' policy) produces a maximum value
	} else {
		result = math.MaxInt32
		selectPolicyFn = min // Use the default policy otherwise to produce the highest possible change
	}
	for _, policy := range scalingRules.Policies {
		replicasDeletedInCurrentPeriod := getReplicasChangePerPeriod(policy.PeriodSeconds, scaleEvents)
		periodStartReplicas := currentReplicas + replicasDeletedInCurrentPeriod
		if policy.Type == autoscalingv2.PodsScalingPolicy {
			proposed = periodStartReplicas - policy.Value
		} else if policy.Type == autoscalingv2.PercentScalingPolicy {
			proposed = int32(float64(periodStartReplicas) * (1 - float64(policy.Value)/100))
		}
		result = selectPolicyFn(result, proposed)
	}
	return result
}
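
// Worked example (illustrative only), assuming the default (Max) select policy,
// currentReplicas=10 and no recent scale events: a pods policy with Value=4
// proposes 10-4=6, a percent policy with Value=50 proposes 10*0.5=5, and
// selectPolicyFn=min picks 5, i.e. the policy allowing the larger removal.
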
// scaleForResourceMappings attempts to fetch the scale for the
// resource with the given name and namespace, trying each RESTMapping
// in turn until a working one is found. If none work, the first error
// is returned. It returns both the scale and the group-resource from
// the working mapping.
func (a *HorizontalController) scaleForResourceMappings(namespace, name string, mappings []*apimeta.RESTMapping) (*autoscalingv1.Scale, schema.GroupResource, error) {
	var firstErr error
	for i, mapping := range mappings {
		targetGR := mapping.Resource.GroupResource()
		scale, err := a.scaleNamespacer.Scales(namespace).Get(context.TODO(), targetGR, name, metav1.GetOptions{})
		if err == nil {
			return scale, targetGR, nil
		}
		// if this is the first error, remember it,
		// then go on and try other mappings until we find a good one
		if i == 0 {
			firstErr = err
		}
	}
	// make sure we handle an empty set of mappings
	if firstErr == nil {
		firstErr = fmt.Errorf("unrecognized resource")
	}
	return nil, schema.GroupResource{}, firstErr
}
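
// Illustrative sketch (not part of the controller; the namespace and deployment
// name below are hypothetical): callers resolve the mappings first and let this
// helper pick the first mapping whose /scale subresource is actually reachable:
//
//	mappings, _ := a.mapper.RESTMappings(schema.GroupKind{Group: "apps", Kind: "Deployment"})
//	scale, gr, err := a.scaleForResourceMappings("default", "my-deployment", mappings)
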
// setCurrentReplicasInStatus sets the current replica count in the status of the HPA.
func (a *HorizontalController) setCurrentReplicasInStatus(hpa *autoscalingv2.HorizontalPodAutoscaler, currentReplicas int32) {
	a.setStatus(hpa, currentReplicas, hpa.Status.DesiredReplicas, hpa.Status.CurrentMetrics, false)
}

// setStatus recreates the status of the given HPA, updating the current and
// desired replicas, as well as the metric statuses
func (a *HorizontalController) setStatus(hpa *autoscalingv2.HorizontalPodAutoscaler, currentReplicas, desiredReplicas int32, metricStatuses []autoscalingv2.MetricStatus, rescale bool) {
	hpa.Status = autoscalingv2.HorizontalPodAutoscalerStatus{
		CurrentReplicas: currentReplicas,
		DesiredReplicas: desiredReplicas,
		LastScaleTime:   hpa.Status.LastScaleTime,
		CurrentMetrics:  metricStatuses,
		Conditions:      hpa.Status.Conditions,
	}

	if rescale {
		now := metav1.NewTime(time.Now())
		hpa.Status.LastScaleTime = &now
	}
}

// updateStatusIfNeeded calls updateStatus only if the status of the new HPA is not the same as the old status
func (a *HorizontalController) updateStatusIfNeeded(oldStatus *autoscalingv2.HorizontalPodAutoscalerStatus, newHPA *autoscalingv2.HorizontalPodAutoscaler) error {
	// skip a write if we wouldn't need to update
	if apiequality.Semantic.DeepEqual(oldStatus, &newHPA.Status) {
		return nil
	}
	return a.updateStatus(newHPA)
}

// updateStatus actually does the update request for the status of the given HPA
func (a *HorizontalController) updateStatus(hpa *autoscalingv2.HorizontalPodAutoscaler) error {
	// convert back to autoscalingv1
	hpaRaw, err := unsafeConvertToVersionVia(hpa, autoscalingv1.SchemeGroupVersion)
	if err != nil {
		a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedConvertHPA", err.Error())
		return fmt.Errorf("failed to convert the given HPA to %s: %v", autoscalingv1.SchemeGroupVersion.String(), err)
	}
	hpav1 := hpaRaw.(*autoscalingv1.HorizontalPodAutoscaler)

	_, err = a.hpaNamespacer.HorizontalPodAutoscalers(hpav1.Namespace).UpdateStatus(context.TODO(), hpav1, metav1.UpdateOptions{})
	if err != nil {
		a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedUpdateStatus", err.Error())
		return fmt.Errorf("failed to update status for %s: %v", hpa.Name, err)
	}
	klog.V(2).Infof("Successfully updated status for %s", hpa.Name)
	return nil
}

// unsafeConvertToVersionVia is like Scheme.UnsafeConvertToVersion, but it does so via an internal version first.
// We use it since working with v2beta2 is convenient here, but we want to use the v1 client (and
// can't just use the internal version). Note that conversion mutates the object, so you need to deepcopy
// *before* you call this if the input object came out of a shared cache.
func unsafeConvertToVersionVia(obj runtime.Object, externalVersion schema.GroupVersion) (runtime.Object, error) {
	objInt, err := legacyscheme.Scheme.UnsafeConvertToVersion(obj, schema.GroupVersion{Group: externalVersion.Group, Version: runtime.APIVersionInternal})
	if err != nil {
		return nil, fmt.Errorf("failed to convert the given object to the internal version: %v", err)
	}

	objExt, err := legacyscheme.Scheme.UnsafeConvertToVersion(objInt, externalVersion)
	if err != nil {
		return nil, fmt.Errorf("failed to convert the given object back to the external version: %v", err)
	}

	return objExt, err
}
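
// Illustrative sketch (not part of the controller): converting a v2beta2 HPA for
// use with the v1 client goes via the internal version; deep-copy first if the
// object came from a shared informer cache:
//
//	raw, err := unsafeConvertToVersionVia(hpa.DeepCopy(), autoscalingv1.SchemeGroupVersion)
//	if err == nil {
//		hpav1 := raw.(*autoscalingv1.HorizontalPodAutoscaler)
//		_ = hpav1
//	}
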
// setCondition sets the specific condition type on the given HPA to the specified value with the given reason
// and message. The message and args are treated like a format string. The condition will be added if it is
// not present.
func setCondition(hpa *autoscalingv2.HorizontalPodAutoscaler, conditionType autoscalingv2.HorizontalPodAutoscalerConditionType, status v1.ConditionStatus, reason, message string, args ...interface{}) {
	hpa.Status.Conditions = setConditionInList(hpa.Status.Conditions, conditionType, status, reason, message, args...)
}

// setConditionInList sets the specific condition type on the given HPA to the specified value with the given
// reason and message. The message and args are treated like a format string. The condition will be added if
// it is not present. The new list will be returned.
func setConditionInList(inputList []autoscalingv2.HorizontalPodAutoscalerCondition, conditionType autoscalingv2.HorizontalPodAutoscalerConditionType, status v1.ConditionStatus, reason, message string, args ...interface{}) []autoscalingv2.HorizontalPodAutoscalerCondition {
	resList := inputList
	var existingCond *autoscalingv2.HorizontalPodAutoscalerCondition
	for i, condition := range resList {
		if condition.Type == conditionType {
			// can't take a pointer to an iteration variable
			existingCond = &resList[i]
			break
		}
	}

	if existingCond == nil {
		resList = append(resList, autoscalingv2.HorizontalPodAutoscalerCondition{
			Type: conditionType,
		})
		existingCond = &resList[len(resList)-1]
	}

	if existingCond.Status != status {
		existingCond.LastTransitionTime = metav1.Now()
	}

	existingCond.Status = status
	existingCond.Reason = reason
	existingCond.Message = fmt.Sprintf(message, args...)

	return resList
}
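
// Illustrative sketch (not part of the controller): repeated calls with the same
// status only refresh the reason and message, while a status flip also moves
// LastTransitionTime:
//
//	conds := setConditionInList(nil, autoscalingv2.AbleToScale, v1.ConditionTrue, "ReadyForNewScale", "ok")
//	conds = setConditionInList(conds, autoscalingv2.AbleToScale, v1.ConditionTrue, "ReadyForNewScale", "still ok") // LastTransitionTime unchanged
//	conds = setConditionInList(conds, autoscalingv2.AbleToScale, v1.ConditionFalse, "FailedGetScale", "oops")      // LastTransitionTime updated
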
func max(a, b int32) int32 {
	if a >= b {
		return a
	}
	return b
}

func min(a, b int32) int32 {
	if a <= b {
		return a
	}
	return b
}