/*
Copyright 2015 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package podautoscaler

import (
	"encoding/json"
	"fmt"
	"math"
	"time"

	"github.com/golang/glog"
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/resource"
	"k8s.io/kubernetes/pkg/api/unversioned"
	"k8s.io/kubernetes/pkg/apis/extensions"
	"k8s.io/kubernetes/pkg/client/record"
	unversionedcore "k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned"
	unversionedextensions "k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned"
	"k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
	"k8s.io/kubernetes/pkg/util/wait"
)

const (
	// Usage should exceed the tolerance before we start downscaling or upscaling the pods.
	// TODO: make it a flag or HPA spec element.
	tolerance = 0.1

	defaultTargetCPUUtilizationPercentage = 80

	HpaCustomMetricsTargetAnnotationName = "alpha/target.custom-metrics.podautoscaler.kubernetes.io"
	HpaCustomMetricsStatusAnnotationName = "alpha/status.custom-metrics.podautoscaler.kubernetes.io"
)
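
// Illustrative note (a sketch, not part of the original source): the target annotation above is
// expected to hold a json-serialized extensions.CustomMetricTargetList. Assuming the usual
// lower-case json tags on that type, a target of 10 queries-per-second per pod could look like:
//
//	{"items":[{"name":"qps","value":"10"}]}
//
// The controller reports the observed values back under HpaCustomMetricsStatusAnnotationName as a
// json-serialized extensions.CustomMetricCurrentStatusList with the same layout.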

type HorizontalController struct {
	scaleNamespacer unversionedextensions.ScalesGetter
	hpaNamespacer   unversionedextensions.HorizontalPodAutoscalersGetter

	metricsClient metrics.MetricsClient
	eventRecorder record.EventRecorder
}

var downscaleForbiddenWindow = 5 * time.Minute
var upscaleForbiddenWindow = 3 * time.Minute

func NewHorizontalController(evtNamespacer unversionedcore.EventsGetter, scaleNamespacer unversionedextensions.ScalesGetter, hpaNamespacer unversionedextensions.HorizontalPodAutoscalersGetter, metricsClient metrics.MetricsClient) *HorizontalController {
	broadcaster := record.NewBroadcaster()
	broadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{evtNamespacer.Events("")})
	recorder := broadcaster.NewRecorder(api.EventSource{Component: "horizontal-pod-autoscaler"})

	return &HorizontalController{
		metricsClient:   metricsClient,
		eventRecorder:   recorder,
		scaleNamespacer: scaleNamespacer,
		hpaNamespacer:   hpaNamespacer,
	}
}
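
// A minimal wiring sketch (assumptions, not taken from this file): evtNamespacer, scaleNamespacer and
// hpaNamespacer are typically all satisfied by a generated clientset, and metricsClient by the
// Heapster-backed implementation in the metrics package. Construction and start-up could look roughly like:
//
//	metricsClient := metrics.NewHeapsterMetricsClient(clientset, ...) // hypothetical arguments
//	hpaController := NewHorizontalController(clientset.Core(), clientset.Extensions(), clientset.Extensions(), metricsClient)
//	hpaController.Run(30 * time.Second)
//
// Run starts the reconcile loop in a goroutine and resyncs every syncPeriod.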

func (a *HorizontalController) Run(syncPeriod time.Duration) {
	go wait.Until(func() {
		if err := a.reconcileAutoscalers(); err != nil {
			glog.Errorf("Couldn't reconcile horizontal pod autoscalers: %v", err)
		}
	}, syncPeriod, wait.NeverStop)
}

func (a *HorizontalController) computeReplicasForCPUUtilization(hpa extensions.HorizontalPodAutoscaler, scale *extensions.Scale) (int, *int, time.Time, error) {
	targetUtilization := defaultTargetCPUUtilizationPercentage
	if hpa.Spec.CPUUtilization != nil {
		targetUtilization = hpa.Spec.CPUUtilization.TargetPercentage
	}
	currentReplicas := scale.Status.Replicas
	currentUtilization, timestamp, err := a.metricsClient.GetCPUUtilization(hpa.Namespace, scale.Status.Selector)

	// TODO: what to do on partial errors (like metrics obtained for 75% of pods).
	if err != nil {
		a.eventRecorder.Event(&hpa, api.EventTypeWarning, "FailedGetMetrics", err.Error())
		return 0, nil, time.Time{}, fmt.Errorf("failed to get cpu utilization: %v", err)
	}

	usageRatio := float64(*currentUtilization) / float64(targetUtilization)
	if math.Abs(1.0-usageRatio) > tolerance {
		return int(math.Ceil(usageRatio * float64(currentReplicas))), currentUtilization, timestamp, nil
	} else {
		return currentReplicas, currentUtilization, timestamp, nil
	}
}
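
// Worked example (added for illustration): with a target CPU utilization of 80%, a measured average
// utilization of 100% across 4 replicas gives usageRatio = 100/80 = 1.25. Since |1 - 1.25| > tolerance
// (0.1), the proposal is ceil(1.25 * 4) = 5 replicas. A measured utilization of 76% gives
// usageRatio = 0.95, which is within the tolerance band, so the current replica count is kept.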

// computeReplicasForCustomMetrics computes the desired number of replicas based on the custom metrics
// passed in cmAnnotation as a json-serialized extensions.CustomMetricTargetList.
// It returns the number of replicas, a status string (a json-serialized
// extensions.CustomMetricCurrentStatusList), the last timestamp of the metrics involved in the
// computations, or an error if one occurred.
func (a *HorizontalController) computeReplicasForCustomMetrics(hpa extensions.HorizontalPodAutoscaler, scale *extensions.Scale,
	cmAnnotation string) (int, string, time.Time, error) {

	currentReplicas := scale.Status.Replicas
	replicas := 0
	timestamp := time.Time{}

	if cmAnnotation == "" {
		return 0, "", time.Time{}, nil
	}

	var targetList extensions.CustomMetricTargetList
	if err := json.Unmarshal([]byte(cmAnnotation), &targetList); err != nil {
		return 0, "", time.Time{}, fmt.Errorf("failed to parse custom metrics annotation: %v", err)
	}
	if len(targetList.Items) == 0 {
		return 0, "", time.Time{}, fmt.Errorf("no custom metrics in annotation")
	}

	statusList := extensions.CustomMetricCurrentStatusList{
		Items: make([]extensions.CustomMetricCurrentStatus, 0),
	}

	for _, customMetricTarget := range targetList.Items {
		value, currentTimestamp, err := a.metricsClient.GetCustomMetric(customMetricTarget.Name, hpa.Namespace, scale.Status.Selector)
		// TODO: what to do on partial errors (like metrics obtained for 75% of pods).
		if err != nil {
			a.eventRecorder.Event(&hpa, api.EventTypeWarning, "FailedGetCustomMetrics", err.Error())
			return 0, "", time.Time{}, fmt.Errorf("failed to get custom metric value: %v", err)
		}
		floatTarget := float64(customMetricTarget.TargetValue.MilliValue()) / 1000.0
		usageRatio := *value / floatTarget

		replicaCountProposal := 0
		if math.Abs(1.0-usageRatio) > tolerance {
			replicaCountProposal = int(math.Ceil(usageRatio * float64(currentReplicas)))
		} else {
			replicaCountProposal = currentReplicas
		}
		if replicaCountProposal > replicas {
			timestamp = currentTimestamp
			replicas = replicaCountProposal
		}
		quantity, err := resource.ParseQuantity(fmt.Sprintf("%.3f", *value))
		if err != nil {
			return 0, "", time.Time{}, fmt.Errorf("failed to parse custom metric value: %v", err)
		}
		statusList.Items = append(statusList.Items, extensions.CustomMetricCurrentStatus{
			Name:         customMetricTarget.Name,
			CurrentValue: *quantity,
		})
	}
	byteStatusList, err := json.Marshal(statusList)
	if err != nil {
		return 0, "", time.Time{}, fmt.Errorf("failed to serialize custom metric status: %v", err)
	}

	return replicas, string(byteStatusList), timestamp, nil
}
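
// Worked example (added for illustration): with a single custom metric target of 10 (e.g. qps per pod)
// and a measured average value of 15 across 4 replicas, usageRatio = 15/10 = 1.5, which is outside the
// tolerance band, so the proposal is ceil(1.5 * 4) = 6 replicas. With several metrics in the annotation,
// the largest proposal wins, and the returned status string serializes the observed value of every metric.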

func (a *HorizontalController) reconcileAutoscaler(hpa extensions.HorizontalPodAutoscaler) error {
	reference := fmt.Sprintf("%s/%s/%s", hpa.Spec.ScaleRef.Kind, hpa.Namespace, hpa.Spec.ScaleRef.Name)

	scale, err := a.scaleNamespacer.Scales(hpa.Namespace).Get(hpa.Spec.ScaleRef.Kind, hpa.Spec.ScaleRef.Name)
	if err != nil {
		a.eventRecorder.Event(&hpa, api.EventTypeWarning, "FailedGetScale", err.Error())
		return fmt.Errorf("failed to query scale subresource for %s: %v", reference, err)
	}
	currentReplicas := scale.Status.Replicas

	cpuDesiredReplicas := 0
	var cpuCurrentUtilization *int = nil
	cpuTimestamp := time.Time{}

	cmDesiredReplicas := 0
	cmStatus := ""
	cmTimestamp := time.Time{}

	desiredReplicas := 0
	timestamp := time.Now()

	if currentReplicas > hpa.Spec.MaxReplicas {
		desiredReplicas = hpa.Spec.MaxReplicas
	} else if hpa.Spec.MinReplicas != nil && currentReplicas < *hpa.Spec.MinReplicas {
		desiredReplicas = *hpa.Spec.MinReplicas
	} else if currentReplicas == 0 {
		desiredReplicas = 1
	} else {
		// All basic scenarios covered, the state should be sane, let's use metrics.
		cmAnnotation, cmAnnotationFound := hpa.Annotations[HpaCustomMetricsTargetAnnotationName]

		if hpa.Spec.CPUUtilization != nil || !cmAnnotationFound {
			cpuDesiredReplicas, cpuCurrentUtilization, cpuTimestamp, err = a.computeReplicasForCPUUtilization(hpa, scale)
			if err != nil {
				a.updateCurrentReplicasInStatus(hpa, currentReplicas)
				a.eventRecorder.Event(&hpa, api.EventTypeWarning, "FailedComputeReplicas", err.Error())
				return fmt.Errorf("failed to compute desired number of replicas based on CPU utilization for %s: %v", reference, err)
			}
		}

		if cmAnnotationFound {
			cmDesiredReplicas, cmStatus, cmTimestamp, err = a.computeReplicasForCustomMetrics(hpa, scale, cmAnnotation)
			if err != nil {
				a.updateCurrentReplicasInStatus(hpa, currentReplicas)
				a.eventRecorder.Event(&hpa, api.EventTypeWarning, "FailedComputeCMReplicas", err.Error())
				return fmt.Errorf("failed to compute desired number of replicas based on custom metrics for %s: %v", reference, err)
			}
		}

		if cpuDesiredReplicas > desiredReplicas {
			desiredReplicas = cpuDesiredReplicas
			timestamp = cpuTimestamp
		}
		if cmDesiredReplicas > desiredReplicas {
			desiredReplicas = cmDesiredReplicas
			timestamp = cmTimestamp
		}

		if hpa.Spec.MinReplicas != nil && desiredReplicas < *hpa.Spec.MinReplicas {
			desiredReplicas = *hpa.Spec.MinReplicas
		}

		// TODO: remove when pod idling is done.
		if desiredReplicas == 0 {
			desiredReplicas = 1
		}

		if desiredReplicas > hpa.Spec.MaxReplicas {
			desiredReplicas = hpa.Spec.MaxReplicas
		}
	}

	rescale := shouldScale(hpa, currentReplicas, desiredReplicas, timestamp)
	if rescale {
		scale.Spec.Replicas = desiredReplicas
		_, err = a.scaleNamespacer.Scales(hpa.Namespace).Update(hpa.Spec.ScaleRef.Kind, scale)
		if err != nil {
			a.eventRecorder.Eventf(&hpa, api.EventTypeWarning, "FailedRescale", "New size: %d; error: %v", desiredReplicas, err.Error())
			return fmt.Errorf("failed to rescale %s: %v", reference, err)
		}
		a.eventRecorder.Eventf(&hpa, api.EventTypeNormal, "SuccessfulRescale", "New size: %d", desiredReplicas)
		glog.Infof("Successful rescale of %s, old size: %d, new size: %d",
			hpa.Name, currentReplicas, desiredReplicas)
	} else {
		desiredReplicas = currentReplicas
	}

	return a.updateStatus(hpa, currentReplicas, desiredReplicas, cpuCurrentUtilization, cmStatus, rescale)
}
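
// Note on how proposals are combined above (restating the code, not new behavior): outside the
// degenerate cases (current replicas above max, below min, or zero), the desired count is the larger
// of the CPU-based and custom-metrics-based proposals, then clamped to [minReplicas, maxReplicas]
// and floored at 1. For example, proposals of 3 (CPU) and 6 (custom metrics) with maxReplicas = 5
// yield a desired count of 5.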

func shouldScale(hpa extensions.HorizontalPodAutoscaler, currentReplicas, desiredReplicas int, timestamp time.Time) bool {
	if desiredReplicas != currentReplicas {
		// Going down only if the usageRatio dropped significantly below the target
		// and there was no rescaling in the last downscaleForbiddenWindow.
		if desiredReplicas < currentReplicas &&
			(hpa.Status.LastScaleTime == nil ||
				hpa.Status.LastScaleTime.Add(downscaleForbiddenWindow).Before(timestamp)) {
			return true
		}

		// Going up only if the usage ratio increased significantly above the target
		// and there was no rescaling in the last upscaleForbiddenWindow.
		if desiredReplicas > currentReplicas &&
			(hpa.Status.LastScaleTime == nil ||
				hpa.Status.LastScaleTime.Add(upscaleForbiddenWindow).Before(timestamp)) {
			return true
		}
	}
	return false
}
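
// Worked example (added for illustration): if the last rescale happened at 12:00, a downscale proposal
// is honored only after 12:05 (downscaleForbiddenWindow) and an upscale proposal only after 12:03
// (upscaleForbiddenWindow), based on the metrics timestamp passed in. If the autoscaler has never
// scaled (LastScaleTime == nil), both directions are allowed immediately.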

func (a *HorizontalController) updateCurrentReplicasInStatus(hpa extensions.HorizontalPodAutoscaler, currentReplicas int) {
	err := a.updateStatus(hpa, currentReplicas, hpa.Status.DesiredReplicas, hpa.Status.CurrentCPUUtilizationPercentage, hpa.Annotations[HpaCustomMetricsStatusAnnotationName], false)
	if err != nil {
		glog.Errorf("%v", err)
	}
}

func (a *HorizontalController) updateStatus(hpa extensions.HorizontalPodAutoscaler, currentReplicas, desiredReplicas int, cpuCurrentUtilization *int, cmStatus string, rescale bool) error {
	hpa.Status = extensions.HorizontalPodAutoscalerStatus{
		CurrentReplicas:                 currentReplicas,
		DesiredReplicas:                 desiredReplicas,
		CurrentCPUUtilizationPercentage: cpuCurrentUtilization,
		LastScaleTime:                   hpa.Status.LastScaleTime,
	}
	if cmStatus != "" {
		hpa.Annotations[HpaCustomMetricsStatusAnnotationName] = cmStatus
	}

	if rescale {
		now := unversioned.NewTime(time.Now())
		hpa.Status.LastScaleTime = &now
	}

	_, err := a.hpaNamespacer.HorizontalPodAutoscalers(hpa.Namespace).UpdateStatus(&hpa)
	if err != nil {
		a.eventRecorder.Event(&hpa, api.EventTypeWarning, "FailedUpdateStatus", err.Error())
		return fmt.Errorf("failed to update status for %s: %v", hpa.Name, err)
	}
	return nil
}

func (a *HorizontalController) reconcileAutoscalers() error {
	ns := api.NamespaceAll
	list, err := a.hpaNamespacer.HorizontalPodAutoscalers(ns).List(api.ListOptions{})
	if err != nil {
		return fmt.Errorf("error listing horizontal pod autoscalers: %v", err)
	}
	for _, hpa := range list.Items {
		err := a.reconcileAutoscaler(hpa)
		if err != nil {
			glog.Warningf("Failed to reconcile %s: %v", hpa.Name, err)
		}
	}
	return nil
}