/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package metrics

import (
	"encoding/json"
	"fmt"
	"io"
	"testing"
	"time"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes/fake"
	restclient "k8s.io/client-go/rest"
	core "k8s.io/client-go/testing"

	heapster "k8s.io/heapster/metrics/api/v1/types"
	metricsapi "k8s.io/metrics/pkg/apis/metrics/v1alpha1"

	"github.com/stretchr/testify/assert"
)

var fixedTimestamp = time.Date(2015, time.November, 10, 12, 30, 0, 0, time.UTC)

func (w fakeResponseWrapper) DoRaw() ([]byte, error) {
	return w.raw, nil
}

func (w fakeResponseWrapper) Stream() (io.ReadCloser, error) {
	return nil, nil
}

func newFakeResponseWrapper(raw []byte) fakeResponseWrapper {
	return fakeResponseWrapper{raw: raw}
}

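// fakeResponseWrapper is a stub restclient.ResponseWrapper used by the proxy
// reactors below: DoRaw returns the canned payload and Stream is unused in these tests.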
type fakeResponseWrapper struct {
	raw []byte
}

// timestamp is used for establishing order on metricPoints
type metricPoint struct {
	level     uint64
	timestamp int
}

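// testCase describes a single run against the Heapster metrics client: the pods to
// create, the fake metrics to report, and the values (or error) expected back.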
type testCase struct {
	desiredMetricValues PodMetricsInfo
	desiredError        error

	replicas              int
	targetTimestamp       int
	window                time.Duration
	reportedMetricsPoints [][]metricPoint
	reportedPodMetrics    [][]int64

	namespace      string
	selector       labels.Selector
	metricSelector labels.Selector
	resourceName   v1.ResourceName
	metricName     string
}

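// prepareTestClient builds a fake Clientset that lists tc.replicas running pods and
// serves the configured metrics from the Heapster proxy endpoint: resource tests get a
// metricsapi.PodMetricsList, custom-metric tests get a heapster.MetricResultList.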
func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
	namespace := "test-namespace"
	tc.namespace = namespace
	podNamePrefix := "test-pod"
	podLabels := map[string]string{"name": podNamePrefix}
	tc.selector = labels.SelectorFromSet(podLabels)

	// it's a resource test if we have a resource name
	isResource := len(tc.resourceName) > 0

	fakeClient := &fake.Clientset{}

	fakeClient.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		obj := &v1.PodList{}
		for i := 0; i < tc.replicas; i++ {
			podName := fmt.Sprintf("%s-%d", podNamePrefix, i)
			pod := buildPod(namespace, podName, podLabels, v1.PodRunning, "1024")
			obj.Items = append(obj.Items, pod)
		}
		return true, obj, nil
	})

	if isResource {
		fakeClient.AddProxyReactor("services", func(action core.Action) (handled bool, ret restclient.ResponseWrapper, err error) {
			metrics := metricsapi.PodMetricsList{}
			for i, containers := range tc.reportedPodMetrics {
				metric := metricsapi.PodMetrics{
					ObjectMeta: metav1.ObjectMeta{
						Name:      fmt.Sprintf("%s-%d", podNamePrefix, i),
						Namespace: namespace,
					},
					Timestamp:  metav1.Time{Time: offsetTimestampBy(tc.targetTimestamp)},
					Window:     metav1.Duration{Duration: tc.window},
					Containers: []metricsapi.ContainerMetrics{},
				}
				for j, cpu := range containers {
					cm := metricsapi.ContainerMetrics{
						Name: fmt.Sprintf("%s-%d-container-%d", podNamePrefix, i, j),
						Usage: v1.ResourceList{
							v1.ResourceCPU: *resource.NewMilliQuantity(
								cpu,
								resource.DecimalSI),
							v1.ResourceMemory: *resource.NewQuantity(
								int64(1024*1024),
								resource.BinarySI),
						},
					}
					metric.Containers = append(metric.Containers, cm)
				}
				metrics.Items = append(metrics.Items, metric)
			}
			heapsterRawMemResponse, _ := json.Marshal(&metrics)
			return true, newFakeResponseWrapper(heapsterRawMemResponse), nil
		})
	} else {
		fakeClient.AddProxyReactor("services", func(action core.Action) (handled bool, ret restclient.ResponseWrapper, err error) {
			metrics := heapster.MetricResultList{}
			var latestTimestamp time.Time
			for _, reportedMetricPoints := range tc.reportedMetricsPoints {
				var heapsterMetricPoints []heapster.MetricPoint
				for _, reportedMetricPoint := range reportedMetricPoints {
					timestamp := offsetTimestampBy(reportedMetricPoint.timestamp)
					if latestTimestamp.Before(timestamp) {
						latestTimestamp = timestamp
					}
					heapsterMetricPoint := heapster.MetricPoint{Timestamp: timestamp, Value: reportedMetricPoint.level, FloatValue: nil}
					heapsterMetricPoints = append(heapsterMetricPoints, heapsterMetricPoint)
				}
				metric := heapster.MetricResult{
					Metrics:         heapsterMetricPoints,
					LatestTimestamp: latestTimestamp,
				}
				metrics.Items = append(metrics.Items, metric)
			}
			heapsterRawMemResponse, _ := json.Marshal(&metrics)
			return true, newFakeResponseWrapper(heapsterRawMemResponse), nil
		})
	}

	return fakeClient
}

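// buildPod returns a pod in the given phase, labelled to match tc.selector, with a
// single container requesting the given amount of CPU and a Ready condition set to true.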
func buildPod(namespace, podName string, podLabels map[string]string, phase v1.PodPhase, request string) v1.Pod {
	return v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      podName,
			Namespace: namespace,
			Labels:    podLabels,
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Resources: v1.ResourceRequirements{
						Requests: v1.ResourceList{
							v1.ResourceCPU: resource.MustParse(request),
						},
					},
				},
			},
		},
		Status: v1.PodStatus{
			Phase: phase,
			Conditions: []v1.PodCondition{
				{
					Type:   v1.PodReady,
					Status: v1.ConditionTrue,
				},
			},
		},
	}
}

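// verifyResults asserts that the metrics, timestamp, and error returned by the client
// match the expectations recorded in the test case.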
func (tc *testCase) verifyResults(t *testing.T, metrics PodMetricsInfo, timestamp time.Time, err error) {
	if tc.desiredError != nil {
		assert.Error(t, err, "there should be an error retrieving the metrics")
		assert.Contains(t, fmt.Sprintf("%v", err), fmt.Sprintf("%v", tc.desiredError), "the error message should be as expected")
		return
	}
	assert.NoError(t, err, "there should be no error retrieving the metrics")
	assert.NotNil(t, metrics, "there should be metrics returned")
	if len(metrics) != len(tc.desiredMetricValues) {
		t.Errorf("Not equal:\nexpected: %v\nactual: %v", tc.desiredMetricValues, metrics)
	} else {
		for k, m := range metrics {
			if !m.Timestamp.Equal(tc.desiredMetricValues[k].Timestamp) ||
				m.Window != tc.desiredMetricValues[k].Window ||
				m.Value != tc.desiredMetricValues[k].Value {
				t.Errorf("Not equal:\nexpected: %v\nactual: %v", tc.desiredMetricValues, metrics)
				break
			}
		}
	}

	targetTimestamp := offsetTimestampBy(tc.targetTimestamp)
	assert.True(t, targetTimestamp.Equal(timestamp), fmt.Sprintf("the timestamp should be as expected (%s) but was %s", targetTimestamp, timestamp))
}

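// runTest wires the fake client into a Heapster metrics client and exercises either
// GetResourceMetric or GetRawMetric, depending on whether a resource name is set.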
func (tc *testCase) runTest(t *testing.T) {
	testClient := tc.prepareTestClient(t)
	metricsClient := NewHeapsterMetricsClient(testClient, DefaultHeapsterNamespace, DefaultHeapsterScheme, DefaultHeapsterService, DefaultHeapsterPort)
	isResource := len(tc.resourceName) > 0
	if isResource {
		info, timestamp, err := metricsClient.GetResourceMetric(tc.resourceName, tc.namespace, tc.selector)
		tc.verifyResults(t, info, timestamp, err)
	} else {
		info, timestamp, err := metricsClient.GetRawMetric(tc.metricName, tc.namespace, tc.selector, tc.metricSelector)
		tc.verifyResults(t, info, timestamp, err)
	}
}

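// TestCPU verifies that per-pod CPU usage is read back correctly when every pod
// reports a single container metric.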
func TestCPU(t *testing.T) {
	targetTimestamp := 1
	window := 30 * time.Second
	tc := testCase{
		replicas: 3,
		desiredMetricValues: PodMetricsInfo{
			"test-pod-0": PodMetric{Value: 5000, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
			"test-pod-1": PodMetric{Value: 5000, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
			"test-pod-2": PodMetric{Value: 5000, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
		},
		resourceName:       v1.ResourceCPU,
		targetTimestamp:    targetTimestamp,
		window:             window,
		reportedPodMetrics: [][]int64{{5000}, {5000}, {5000}},
	}
	tc.runTest(t)
}

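// TestQPS verifies that a custom "qps" metric is fetched per pod via the raw
// Heapster metrics path.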
func TestQPS(t *testing.T) {
	targetTimestamp := 1
	tc := testCase{
		replicas: 3,
		desiredMetricValues: PodMetricsInfo{
			"test-pod-0": PodMetric{Value: 10000, Timestamp: offsetTimestampBy(targetTimestamp), Window: heapsterDefaultMetricWindow},
			"test-pod-1": PodMetric{Value: 20000, Timestamp: offsetTimestampBy(targetTimestamp), Window: heapsterDefaultMetricWindow},
			"test-pod-2": PodMetric{Value: 10000, Timestamp: offsetTimestampBy(targetTimestamp), Window: heapsterDefaultMetricWindow},
		},
		metricName:            "qps",
		targetTimestamp:       targetTimestamp,
		reportedMetricsPoints: [][]metricPoint{{{10, 1}}, {{20, 1}}, {{10, 1}}},
	}
	tc.runTest(t)
}

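// TestQpsSumEqualZero verifies that all-zero custom metric values are returned as
// zero for every pod rather than producing an error.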
func TestQpsSumEqualZero(t *testing.T) {
	targetTimestamp := 0
	tc := testCase{
		replicas: 3,
		desiredMetricValues: PodMetricsInfo{
			"test-pod-0": PodMetric{Value: 0, Timestamp: offsetTimestampBy(targetTimestamp), Window: heapsterDefaultMetricWindow},
			"test-pod-1": PodMetric{Value: 0, Timestamp: offsetTimestampBy(targetTimestamp), Window: heapsterDefaultMetricWindow},
			"test-pod-2": PodMetric{Value: 0, Timestamp: offsetTimestampBy(targetTimestamp), Window: heapsterDefaultMetricWindow},
		},
		metricName:            "qps",
		targetTimestamp:       targetTimestamp,
		reportedMetricsPoints: [][]metricPoint{{{0, 0}}, {{0, 0}}, {{0, 0}}},
	}
	tc.runTest(t)
}

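// TestCPUMoreMetrics verifies that container usage is summed per pod when pods
// report metrics for multiple containers.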
func TestCPUMoreMetrics(t *testing.T) {
	targetTimestamp := 10
	window := 30 * time.Second
	tc := testCase{
		replicas: 5,
		desiredMetricValues: PodMetricsInfo{
			"test-pod-0": PodMetric{Value: 5000, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
			"test-pod-1": PodMetric{Value: 5000, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
			"test-pod-2": PodMetric{Value: 5000, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
			"test-pod-3": PodMetric{Value: 5000, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
			"test-pod-4": PodMetric{Value: 5000, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
		},
		resourceName:       v1.ResourceCPU,
		targetTimestamp:    targetTimestamp,
		window:             window,
		reportedPodMetrics: [][]int64{{1000, 2000, 2000}, {5000}, {1000, 1000, 1000, 2000}, {4000, 1000}, {5000}},
	}
	tc.runTest(t)
}

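// TestCPUMissingMetrics verifies that only the pods with reported CPU metrics show up
// in the result when fewer metrics than replicas are returned.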
func TestCPUMissingMetrics(t *testing.T) {
	targetTimestamp := 0
	window := 30 * time.Second
	tc := testCase{
		replicas: 3,
		desiredMetricValues: PodMetricsInfo{
			"test-pod-0": PodMetric{Value: 4000, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
		},
		resourceName:       v1.ResourceCPU,
		targetTimestamp:    targetTimestamp,
		window:             window,
		reportedPodMetrics: [][]int64{{4000}},
	}
	tc.runTest(t)
}

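// TestQpsMissingMetrics verifies that fetching a custom metric fails when fewer
// metrics than pods are returned.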
func TestQpsMissingMetrics(t *testing.T) {
	tc := testCase{
		replicas:              3,
		desiredError:          fmt.Errorf("requested metrics for 3 pods, got metrics for 1"),
		metricName:            "qps",
		targetTimestamp:       1,
		reportedMetricsPoints: [][]metricPoint{{{4000, 4}}},
	}
	tc.runTest(t)
}

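// TestQpsSuperfluousMetrics verifies that fetching a custom metric fails when more
// metrics than pods are returned.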
func TestQpsSuperfluousMetrics(t *testing.T) {
	tc := testCase{
		replicas:              3,
		desiredError:          fmt.Errorf("requested metrics for 3 pods, got metrics for 6"),
		metricName:            "qps",
		reportedMetricsPoints: [][]metricPoint{{{1000, 1}}, {{2000, 4}}, {{2000, 1}}, {{4000, 5}}, {{2000, 1}}, {{4000, 4}}},
	}
	tc.runTest(t)
}

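// TestCPUEmptyMetrics verifies that an empty Heapster response produces an error
// rather than an empty result.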
func TestCPUEmptyMetrics(t *testing.T) {
	tc := testCase{
		replicas:              3,
		resourceName:          v1.ResourceCPU,
		desiredError:          fmt.Errorf("no metrics returned from heapster"),
		reportedMetricsPoints: [][]metricPoint{},
		reportedPodMetrics:    [][]int64{},
	}
	tc.runTest(t)
}

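// TestQpsEmptyEntries exercises pods whose metric entries are empty; only the pods
// with data are expected back in the result.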
|
|
|
|
func TestQpsEmptyEntries(t *testing.T) {
|
2018-08-28 13:04:05 +00:00
|
|
|
targetTimestamp := 4
tc := testCase{
replicas: 3,
metricName: "qps",
desiredMetricValues: PodMetricsInfo{
"test-pod-0": PodMetric{Value: 4000000, Timestamp: offsetTimestampBy(targetTimestamp), Window: heapsterDefaultMetricWindow},
"test-pod-2": PodMetric{Value: 2000000, Timestamp: offsetTimestampBy(targetTimestamp), Window: heapsterDefaultMetricWindow},
},
targetTimestamp: targetTimestamp,
reportedMetricsPoints: [][]metricPoint{{{4000, 4}}, {}, {{2000, 4}}},
}
tc.runTest(t)
}
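
// TestCPUZeroReplicas verifies that asking for CPU metrics when there are no
// replicas (and therefore no reported pod metrics) yields the
// "no metrics returned from heapster" error.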
func TestCPUZeroReplicas(t *testing.T) {
tc := testCase{
replicas: 0,
resourceName: v1.ResourceCPU,
desiredError: fmt.Errorf("no metrics returned from heapster"),
reportedPodMetrics: [][]int64{},
}
tc.runTest(t)
}
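
// TestCPUEmptyMetricsForOnePod verifies that a pod with an empty metrics list
// is omitted from the result, while the CPU values for the other pods are
// summed per pod (100 and 300+400=700) as expected.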
func TestCPUEmptyMetricsForOnePod(t *testing.T) {
targetTimestamp := 0
window := 30 * time.Second
tc := testCase{
replicas: 3,
resourceName: v1.ResourceCPU,
desiredMetricValues: PodMetricsInfo{
"test-pod-0": PodMetric{Value: 100, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
"test-pod-1": PodMetric{Value: 700, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
},
targetTimestamp: targetTimestamp,
window: window,
reportedPodMetrics: [][]int64{{100}, {300, 400}, {}},
}
tc.runTest(t)
}
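
// TestCollapseTimeSamples verifies that the samples falling inside the requested
// one-minute window (50 and 100) are averaged to 75, that the newest timestamp is
// returned, and that the presence of metrics is reported.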
func TestCollapseTimeSamples(t *testing.T) {
now := time.Now()
metrics := heapster.MetricResult{
Metrics: []heapster.MetricPoint{
{Timestamp: now, Value: 50, FloatValue: nil},
{Timestamp: now.Add(-15 * time.Second), Value: 100, FloatValue: nil},
{Timestamp: now.Add(-60 * time.Second), Value: 100000, FloatValue: nil}},
LatestTimestamp: now,
}
val, timestamp, hadMetrics := collapseTimeSamples(metrics, time.Minute)
assert.True(t, hadMetrics, "should report that it received a populated list of metrics")
assert.InEpsilon(t, float64(75), val, 0.1, "collapsed sample value should be as expected")
assert.True(t, timestamp.Equal(now), "timestamp should be the current time (the newest)")
}
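
// The helper exercised above is defined elsewhere in this package. The sketch
// below is only an illustration of the behaviour the assertions rely on, under
// the assumption that collapseTimeSamples averages the samples newer than the
// given window and reports the latest timestamp. collapseTimeSamplesSketch is a
// hypothetical name and is not called by any test.
func collapseTimeSamplesSketch(metrics heapster.MetricResult, window time.Duration) (int64, time.Time, bool) {
	cutoff := metrics.LatestTimestamp.Add(-window)
	sum, count := int64(0), int64(0)
	for _, p := range metrics.Metrics {
		// Samples that fall outside the window (e.g. the 100000 value above) are ignored.
		if !p.Timestamp.After(cutoff) {
			continue
		}
		if p.FloatValue != nil {
			sum += int64(*p.FloatValue)
		} else {
			sum += int64(p.Value)
		}
		count++
	}
	if count == 0 {
		return 0, time.Time{}, false
	}
	// Average of the in-window samples, stamped with the newest observation time;
	// for the data above this yields 75 at time "now".
	return sum / count, metrics.LatestTimestamp, true
}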
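
// offsetTimestampBy returns fixedTimestamp shifted forward by t minutes; the
// tests above use it to build deterministic metric timestamps.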
func offsetTimestampBy(t int) time.Time {
return fixedTimestamp.Add(time.Duration(t) * time.Minute)
}