Merge pull request #52238 from mattjmcnaughton/mattjmcnaughton/address-golint-errors-in-podautoscaler

Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Fix golint errors in `pkg/controller/podautoscaler`

**What this PR does / why we need it**:

Address `golint` errors in `pkg/controller/podautoscaler`. Note: I did not
address warnings about exported types/functions missing doc comments,
because I'm not sure what the convention within the k8s project is.
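
For context, the bulk of the diff below is mechanical renaming driven by golint's initialism check: `Api` must be written `API` and `Cpu` must be `CPU`, so `useMetricsApi` becomes `useMetricsAPI` and `finalCpuPercentTarget` becomes `finalCPUPercentTarget`. A minimal sketch of the rule, using hypothetical names rather than the actual controller code:

```go
package example

// serverConfig is a hypothetical struct illustrating golint's initialism
// rule: common initialisms (API, CPU, URL, ID, HTTP, ...) must keep a
// consistent case rather than being written as Api, Cpu, Url, etc.
type serverConfig struct {
	useMetricsAPI bool   // golint flags `useMetricsApi`
	targetCPU     int32  // golint flags `targetCpu`
	metricsURL    string // golint flags `metricsUrl`
}
```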

```release-note
NONE
```

Signed-off-by: mattjmcnaughton <mattjmcnaughton@gmail.com>
Kubernetes Submit Queue 2017-09-27 03:54:43 -07:00 committed by GitHub
commit 7d0977d89f
5 changed files with 143 additions and 144 deletions

View File

@ -106,7 +106,7 @@ type testCase struct {
statusUpdated bool
eventCreated bool
verifyEvents bool
useMetricsApi bool
useMetricsAPI bool
metricsTarget []autoscalingv2.MetricSpec
expectedConditions []autoscalingv1.HorizontalPodAutoscalerCondition
// Channel with names of HPA objects which we have reconciled.
@ -483,44 +483,44 @@ func (tc *testCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfa
}
return true, metrics, nil
} else {
name := getForAction.GetName()
mapper := api.Registry.RESTMapper()
metrics := &cmapi.MetricValueList{}
var matchedTarget *autoscalingv2.MetricSpec
for i, target := range tc.metricsTarget {
if target.Type == autoscalingv2.ObjectMetricSourceType && name == target.Object.Target.Name {
gk := schema.FromAPIVersionAndKind(target.Object.Target.APIVersion, target.Object.Target.Kind).GroupKind()
mapping, err := mapper.RESTMapping(gk)
if err != nil {
t.Logf("unable to get mapping for %s: %v", gk.String(), err)
continue
}
groupResource := schema.GroupResource{Group: mapping.GroupVersionKind.Group, Resource: mapping.Resource}
}
if getForAction.GetResource().Resource == groupResource.String() {
matchedTarget = &tc.metricsTarget[i]
}
name := getForAction.GetName()
mapper := api.Registry.RESTMapper()
metrics := &cmapi.MetricValueList{}
var matchedTarget *autoscalingv2.MetricSpec
for i, target := range tc.metricsTarget {
if target.Type == autoscalingv2.ObjectMetricSourceType && name == target.Object.Target.Name {
gk := schema.FromAPIVersionAndKind(target.Object.Target.APIVersion, target.Object.Target.Kind).GroupKind()
mapping, err := mapper.RESTMapping(gk)
if err != nil {
t.Logf("unable to get mapping for %s: %v", gk.String(), err)
continue
}
groupResource := schema.GroupResource{Group: mapping.GroupVersionKind.Group, Resource: mapping.Resource}
if getForAction.GetResource().Resource == groupResource.String() {
matchedTarget = &tc.metricsTarget[i]
}
}
assert.NotNil(t, matchedTarget, "this request should have matched one of the metric specs")
assert.Equal(t, "qps", getForAction.GetMetricName(), "the metric name requested should have been qps, as specified in the metric spec")
metrics.Items = []cmapi.MetricValue{
{
DescribedObject: v1.ObjectReference{
Kind: matchedTarget.Object.Target.Kind,
APIVersion: matchedTarget.Object.Target.APIVersion,
Name: name,
},
Timestamp: metav1.Time{Time: time.Now()},
MetricName: "qps",
Value: *resource.NewMilliQuantity(int64(tc.reportedLevels[0]), resource.DecimalSI),
},
}
return true, metrics, nil
}
assert.NotNil(t, matchedTarget, "this request should have matched one of the metric specs")
assert.Equal(t, "qps", getForAction.GetMetricName(), "the metric name requested should have been qps, as specified in the metric spec")
metrics.Items = []cmapi.MetricValue{
{
DescribedObject: v1.ObjectReference{
Kind: matchedTarget.Object.Target.Kind,
APIVersion: matchedTarget.Object.Target.APIVersion,
Name: name,
},
Timestamp: metav1.Time{Time: time.Now()},
MetricName: "qps",
Value: *resource.NewMilliQuantity(int64(tc.reportedLevels[0]), resource.DecimalSI),
},
}
return true, metrics, nil
})
return fakeClient, fakeMetricsClient, fakeCMClient
@ -634,7 +634,7 @@ func TestScaleUp(t *testing.T) {
verifyCPUCurrent: true,
reportedLevels: []uint64{300, 500, 700},
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
useMetricsApi: true,
useMetricsAPI: true,
}
tc.runTest(t)
}
@ -651,7 +651,7 @@ func TestScaleUpUnreadyLessScale(t *testing.T) {
reportedLevels: []uint64{300, 500, 700},
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
reportedPodReadiness: []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionTrue},
useMetricsApi: true,
useMetricsAPI: true,
}
tc.runTest(t)
}
@ -668,7 +668,7 @@ func TestScaleUpUnreadyNoScale(t *testing.T) {
reportedLevels: []uint64{400, 500, 700},
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
reportedPodReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
useMetricsApi: true,
useMetricsAPI: true,
expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
Type: autoscalingv2.AbleToScale,
Status: v1.ConditionTrue,
@ -688,7 +688,7 @@ func TestScaleUpDeployment(t *testing.T) {
verifyCPUCurrent: true,
reportedLevels: []uint64{300, 500, 700},
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
useMetricsApi: true,
useMetricsAPI: true,
resource: &fakeResource{
name: "test-dep",
apiVersion: "extensions/v1beta1",
@ -708,7 +708,7 @@ func TestScaleUpReplicaSet(t *testing.T) {
verifyCPUCurrent: true,
reportedLevels: []uint64{300, 500, 700},
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
useMetricsApi: true,
useMetricsAPI: true,
resource: &fakeResource{
name: "test-replicaset",
apiVersion: "extensions/v1beta1",
@ -827,7 +827,7 @@ func TestScaleDown(t *testing.T) {
verifyCPUCurrent: true,
reportedLevels: []uint64{100, 300, 500, 250, 250},
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
useMetricsApi: true,
useMetricsAPI: true,
}
tc.runTest(t)
}
@ -892,7 +892,7 @@ func TestScaleDownIgnoresUnreadyPods(t *testing.T) {
verifyCPUCurrent: true,
reportedLevels: []uint64{100, 300, 500, 250, 250},
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
useMetricsApi: true,
useMetricsAPI: true,
reportedPodReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
}
tc.runTest(t)
@ -907,7 +907,7 @@ func TestTolerance(t *testing.T) {
CPUTarget: 100,
reportedLevels: []uint64{1010, 1030, 1020},
reportedCPURequests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
useMetricsApi: true,
useMetricsAPI: true,
expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
Type: autoscalingv2.AbleToScale,
Status: v1.ConditionTrue,
@ -983,7 +983,7 @@ func TestMinReplicas(t *testing.T) {
CPUTarget: 90,
reportedLevels: []uint64{10, 95, 10},
reportedCPURequests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
useMetricsApi: true,
useMetricsAPI: true,
expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
Type: autoscalingv2.ScalingLimited,
Status: v1.ConditionTrue,
@ -1002,7 +1002,7 @@ func TestMinReplicasDesiredZero(t *testing.T) {
CPUTarget: 90,
reportedLevels: []uint64{0, 0, 0},
reportedCPURequests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
useMetricsApi: true,
useMetricsAPI: true,
expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
Type: autoscalingv2.ScalingLimited,
Status: v1.ConditionTrue,
@ -1021,7 +1021,7 @@ func TestZeroReplicas(t *testing.T) {
CPUTarget: 90,
reportedLevels: []uint64{},
reportedCPURequests: []resource.Quantity{},
useMetricsApi: true,
useMetricsAPI: true,
expectedConditions: []autoscalingv1.HorizontalPodAutoscalerCondition{
{Type: autoscalingv1.AbleToScale, Status: v1.ConditionTrue, Reason: "SucceededGetScale"},
{Type: autoscalingv1.ScalingActive, Status: v1.ConditionFalse, Reason: "ScalingDisabled"},
@ -1039,7 +1039,7 @@ func TestTooFewReplicas(t *testing.T) {
CPUTarget: 90,
reportedLevels: []uint64{},
reportedCPURequests: []resource.Quantity{},
useMetricsApi: true,
useMetricsAPI: true,
expectedConditions: []autoscalingv1.HorizontalPodAutoscalerCondition{
{Type: autoscalingv1.AbleToScale, Status: v1.ConditionTrue, Reason: "SucceededRescale"},
},
@ -1056,7 +1056,7 @@ func TestTooManyReplicas(t *testing.T) {
CPUTarget: 90,
reportedLevels: []uint64{},
reportedCPURequests: []resource.Quantity{},
useMetricsApi: true,
useMetricsAPI: true,
expectedConditions: []autoscalingv1.HorizontalPodAutoscalerCondition{
{Type: autoscalingv1.AbleToScale, Status: v1.ConditionTrue, Reason: "SucceededRescale"},
},
@ -1073,7 +1073,7 @@ func TestMaxReplicas(t *testing.T) {
CPUTarget: 90,
reportedLevels: []uint64{8000, 9500, 1000},
reportedCPURequests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
useMetricsApi: true,
useMetricsAPI: true,
expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
Type: autoscalingv2.ScalingLimited,
Status: v1.ConditionTrue,
@ -1092,7 +1092,7 @@ func TestSuperfluousMetrics(t *testing.T) {
CPUTarget: 100,
reportedLevels: []uint64{4000, 9500, 3000, 7000, 3200, 2000},
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
useMetricsApi: true,
useMetricsAPI: true,
expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
Type: autoscalingv2.ScalingLimited,
Status: v1.ConditionTrue,
@ -1111,7 +1111,7 @@ func TestMissingMetrics(t *testing.T) {
CPUTarget: 100,
reportedLevels: []uint64{400, 95},
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
useMetricsApi: true,
useMetricsAPI: true,
}
tc.runTest(t)
}
@ -1125,7 +1125,7 @@ func TestEmptyMetrics(t *testing.T) {
CPUTarget: 100,
reportedLevels: []uint64{},
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
useMetricsApi: true,
useMetricsAPI: true,
expectedConditions: []autoscalingv1.HorizontalPodAutoscalerCondition{
{Type: autoscalingv1.AbleToScale, Status: v1.ConditionTrue, Reason: "SucceededGetScale"},
{Type: autoscalingv1.ScalingActive, Status: v1.ConditionFalse, Reason: "FailedGetResourceMetric"},
@ -1142,7 +1142,7 @@ func TestEmptyCPURequest(t *testing.T) {
desiredReplicas: 1,
CPUTarget: 100,
reportedLevels: []uint64{200},
useMetricsApi: true,
useMetricsAPI: true,
expectedConditions: []autoscalingv1.HorizontalPodAutoscalerCondition{
{Type: autoscalingv1.AbleToScale, Status: v1.ConditionTrue, Reason: "SucceededGetScale"},
{Type: autoscalingv1.ScalingActive, Status: v1.ConditionFalse, Reason: "FailedGetResourceMetric"},
@ -1161,7 +1161,7 @@ func TestEventCreated(t *testing.T) {
reportedLevels: []uint64{200},
reportedCPURequests: []resource.Quantity{resource.MustParse("0.2")},
verifyEvents: true,
useMetricsApi: true,
useMetricsAPI: true,
}
tc.runTest(t)
}
@ -1176,7 +1176,7 @@ func TestEventNotCreated(t *testing.T) {
reportedLevels: []uint64{200, 200},
reportedCPURequests: []resource.Quantity{resource.MustParse("0.4"), resource.MustParse("0.4")},
verifyEvents: true,
useMetricsApi: true,
useMetricsAPI: true,
expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
Type: autoscalingv2.AbleToScale,
Status: v1.ConditionTrue,
@ -1195,7 +1195,7 @@ func TestMissingReports(t *testing.T) {
CPUTarget: 50,
reportedLevels: []uint64{200},
reportedCPURequests: []resource.Quantity{resource.MustParse("0.2")},
useMetricsApi: true,
useMetricsAPI: true,
}
tc.runTest(t)
}
@ -1209,7 +1209,7 @@ func TestUpscaleCap(t *testing.T) {
CPUTarget: 10,
reportedLevels: []uint64{100, 200, 300},
reportedCPURequests: []resource.Quantity{resource.MustParse("0.1"), resource.MustParse("0.1"), resource.MustParse("0.1")},
useMetricsApi: true,
useMetricsAPI: true,
expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
Type: autoscalingv2.ScalingLimited,
Status: v1.ConditionTrue,
@ -1228,7 +1228,7 @@ func TestConditionInvalidSelectorMissing(t *testing.T) {
CPUTarget: 10,
reportedLevels: []uint64{100, 200, 300},
reportedCPURequests: []resource.Quantity{resource.MustParse("0.1"), resource.MustParse("0.1"), resource.MustParse("0.1")},
useMetricsApi: true,
useMetricsAPI: true,
expectedConditions: []autoscalingv1.HorizontalPodAutoscalerCondition{
{
Type: autoscalingv1.AbleToScale,
@ -1273,7 +1273,7 @@ func TestConditionInvalidSelectorUnparsable(t *testing.T) {
CPUTarget: 10,
reportedLevels: []uint64{100, 200, 300},
reportedCPURequests: []resource.Quantity{resource.MustParse("0.1"), resource.MustParse("0.1"), resource.MustParse("0.1")},
useMetricsApi: true,
useMetricsAPI: true,
expectedConditions: []autoscalingv1.HorizontalPodAutoscalerCondition{
{
Type: autoscalingv1.AbleToScale,
@ -1347,17 +1347,17 @@ func TestConditionFailedGetMetrics(t *testing.T) {
CPUTarget: 10,
reportedLevels: []uint64{100, 200, 300},
reportedCPURequests: []resource.Quantity{resource.MustParse("0.1"), resource.MustParse("0.1"), resource.MustParse("0.1")},
useMetricsApi: true,
useMetricsAPI: true,
}
_, testMetricsClient, testCMClient := tc.prepareTestClient(t)
tc.testMetricsClient = testMetricsClient
tc.testCMClient = testCMClient
testMetricsClient.PrependReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
return true, &metricsapi.PodMetricsList{}, fmt.Errorf("something went wrong!")
return true, &metricsapi.PodMetricsList{}, fmt.Errorf("something went wrong")
})
testCMClient.PrependReactor("get", "*", func(action core.Action) (handled bool, ret runtime.Object, err error) {
return true, &cmapi.MetricValueList{}, fmt.Errorf("something went wrong!")
return true, &cmapi.MetricValueList{}, fmt.Errorf("something went wrong")
})
tc.expectedConditions = []autoscalingv1.HorizontalPodAutoscalerCondition{
@ -1412,7 +1412,7 @@ func TestConditionFailedGetScale(t *testing.T) {
CPUTarget: 10,
reportedLevels: []uint64{100, 200, 300},
reportedCPURequests: []resource.Quantity{resource.MustParse("0.1"), resource.MustParse("0.1"), resource.MustParse("0.1")},
useMetricsApi: true,
useMetricsAPI: true,
expectedConditions: []autoscalingv1.HorizontalPodAutoscalerCondition{
{
Type: autoscalingv1.AbleToScale,
@ -1426,7 +1426,7 @@ func TestConditionFailedGetScale(t *testing.T) {
tc.testClient = testClient
testClient.PrependReactor("get", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
return true, &extensions.Scale{}, fmt.Errorf("something went wrong!")
return true, &extensions.Scale{}, fmt.Errorf("something went wrong")
})
tc.runTest(t)
@ -1441,7 +1441,7 @@ func TestConditionFailedUpdateScale(t *testing.T) {
CPUTarget: 100,
reportedLevels: []uint64{150, 150, 150},
reportedCPURequests: []resource.Quantity{resource.MustParse("0.1"), resource.MustParse("0.1"), resource.MustParse("0.1")},
useMetricsApi: true,
useMetricsAPI: true,
expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
Type: autoscalingv2.AbleToScale,
Status: v1.ConditionFalse,
@ -1453,7 +1453,7 @@ func TestConditionFailedUpdateScale(t *testing.T) {
tc.testClient = testClient
testClient.PrependReactor("update", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
return true, &extensions.Scale{}, fmt.Errorf("something went wrong!")
return true, &extensions.Scale{}, fmt.Errorf("something went wrong")
})
tc.runTest(t)
@ -1469,7 +1469,7 @@ func TestBackoffUpscale(t *testing.T) {
CPUTarget: 100,
reportedLevels: []uint64{150, 150, 150},
reportedCPURequests: []resource.Quantity{resource.MustParse("0.1"), resource.MustParse("0.1"), resource.MustParse("0.1")},
useMetricsApi: true,
useMetricsAPI: true,
lastScaleTime: &time,
expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
Type: autoscalingv2.AbleToScale,
@ -1494,7 +1494,7 @@ func TestBackoffDownscale(t *testing.T) {
CPUTarget: 100,
reportedLevels: []uint64{50, 50, 50},
reportedCPURequests: []resource.Quantity{resource.MustParse("0.1"), resource.MustParse("0.1"), resource.MustParse("0.1")},
useMetricsApi: true,
useMetricsAPI: true,
lastScaleTime: &time,
expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
Type: autoscalingv2.AbleToScale,
@ -1526,7 +1526,7 @@ func TestComputedToleranceAlgImplementation(t *testing.T) {
// Force a minimal scaling event by satisfying (tolerance < 1 - resourcesUsedRatio).
target := math.Abs(1/(requestedToUsed*(1-tolerance))) + .01
finalCpuPercentTarget := int32(target * 100)
finalCPUPercentTarget := int32(target * 100)
resourcesUsedRatio := float64(totalUsedCPUOfAllPods) / float64(float64(totalRequestedCPUOfAllPods)*target)
// i.e. .60 * 20 -> scaled down expectation.
@ -1538,7 +1538,7 @@ func TestComputedToleranceAlgImplementation(t *testing.T) {
maxReplicas: 1000,
initialReplicas: startPods,
desiredReplicas: finalPods,
CPUTarget: finalCpuPercentTarget,
CPUTarget: finalCPUPercentTarget,
reportedLevels: []uint64{
totalUsedCPUOfAllPods / 10,
totalUsedCPUOfAllPods / 10,
@ -1563,7 +1563,7 @@ func TestComputedToleranceAlgImplementation(t *testing.T) {
resource.MustParse(fmt.Sprint(perPodRequested) + "m"),
resource.MustParse(fmt.Sprint(perPodRequested) + "m"),
},
useMetricsApi: true,
useMetricsAPI: true,
}
tc.runTest(t)
@ -1571,8 +1571,8 @@ func TestComputedToleranceAlgImplementation(t *testing.T) {
// Reuse the data structure above, now testing "unscaling".
// Now, we test that no scaling happens if we are in a very close margin to the tolerance
target = math.Abs(1/(requestedToUsed*(1-tolerance))) + .004
finalCpuPercentTarget = int32(target * 100)
tc.CPUTarget = finalCpuPercentTarget
finalCPUPercentTarget = int32(target * 100)
tc.CPUTarget = finalCPUPercentTarget
tc.initialReplicas = startPods
tc.desiredReplicas = startPods
tc.expectedConditions = statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
@ -1593,7 +1593,7 @@ func TestScaleUpRCImmediately(t *testing.T) {
verifyCPUCurrent: false,
reportedLevels: []uint64{0, 0, 0, 0},
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
useMetricsApi: true,
useMetricsAPI: true,
lastScaleTime: &time,
expectedConditions: []autoscalingv1.HorizontalPodAutoscalerCondition{
{Type: autoscalingv1.AbleToScale, Status: v1.ConditionTrue, Reason: "SucceededRescale"},
@ -1612,7 +1612,7 @@ func TestScaleDownRCImmediately(t *testing.T) {
CPUTarget: 50,
reportedLevels: []uint64{8000, 9500, 1000},
reportedCPURequests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
useMetricsApi: true,
useMetricsAPI: true,
lastScaleTime: &time,
expectedConditions: []autoscalingv1.HorizontalPodAutoscalerCondition{
{Type: autoscalingv1.AbleToScale, Status: v1.ConditionTrue, Reason: "SucceededRescale"},
@ -1633,7 +1633,7 @@ func TestAvoidUncessaryUpdates(t *testing.T) {
reportedLevels: []uint64{400, 500, 700},
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
reportedPodReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
useMetricsApi: true,
useMetricsAPI: true,
}
testClient, _, _ := tc.prepareTestClient(t)
tc.testClient = testClient
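
The repeated `fmt.Errorf("something went wrong!")` → `fmt.Errorf("something went wrong")` edits in the reactors above follow another golint rule: error strings should not be capitalized or end with punctuation, because callers usually wrap them in larger messages. A minimal sketch, with a hypothetical helper:

```go
package example

import "fmt"

// openStore is a hypothetical helper showing why the rule exists: a
// caller wrapping the error, e.g. fmt.Errorf("starting up: %v", err),
// would otherwise produce "starting up: something went wrong!".
func openStore(path string) error {
	return fmt.Errorf("unable to open store at %s", path)
}
```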

View File

@ -86,7 +86,7 @@ type legacyTestCase struct {
statusUpdated bool
eventCreated bool
verifyEvents bool
useMetricsApi bool
useMetricsAPI bool
metricsTarget []autoscalingv2.MetricSpec
// Channel with names of HPA objects which we have reconciled.
processed chan string
@ -319,7 +319,7 @@ func (tc *legacyTestCase) prepareTestClient(t *testing.T) *fake.Clientset {
var heapsterRawMemResponse []byte
if tc.useMetricsApi {
if tc.useMetricsAPI {
metrics := metricsapi.PodMetricsList{}
for i, cpu := range tc.reportedLevels {
podMetric := metricsapi.PodMetrics{
@ -530,7 +530,7 @@ func LegacyTestScaleUp(t *testing.T) {
verifyCPUCurrent: true,
reportedLevels: []uint64{300, 500, 700},
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
useMetricsApi: true,
useMetricsAPI: true,
}
tc.runTest(t)
}
@ -547,7 +547,7 @@ func LegacyTestScaleUpUnreadyLessScale(t *testing.T) {
reportedLevels: []uint64{300, 500, 700},
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
reportedPodReadiness: []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionTrue},
useMetricsApi: true,
useMetricsAPI: true,
}
tc.runTest(t)
}
@ -564,7 +564,7 @@ func LegacyTestScaleUpUnreadyNoScale(t *testing.T) {
reportedLevels: []uint64{400, 500, 700},
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
reportedPodReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
useMetricsApi: true,
useMetricsAPI: true,
}
tc.runTest(t)
}
@ -579,7 +579,7 @@ func LegacyTestScaleUpDeployment(t *testing.T) {
verifyCPUCurrent: true,
reportedLevels: []uint64{300, 500, 700},
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
useMetricsApi: true,
useMetricsAPI: true,
resource: &fakeResource{
name: "test-dep",
apiVersion: "extensions/v1beta1",
@ -599,7 +599,7 @@ func LegacyTestScaleUpReplicaSet(t *testing.T) {
verifyCPUCurrent: true,
reportedLevels: []uint64{300, 500, 700},
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
useMetricsApi: true,
useMetricsAPI: true,
resource: &fakeResource{
name: "test-replicaset",
apiVersion: "extensions/v1beta1",
@ -687,7 +687,7 @@ func LegacyTestScaleDown(t *testing.T) {
verifyCPUCurrent: true,
reportedLevels: []uint64{100, 300, 500, 250, 250},
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
useMetricsApi: true,
useMetricsAPI: true,
}
tc.runTest(t)
}
@ -725,7 +725,7 @@ func LegacyTestScaleDownIgnoresUnreadyPods(t *testing.T) {
verifyCPUCurrent: true,
reportedLevels: []uint64{100, 300, 500, 250, 250},
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
useMetricsApi: true,
useMetricsAPI: true,
reportedPodReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
}
tc.runTest(t)
@ -740,7 +740,7 @@ func LegacyTestTolerance(t *testing.T) {
CPUTarget: 100,
reportedLevels: []uint64{1010, 1030, 1020},
reportedCPURequests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
useMetricsApi: true,
useMetricsAPI: true,
}
tc.runTest(t)
}
@ -775,7 +775,7 @@ func LegacyTestMinReplicas(t *testing.T) {
CPUTarget: 90,
reportedLevels: []uint64{10, 95, 10},
reportedCPURequests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
useMetricsApi: true,
useMetricsAPI: true,
}
tc.runTest(t)
}
@ -789,7 +789,7 @@ func LegacyTestZeroReplicas(t *testing.T) {
CPUTarget: 90,
reportedLevels: []uint64{},
reportedCPURequests: []resource.Quantity{},
useMetricsApi: true,
useMetricsAPI: true,
}
tc.runTest(t)
}
@ -803,7 +803,7 @@ func LegacyTestTooFewReplicas(t *testing.T) {
CPUTarget: 90,
reportedLevels: []uint64{},
reportedCPURequests: []resource.Quantity{},
useMetricsApi: true,
useMetricsAPI: true,
}
tc.runTest(t)
}
@ -817,7 +817,7 @@ func LegacyTestTooManyReplicas(t *testing.T) {
CPUTarget: 90,
reportedLevels: []uint64{},
reportedCPURequests: []resource.Quantity{},
useMetricsApi: true,
useMetricsAPI: true,
}
tc.runTest(t)
}
@ -831,7 +831,7 @@ func LegacyTestMaxReplicas(t *testing.T) {
CPUTarget: 90,
reportedLevels: []uint64{8000, 9500, 1000},
reportedCPURequests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
useMetricsApi: true,
useMetricsAPI: true,
}
tc.runTest(t)
}
@ -845,7 +845,7 @@ func LegacyTestSuperfluousMetrics(t *testing.T) {
CPUTarget: 100,
reportedLevels: []uint64{4000, 9500, 3000, 7000, 3200, 2000},
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
useMetricsApi: true,
useMetricsAPI: true,
}
tc.runTest(t)
}
@ -859,7 +859,7 @@ func LegacyTestMissingMetrics(t *testing.T) {
CPUTarget: 100,
reportedLevels: []uint64{400, 95},
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
useMetricsApi: true,
useMetricsAPI: true,
}
tc.runTest(t)
}
@ -873,7 +873,7 @@ func LegacyTestEmptyMetrics(t *testing.T) {
CPUTarget: 100,
reportedLevels: []uint64{},
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
useMetricsApi: true,
useMetricsAPI: true,
}
tc.runTest(t)
}
@ -886,7 +886,7 @@ func LegacyTestEmptyCPURequest(t *testing.T) {
desiredReplicas: 1,
CPUTarget: 100,
reportedLevels: []uint64{200},
useMetricsApi: true,
useMetricsAPI: true,
}
tc.runTest(t)
}
@ -901,7 +901,7 @@ func LegacyTestEventCreated(t *testing.T) {
reportedLevels: []uint64{200},
reportedCPURequests: []resource.Quantity{resource.MustParse("0.2")},
verifyEvents: true,
useMetricsApi: true,
useMetricsAPI: true,
}
tc.runTest(t)
}
@ -916,7 +916,7 @@ func LegacyTestEventNotCreated(t *testing.T) {
reportedLevels: []uint64{200, 200},
reportedCPURequests: []resource.Quantity{resource.MustParse("0.4"), resource.MustParse("0.4")},
verifyEvents: true,
useMetricsApi: true,
useMetricsAPI: true,
}
tc.runTest(t)
}
@ -930,7 +930,7 @@ func LegacyTestMissingReports(t *testing.T) {
CPUTarget: 50,
reportedLevels: []uint64{200},
reportedCPURequests: []resource.Quantity{resource.MustParse("0.2")},
useMetricsApi: true,
useMetricsAPI: true,
}
tc.runTest(t)
}
@ -944,7 +944,7 @@ func LegacyTestUpscaleCap(t *testing.T) {
CPUTarget: 10,
reportedLevels: []uint64{100, 200, 300},
reportedCPURequests: []resource.Quantity{resource.MustParse("0.1"), resource.MustParse("0.1"), resource.MustParse("0.1")},
useMetricsApi: true,
useMetricsAPI: true,
}
tc.runTest(t)
}
@ -966,7 +966,7 @@ func LegacyTestComputedToleranceAlgImplementation(t *testing.T) {
// Force a minimal scaling event by satisfying (tolerance < 1 - resourcesUsedRatio).
target := math.Abs(1/(requestedToUsed*(1-tolerance))) + .01
finalCpuPercentTarget := int32(target * 100)
finalCPUPercentTarget := int32(target * 100)
resourcesUsedRatio := float64(totalUsedCPUOfAllPods) / float64(float64(totalRequestedCPUOfAllPods)*target)
// i.e. .60 * 20 -> scaled down expectation.
@ -978,7 +978,7 @@ func LegacyTestComputedToleranceAlgImplementation(t *testing.T) {
maxReplicas: 1000,
initialReplicas: startPods,
desiredReplicas: finalPods,
CPUTarget: finalCpuPercentTarget,
CPUTarget: finalCPUPercentTarget,
reportedLevels: []uint64{
totalUsedCPUOfAllPods / 10,
totalUsedCPUOfAllPods / 10,
@ -1003,7 +1003,7 @@ func LegacyTestComputedToleranceAlgImplementation(t *testing.T) {
resource.MustParse(fmt.Sprint(perPodRequested) + "m"),
resource.MustParse(fmt.Sprint(perPodRequested) + "m"),
},
useMetricsApi: true,
useMetricsAPI: true,
}
tc.runTest(t)
@ -1011,8 +1011,8 @@ func LegacyTestComputedToleranceAlgImplementation(t *testing.T) {
// Reuse the data structure above, now testing "unscaling".
// Now, we test that no scaling happens if we are in a very close margin to the tolerance
target = math.Abs(1/(requestedToUsed*(1-tolerance))) + .004
finalCpuPercentTarget = int32(target * 100)
tc.CPUTarget = finalCpuPercentTarget
finalCPUPercentTarget = int32(target * 100)
tc.CPUTarget = finalCPUPercentTarget
tc.initialReplicas = startPods
tc.desiredReplicas = startPods
tc.runTest(t)
@ -1028,7 +1028,7 @@ func LegacyTestScaleUpRCImmediately(t *testing.T) {
verifyCPUCurrent: false,
reportedLevels: []uint64{0, 0, 0, 0},
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
useMetricsApi: true,
useMetricsAPI: true,
lastScaleTime: &time,
}
tc.runTest(t)
@ -1044,7 +1044,7 @@ func LegacyTestScaleDownRCImmediately(t *testing.T) {
CPUTarget: 50,
reportedLevels: []uint64{8000, 9500, 1000},
reportedCPURequests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
useMetricsApi: true,
useMetricsAPI: true,
lastScaleTime: &time,
}
tc.runTest(t)

View File

@ -604,7 +604,7 @@ func LegacyTestReplicaCalcComputedToleranceAlgImplementation(t *testing.T) {
// Force a minimal scaling event by satisfying (tolerance < 1 - resourcesUsedRatio).
target := math.Abs(1/(requestedToUsed*(1-tolerance))) + .01
finalCpuPercentTarget := int32(target * 100)
finalCPUPercentTarget := int32(target * 100)
resourcesUsedRatio := float64(totalUsedCPUOfAllPods) / float64(float64(totalRequestedCPUOfAllPods)*target)
// i.e. .60 * 20 -> scaled down expectation.
@ -641,7 +641,7 @@ func LegacyTestReplicaCalcComputedToleranceAlgImplementation(t *testing.T) {
resource.MustParse(fmt.Sprint(perPodRequested) + "m"),
},
targetUtilization: finalCpuPercentTarget,
targetUtilization: finalCPUPercentTarget,
expectedUtilization: int32(totalUsedCPUOfAllPods*100) / totalRequestedCPUOfAllPods,
expectedValue: numContainersPerPod * totalUsedCPUOfAllPods / 10,
},
@ -652,8 +652,8 @@ func LegacyTestReplicaCalcComputedToleranceAlgImplementation(t *testing.T) {
// Reuse the data structure above, now testing "unscaling".
// Now, we test that no scaling happens if we are in a very close margin to the tolerance
target = math.Abs(1/(requestedToUsed*(1-tolerance))) + .004
finalCpuPercentTarget = int32(target * 100)
tc.resource.targetUtilization = finalCpuPercentTarget
finalCPUPercentTarget = int32(target * 100)
tc.resource.targetUtilization = finalCPUPercentTarget
tc.currentReplicas = startPods
tc.expectedReplicas = startPods
tc.runTest(t)

View File

@ -46,7 +46,7 @@ func (r *FixedItemIntervalRateLimiter) NumRequeues(item interface{}) int {
func (r *FixedItemIntervalRateLimiter) Forget(item interface{}) {
}
// NewDefaultHPARateLimitter creates a rate limitter which limits overall (as per the
// NewDefaultHPARateLimiter creates a rate limitter which limits overall (as per the
// default controller rate limiter), as well as per the resync interval
func NewDefaultHPARateLimiter(interval time.Duration) workqueue.RateLimiter {
return NewFixedItemIntervalRateLimiter(interval)
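
This one-line change fixes a misspelled identifier in a doc comment: golint requires the comment on an exported declaration to begin with that declaration's exact name, so `NewDefaultHPARateLimitter` (double t) trips the check against `NewDefaultHPARateLimiter`. A minimal illustration with a hypothetical function:

```go
package example

// Frobnicate mangles its input. Had this comment begun with a
// misspelling such as "Frobnicat", golint would report: comment on
// exported function Frobnicate should be of the form "Frobnicate ...".
func Frobnicate(s string) string { return s }
```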

View File

@ -206,36 +206,35 @@ func (tc *replicaCalcTestCase) prepareTestClient(t *testing.T) (*fake.Clientset,
}
return true, &metrics, nil
} else {
name := getForAction.GetName()
mapper := api.Registry.RESTMapper()
metrics := &cmapi.MetricValueList{}
assert.NotNil(t, tc.metric.singleObject, "should have only requested a single-object metric when calling GetObjectMetricReplicas")
gk := schema.FromAPIVersionAndKind(tc.metric.singleObject.APIVersion, tc.metric.singleObject.Kind).GroupKind()
mapping, err := mapper.RESTMapping(gk)
if err != nil {
return true, nil, fmt.Errorf("unable to get mapping for %s: %v", gk.String(), err)
}
groupResource := schema.GroupResource{Group: mapping.GroupVersionKind.Group, Resource: mapping.Resource}
assert.Equal(t, groupResource.String(), getForAction.GetResource().Resource, "should have requested metrics for the resource matching the GroupKind passed in")
assert.Equal(t, tc.metric.singleObject.Name, name, "should have requested metrics for the object matching the name passed in")
metrics.Items = []cmapi.MetricValue{
{
DescribedObject: v1.ObjectReference{
Kind: tc.metric.singleObject.Kind,
APIVersion: tc.metric.singleObject.APIVersion,
Name: name,
},
Timestamp: metav1.Time{Time: tc.timestamp},
MetricName: tc.metric.name,
Value: *resource.NewMilliQuantity(int64(tc.metric.levels[0]), resource.DecimalSI),
},
}
return true, metrics, nil
}
name := getForAction.GetName()
mapper := api.Registry.RESTMapper()
metrics := &cmapi.MetricValueList{}
assert.NotNil(t, tc.metric.singleObject, "should have only requested a single-object metric when calling GetObjectMetricReplicas")
gk := schema.FromAPIVersionAndKind(tc.metric.singleObject.APIVersion, tc.metric.singleObject.Kind).GroupKind()
mapping, err := mapper.RESTMapping(gk)
if err != nil {
return true, nil, fmt.Errorf("unable to get mapping for %s: %v", gk.String(), err)
}
groupResource := schema.GroupResource{Group: mapping.GroupVersionKind.Group, Resource: mapping.Resource}
assert.Equal(t, groupResource.String(), getForAction.GetResource().Resource, "should have requested metrics for the resource matching the GroupKind passed in")
assert.Equal(t, tc.metric.singleObject.Name, name, "should have requested metrics for the object matching the name passed in")
metrics.Items = []cmapi.MetricValue{
{
DescribedObject: v1.ObjectReference{
Kind: tc.metric.singleObject.Kind,
APIVersion: tc.metric.singleObject.APIVersion,
Name: name,
},
Timestamp: metav1.Time{Time: tc.timestamp},
MetricName: tc.metric.name,
Value: *resource.NewMilliQuantity(int64(tc.metric.levels[0]), resource.DecimalSI),
},
}
return true, metrics, nil
})
return fakeClient, fakeMetricsClient, fakeCMClient
@ -729,7 +728,7 @@ func TestReplicaCalcComputedToleranceAlgImplementation(t *testing.T) {
// Force a minimal scaling event by satisfying (tolerance < 1 - resourcesUsedRatio).
target := math.Abs(1/(requestedToUsed*(1-tolerance))) + .01
finalCpuPercentTarget := int32(target * 100)
finalCPUPercentTarget := int32(target * 100)
resourcesUsedRatio := float64(totalUsedCPUOfAllPods) / float64(float64(totalRequestedCPUOfAllPods)*target)
// i.e. .60 * 20 -> scaled down expectation.
@ -766,7 +765,7 @@ func TestReplicaCalcComputedToleranceAlgImplementation(t *testing.T) {
resource.MustParse(fmt.Sprint(perPodRequested) + "m"),
},
targetUtilization: finalCpuPercentTarget,
targetUtilization: finalCPUPercentTarget,
expectedUtilization: int32(totalUsedCPUOfAllPods*100) / totalRequestedCPUOfAllPods,
expectedValue: numContainersPerPod * totalUsedCPUOfAllPods / 10,
},
@ -777,8 +776,8 @@ func TestReplicaCalcComputedToleranceAlgImplementation(t *testing.T) {
// Reuse the data structure above, now testing "unscaling".
// Now, we test that no scaling happens if we are in a very close margin to the tolerance
target = math.Abs(1/(requestedToUsed*(1-tolerance))) + .004
finalCpuPercentTarget = int32(target * 100)
tc.resource.targetUtilization = finalCpuPercentTarget
finalCPUPercentTarget = int32(target * 100)
tc.resource.targetUtilization = finalCPUPercentTarget
tc.currentReplicas = startPods
tc.expectedReplicas = startPods
tc.runTest(t)
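
The large re-indented reactor block at the top of this file mirrors the one in the first file: golint's redundant-else check asks for `else` blocks to be outdented when the `if` branch ends in a `return`. A minimal sketch with a hypothetical function:

```go
package example

// classify is a hypothetical function illustrating the redundant-else
// rule: because the if branch returns, golint suggests "if block ends
// with a return statement, so drop this else and outdent its block".
func classify(n int) string {
	if n < 0 {
		return "negative"
	}
	// Previously this return lived inside `} else { ... }`.
	return "non-negative"
}
```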