mirror of https://github.com/k3s-io/k3s
Make HPA controller use polymorphic scale client
This updates the HPA controller to use the polymorphic scale client from client-go. This should enable HPAs to work with arbitrary scalable resources, instead of just those in the extensions API group (meaning we can deprecate the copy of ReplicationController in extensions/v1beta1). It also means that the HPA controller now pays attention to the APIVersion field in `scaleTargetRef` (more specifically, the group part of it).

Note that, currently, discovery information on which resources are available where is only fetched once (the first time it's requested). In the future, we may want a refreshing discovery REST mapper.
parent 3f51babdd1
commit d2b41120ea
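To make the mechanics concrete before the diff: instead of a typed extensions/v1beta1 scales getter, the controller now parses the group out of `scaleTargetRef.APIVersion`, asks a RESTMapper for candidate resources for that group-kind, and fetches the `/scale` subresource by group-resource through the polymorphic scale client. Below is a minimal, self-contained sketch of that flow; it mirrors `reconcileAutoscaler` and `scaleForResourceMappings` in the diff, but the function name and package wrapper are illustrative only, not part of this commit.

package hpaexample // illustrative wrapper; the real code lives in the HPA controller

import (
	"fmt"

	autoscalingv1 "k8s.io/api/autoscaling/v1"
	autoscalingv2 "k8s.io/api/autoscaling/v2beta1"
	apimeta "k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime/schema"
	scaleclient "k8s.io/client-go/scale"
)

// fetchScale resolves an arbitrary scaleTargetRef to its /scale subresource.
func fetchScale(mapper apimeta.RESTMapper, scales scaleclient.ScalesGetter, namespace string, ref autoscalingv2.CrossVersionObjectReference) (*autoscalingv1.Scale, schema.GroupResource, error) {
	// only the group half of apiVersion matters for resolution
	targetGV, err := schema.ParseGroupVersion(ref.APIVersion)
	if err != nil {
		return nil, schema.GroupResource{}, fmt.Errorf("invalid API version in scale target reference: %v", err)
	}
	mappings, err := mapper.RESTMappings(schema.GroupKind{Group: targetGV.Group, Kind: ref.Kind})
	if err != nil {
		return nil, schema.GroupResource{}, fmt.Errorf("unable to determine resource for scale target reference: %v", err)
	}
	// try each candidate mapping until one serves a working scale subresource
	var firstErr error
	for i, mapping := range mappings {
		targetGR := mapping.GroupVersionKind.GroupVersion().WithResource(mapping.Resource).GroupResource()
		scale, err := scales.Scales(namespace).Get(targetGR, ref.Name)
		if err == nil {
			return scale, targetGR, nil
		}
		// remember the first error, then keep trying other mappings
		if i == 0 {
			firstErr = err
		}
	}
	if firstErr == nil {
		firstErr = fmt.Errorf("unrecognized resource")
	}
	return nil, schema.GroupResource{}, firstErr
}

The scale client itself is built once from a REST config, the RESTMapper, and a discovery-backed scale-kind resolver, exactly as in startHPAControllerWithMetricsClient below.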
@@ -124,6 +124,7 @@ go_library(
         "//vendor/k8s.io/client-go/kubernetes:go_default_library",
         "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
         "//vendor/k8s.io/client-go/rest:go_default_library",
+        "//vendor/k8s.io/client-go/scale:go_default_library",
         "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library",
         "//vendor/k8s.io/client-go/tools/leaderelection:go_default_library",
         "//vendor/k8s.io/client-go/tools/leaderelection/resourcelock:go_default_library",

@@ -21,7 +21,12 @@ limitations under the License.
 package app
 
 import (
+	apimeta "k8s.io/apimachinery/pkg/api/meta"
 	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/discovery"
+	discocache "k8s.io/client-go/discovery/cached" // Saturday Night Fever
+	"k8s.io/client-go/dynamic"
+	"k8s.io/client-go/scale"
 	"k8s.io/kubernetes/pkg/controller/podautoscaler"
 	"k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
 	resourceclient "k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1beta1"
@@ -63,16 +68,33 @@ func startHPAControllerWithLegacyClient(ctx ControllerContext) (bool, error) {
 }
 
 func startHPAControllerWithMetricsClient(ctx ControllerContext, metricsClient metrics.MetricsClient) (bool, error) {
+	hpaClientGoClient := ctx.ClientBuilder.ClientGoClientOrDie("horizontal-pod-autoscaler")
 	hpaClient := ctx.ClientBuilder.ClientOrDie("horizontal-pod-autoscaler")
+	hpaClientConfig := ctx.ClientBuilder.ConfigOrDie("horizontal-pod-autoscaler")
+
+	// TODO: we need something like deferred discovery REST mapper that calls invalidate
+	// on cache misses.
+	cachedDiscovery := discocache.NewMemCacheClient(hpaClientGoClient.Discovery())
+	restMapper := discovery.NewDeferredDiscoveryRESTMapper(cachedDiscovery, apimeta.InterfacesForUnstructured)
+	restMapper.Reset()
+	// we don't use cached discovery because DiscoveryScaleKindResolver does its own caching,
+	// so we want to re-fetch every time when we actually ask for it
+	scaleKindResolver := scale.NewDiscoveryScaleKindResolver(hpaClientGoClient.Discovery())
+	scaleClient, err := scale.NewForConfig(hpaClientConfig, restMapper, dynamic.LegacyAPIPathResolverFunc, scaleKindResolver)
+	if err != nil {
+		return false, err
+	}
+
 	replicaCalc := podautoscaler.NewReplicaCalculator(
 		metricsClient,
 		hpaClient.Core(),
 		ctx.Options.HorizontalPodAutoscalerTolerance,
 	)
 	go podautoscaler.NewHorizontalController(
-		ctx.ClientBuilder.ClientGoClientOrDie("horizontal-pod-autoscaler").Core(),
-		hpaClient.Extensions(),
+		hpaClientGoClient.Core(),
+		scaleClient,
 		hpaClient.Autoscaling(),
+		restMapper,
 		replicaCalc,
 		ctx.InformerFactory.Autoscaling().V1().HorizontalPodAutoscalers(),
 		ctx.Options.HorizontalPodAutoscalerSyncPeriod.Duration,

@@ -12,7 +12,6 @@ go_library(
     deps = [
-        "//pkg/apis/extensions:go_default_library",
         "//vendor/github.com/google/gofuzz:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
     ],

@@ -24,9 +24,9 @@ go_library(
         "//vendor/k8s.io/api/autoscaling/v1:go_default_library",
         "//vendor/k8s.io/api/autoscaling/v2beta1:go_default_library",
         "//vendor/k8s.io/api/core/v1:go_default_library",
-        "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
@@ -39,8 +39,8 @@ go_library(
         "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
         "//vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1:go_default_library",
         "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1:go_default_library",
         "//vendor/k8s.io/client-go/listers/autoscaling/v1:go_default_library",
+        "//vendor/k8s.io/client-go/scale:go_default_library",
         "//vendor/k8s.io/client-go/tools/cache:go_default_library",
         "//vendor/k8s.io/client-go/tools/record:go_default_library",
         "//vendor/k8s.io/client-go/util/workqueue:go_default_library",
@@ -58,6 +58,7 @@ go_test(
     importpath = "k8s.io/kubernetes/pkg/controller/podautoscaler",
     library = ":go_default_library",
     deps = [
         "//pkg/api/install:go_default_library",
         "//pkg/api/legacyscheme:go_default_library",
         "//pkg/apis/autoscaling:go_default_library",
+        "//pkg/apis/autoscaling/install:go_default_library",
@@ -69,15 +70,16 @@ go_test(
         "//vendor/k8s.io/api/autoscaling/v1:go_default_library",
         "//vendor/k8s.io/api/autoscaling/v2beta1:go_default_library",
         "//vendor/k8s.io/api/core/v1:go_default_library",
-        "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
         "//vendor/k8s.io/client-go/informers:go_default_library",
         "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
         "//vendor/k8s.io/client-go/rest:go_default_library",
+        "//vendor/k8s.io/client-go/scale/fake:go_default_library",
         "//vendor/k8s.io/client-go/testing:go_default_library",
         "//vendor/k8s.io/heapster/metrics/api/v1/types:go_default_library",
         "//vendor/k8s.io/metrics/pkg/apis/custom_metrics/v1beta1:go_default_library",

@@ -25,9 +25,9 @@ import (
 	autoscalingv1 "k8s.io/api/autoscaling/v1"
 	autoscalingv2 "k8s.io/api/autoscaling/v2beta1"
 	"k8s.io/api/core/v1"
-	extensions "k8s.io/api/extensions/v1beta1"
 	apiequality "k8s.io/apimachinery/pkg/api/equality"
 	"k8s.io/apimachinery/pkg/api/errors"
+	apimeta "k8s.io/apimachinery/pkg/api/meta"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
@@ -39,8 +39,8 @@ import (
 	"k8s.io/client-go/kubernetes/scheme"
 	autoscalingclient "k8s.io/client-go/kubernetes/typed/autoscaling/v1"
 	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
-	extensionsclient "k8s.io/client-go/kubernetes/typed/extensions/v1beta1"
 	autoscalinglisters "k8s.io/client-go/listers/autoscaling/v1"
+	scaleclient "k8s.io/client-go/scale"
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/tools/record"
 	"k8s.io/client-go/util/workqueue"
@@ -57,8 +57,9 @@ var (
 // in the system with the actual deployments/replication controllers they
 // control.
 type HorizontalController struct {
-	scaleNamespacer extensionsclient.ScalesGetter
+	scaleNamespacer scaleclient.ScalesGetter
 	hpaNamespacer   autoscalingclient.HorizontalPodAutoscalersGetter
+	mapper          apimeta.RESTMapper
 
 	replicaCalc   *ReplicaCalculator
 	eventRecorder record.EventRecorder
@@ -78,8 +79,9 @@ type HorizontalController struct {
 // NewHorizontalController creates a new HorizontalController.
 func NewHorizontalController(
 	evtNamespacer v1core.EventsGetter,
-	scaleNamespacer extensionsclient.ScalesGetter,
+	scaleNamespacer scaleclient.ScalesGetter,
 	hpaNamespacer autoscalingclient.HorizontalPodAutoscalersGetter,
+	mapper apimeta.RESTMapper,
 	replicaCalc *ReplicaCalculator,
 	hpaInformer autoscalinginformers.HorizontalPodAutoscalerInformer,
 	resyncPeriod time.Duration,
@@ -99,7 +101,8 @@ func NewHorizontalController(
 		hpaNamespacer:            hpaNamespacer,
 		upscaleForbiddenWindow:   upscaleForbiddenWindow,
 		downscaleForbiddenWindow: downscaleForbiddenWindow,
-		queue: workqueue.NewNamedRateLimitingQueue(NewDefaultHPARateLimiter(resyncPeriod), "horizontalpodautoscaler"),
+		queue:  workqueue.NewNamedRateLimitingQueue(NewDefaultHPARateLimiter(resyncPeriod), "horizontalpodautoscaler"),
+		mapper: mapper,
 	}
 
 	hpaInformer.Informer().AddEventHandlerWithResyncPeriod(
@@ -189,7 +192,7 @@ func (a *HorizontalController) processNextWorkItem() bool {
 // Computes the desired number of replicas for the metric specifications listed in the HPA, returning the maximum
 // of the computed replica counts, a description of the associated metric, and the statuses of all metrics
 // computed.
-func (a *HorizontalController) computeReplicasForMetrics(hpa *autoscalingv2.HorizontalPodAutoscaler, scale *extensions.Scale,
+func (a *HorizontalController) computeReplicasForMetrics(hpa *autoscalingv2.HorizontalPodAutoscaler, scale *autoscalingv1.Scale,
 	metricSpecs []autoscalingv2.MetricSpec) (replicas int32, metric string, statuses []autoscalingv2.MetricStatus, timestamp time.Time, err error) {
 
 	currentReplicas := scale.Status.Replicas
@@ -197,21 +200,14 @@ func (a *HorizontalController) computeReplicasForMetrics(hpa *autoscalingv2.HorizontalPodAutoscaler, scale *autoscalingv1.Scale,
 	statuses = make([]autoscalingv2.MetricStatus, len(metricSpecs))
 
 	for i, metricSpec := range metricSpecs {
-		if len(scale.Status.Selector) == 0 && len(scale.Status.TargetSelector) == 0 {
+		if scale.Status.Selector == "" {
 			errMsg := "selector is required"
 			a.eventRecorder.Event(hpa, v1.EventTypeWarning, "SelectorRequired", errMsg)
 			setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "InvalidSelector", "the HPA target's scale is missing a selector")
 			return 0, "", nil, time.Time{}, fmt.Errorf(errMsg)
 		}
 
-		var selector labels.Selector
-		var err error
-		if len(scale.Status.Selector) > 0 {
-			selector = labels.SelectorFromSet(labels.Set(scale.Status.Selector))
-			err = nil
-		} else {
-			selector, err = labels.Parse(scale.Status.TargetSelector)
-		}
+		selector, err := labels.Parse(scale.Status.Selector)
 		if err != nil {
 			errMsg := fmt.Sprintf("couldn't convert selector into a corresponding internal selector object: %v", err)
 			a.eventRecorder.Event(hpa, v1.EventTypeWarning, "InvalidSelector", errMsg)
@@ -349,7 +345,28 @@ func (a *HorizontalController) reconcileAutoscaler(hpav1Shared *autoscalingv1.HorizontalPodAutoscaler) error {
 
 	reference := fmt.Sprintf("%s/%s/%s", hpa.Spec.ScaleTargetRef.Kind, hpa.Namespace, hpa.Spec.ScaleTargetRef.Name)
 
-	scale, err := a.scaleNamespacer.Scales(hpa.Namespace).Get(hpa.Spec.ScaleTargetRef.Kind, hpa.Spec.ScaleTargetRef.Name)
+	targetGV, err := schema.ParseGroupVersion(hpa.Spec.ScaleTargetRef.APIVersion)
+	if err != nil {
+		a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetScale", err.Error())
+		setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionFalse, "FailedGetScale", "the HPA controller was unable to get the target's current scale: %v", err)
+		a.updateStatusIfNeeded(hpaStatusOriginal, hpa)
+		return fmt.Errorf("invalid API version in scale target reference: %v", err)
+	}
+
+	targetGK := schema.GroupKind{
+		Group: targetGV.Group,
+		Kind:  hpa.Spec.ScaleTargetRef.Kind,
+	}
+
+	mappings, err := a.mapper.RESTMappings(targetGK)
+	if err != nil {
+		a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetScale", err.Error())
+		setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionFalse, "FailedGetScale", "the HPA controller was unable to get the target's current scale: %v", err)
+		a.updateStatusIfNeeded(hpaStatusOriginal, hpa)
+		return fmt.Errorf("unable to determine resource for scale target reference: %v", err)
+	}
+
+	scale, targetGR, err := a.scaleForResourceMappings(hpa.Namespace, hpa.Spec.ScaleTargetRef.Name, mappings)
 	if err != nil {
 		a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetScale", err.Error())
 		setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionFalse, "FailedGetScale", "the HPA controller was unable to get the target's current scale: %v", err)
@@ -479,7 +496,7 @@ func (a *HorizontalController) reconcileAutoscaler(hpav1Shared *autoscalingv1.HorizontalPodAutoscaler) error {
 
 	if rescale {
 		scale.Spec.Replicas = desiredReplicas
-		_, err = a.scaleNamespacer.Scales(hpa.Namespace).Update(hpa.Spec.ScaleTargetRef.Kind, scale)
+		_, err = a.scaleNamespacer.Scales(hpa.Namespace).Update(targetGR, scale)
 		if err != nil {
 			a.eventRecorder.Eventf(hpa, v1.EventTypeWarning, "FailedRescale", "New size: %d; reason: %s; error: %v", desiredReplicas, rescaleReason, err.Error())
 			setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionFalse, "FailedUpdateScale", "the HPA controller was unable to update the target scale: %v", err)
@@ -526,6 +543,35 @@ func (a *HorizontalController) shouldScale(hpa *autoscalingv2.HorizontalPodAutoscaler,
 	return false
 }
 
+// scaleForResourceMappings attempts to fetch the scale for the
+// resource with the given name and namespace, trying each RESTMapping
+// in turn until a working one is found.  If none work, the first error
+// is returned.  It returns both the scale, as well as the group-resource from
+// the working mapping.
+func (a *HorizontalController) scaleForResourceMappings(namespace, name string, mappings []*apimeta.RESTMapping) (*autoscalingv1.Scale, schema.GroupResource, error) {
+	var firstErr error
+	for i, mapping := range mappings {
+		targetGR := mapping.GroupVersionKind.GroupVersion().WithResource(mapping.Resource).GroupResource()
+		scale, err := a.scaleNamespacer.Scales(namespace).Get(targetGR, name)
+		if err == nil {
+			return scale, targetGR, nil
+		}
+
+		// if this is the first error, remember it,
+		// then go on and try other mappings until we find a good one
+		if i == 0 {
+			firstErr = err
+		}
+	}
+
+	// make sure we handle an empty set of mappings
+	if firstErr == nil {
+		firstErr = fmt.Errorf("unrecognized resource")
+	}
+
+	return nil, schema.GroupResource{}, firstErr
+}
+
 // setCurrentReplicasInStatus sets the current replica count in the status of the HPA.
 func (a *HorizontalController) setCurrentReplicasInStatus(hpa *autoscalingv2.HorizontalPodAutoscaler, currentReplicas int32) {
 	a.setStatus(hpa, currentReplicas, hpa.Status.DesiredReplicas, hpa.Status.CurrentMetrics, false)

@@ -27,15 +27,16 @@ import (
 	autoscalingv1 "k8s.io/api/autoscaling/v1"
 	autoscalingv2 "k8s.io/api/autoscaling/v2beta1"
 	"k8s.io/api/core/v1"
-	extensions "k8s.io/api/extensions/v1beta1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/watch"
 	"k8s.io/client-go/informers"
 	"k8s.io/client-go/kubernetes/fake"
 	clientfake "k8s.io/client-go/kubernetes/fake"
+	scalefake "k8s.io/client-go/scale/fake"
 	core "k8s.io/client-go/testing"
 	"k8s.io/kubernetes/pkg/api/legacyscheme"
 	"k8s.io/kubernetes/pkg/apis/autoscaling"
@@ -121,6 +122,7 @@ type testCase struct {
 	testClient        *fake.Clientset
 	testMetricsClient *metricsfake.Clientset
 	testCMClient      *cmfake.FakeCustomMetricsClient
+	testScaleClient   *scalefake.FakeScaleClient
 }
 
 // Needs to be called under a lock.
@@ -144,12 +146,12 @@ func init() {
 	scaleUpLimitFactor = 8
 }
 
-func (tc *testCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfake.Clientset, *cmfake.FakeCustomMetricsClient) {
+func (tc *testCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfake.Clientset, *cmfake.FakeCustomMetricsClient, *scalefake.FakeScaleClient) {
 	namespace := "test-namespace"
 	hpaName := "test-hpa"
 	podNamePrefix := "test-pod"
-	// TODO: also test with TargetSelector
-	selector := map[string]string{"name": podNamePrefix}
+	labelSet := map[string]string{"name": podNamePrefix}
+	selector := labels.SelectorFromSet(labelSet).String()
 
 	tc.Lock()
 
@@ -161,13 +163,11 @@ func (tc *testCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfake.Clientset, *cmfake.FakeCustomMetricsClient) {
 		tc.computeCPUCurrent()
 	}
 
-	// TODO(madhusudancs): HPA only supports resources in extensions/v1beta1 right now. Add
-	// tests for "v1" replicationcontrollers when HPA adds support for cross-group scale.
 	if tc.resource == nil {
 		tc.resource = &fakeResource{
 			name:       "test-rc",
-			apiVersion: "extensions/v1beta1",
-			kind:       "replicationcontrollers",
+			apiVersion: "v1",
+			kind:       "ReplicationController",
 		}
 	}
 	tc.Unlock()
@@ -239,66 +239,6 @@ func (tc *testCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfake.Clientset, *cmfake.FakeCustomMetricsClient) {
 		return true, objv1, nil
 	})
 
-	fakeClient.AddReactor("get", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
-		tc.Lock()
-		defer tc.Unlock()
-
-		obj := &extensions.Scale{
-			ObjectMeta: metav1.ObjectMeta{
-				Name:      tc.resource.name,
-				Namespace: namespace,
-			},
-			Spec: extensions.ScaleSpec{
-				Replicas: tc.initialReplicas,
-			},
-			Status: extensions.ScaleStatus{
-				Replicas: tc.initialReplicas,
-				Selector: selector,
-			},
-		}
-		return true, obj, nil
-	})
-
-	fakeClient.AddReactor("get", "deployments", func(action core.Action) (handled bool, ret runtime.Object, err error) {
-		tc.Lock()
-		defer tc.Unlock()
-
-		obj := &extensions.Scale{
-			ObjectMeta: metav1.ObjectMeta{
-				Name:      tc.resource.name,
-				Namespace: namespace,
-			},
-			Spec: extensions.ScaleSpec{
-				Replicas: tc.initialReplicas,
-			},
-			Status: extensions.ScaleStatus{
-				Replicas: tc.initialReplicas,
-				Selector: selector,
-			},
-		}
-		return true, obj, nil
-	})
-
-	fakeClient.AddReactor("get", "replicasets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
-		tc.Lock()
-		defer tc.Unlock()
-
-		obj := &extensions.Scale{
-			ObjectMeta: metav1.ObjectMeta{
-				Name:      tc.resource.name,
-				Namespace: namespace,
-			},
-			Spec: extensions.ScaleSpec{
-				Replicas: tc.initialReplicas,
-			},
-			Status: extensions.ScaleStatus{
-				Replicas: tc.initialReplicas,
-				Selector: selector,
-			},
-		}
-		return true, obj, nil
-	})
-
 	fakeClient.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
 		tc.Lock()
 		defer tc.Unlock()
@@ -344,39 +284,6 @@ func (tc *testCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfake.Clientset, *cmfake.FakeCustomMetricsClient) {
 		return true, obj, nil
 	})
 
-	fakeClient.AddReactor("update", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
-		tc.Lock()
-		defer tc.Unlock()
-
-		obj := action.(core.UpdateAction).GetObject().(*extensions.Scale)
-		replicas := action.(core.UpdateAction).GetObject().(*extensions.Scale).Spec.Replicas
-		assert.Equal(t, tc.desiredReplicas, replicas, "the replica count of the RC should be as expected")
-		tc.scaleUpdated = true
-		return true, obj, nil
-	})
-
-	fakeClient.AddReactor("update", "deployments", func(action core.Action) (handled bool, ret runtime.Object, err error) {
-		tc.Lock()
-		defer tc.Unlock()
-
-		obj := action.(core.UpdateAction).GetObject().(*extensions.Scale)
-		replicas := action.(core.UpdateAction).GetObject().(*extensions.Scale).Spec.Replicas
-		assert.Equal(t, tc.desiredReplicas, replicas, "the replica count of the deployment should be as expected")
-		tc.scaleUpdated = true
-		return true, obj, nil
-	})
-
-	fakeClient.AddReactor("update", "replicasets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
-		tc.Lock()
-		defer tc.Unlock()
-
-		obj := action.(core.UpdateAction).GetObject().(*extensions.Scale)
-		replicas := action.(core.UpdateAction).GetObject().(*extensions.Scale).Spec.Replicas
-		assert.Equal(t, tc.desiredReplicas, replicas, "the replica count of the replicaset should be as expected")
-		tc.scaleUpdated = true
-		return true, obj, nil
-	})
-
 	fakeClient.AddReactor("update", "horizontalpodautoscalers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
 		tc.Lock()
 		defer tc.Unlock()
@@ -386,8 +293,9 @@ func (tc *testCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfake.Clientset, *cmfake.FakeCustomMetricsClient) {
 		assert.Equal(t, hpaName, obj.Name, "the HPA name should be as expected")
 		assert.Equal(t, tc.desiredReplicas, obj.Status.DesiredReplicas, "the desired replica count reported in the object status should be as expected")
 		if tc.verifyCPUCurrent {
-			assert.NotNil(t, obj.Status.CurrentCPUUtilizationPercentage, "the reported CPU utilization percentage should be non-nil")
-			assert.Equal(t, tc.CPUCurrent, *obj.Status.CurrentCPUUtilizationPercentage, "the report CPU utilization percentage should be as expected")
+			if assert.NotNil(t, obj.Status.CurrentCPUUtilizationPercentage, "the reported CPU utilization percentage should be non-nil") {
+				assert.Equal(t, tc.CPUCurrent, *obj.Status.CurrentCPUUtilizationPercentage, "the report CPU utilization percentage should be as expected")
+			}
 		}
 		var actualConditions []autoscalingv1.HorizontalPodAutoscalerCondition
 		if err := json.Unmarshal([]byte(obj.ObjectMeta.Annotations[autoscaling.HorizontalPodAutoscalerConditionsAnnotation]), &actualConditions); err != nil {
@@ -411,6 +319,100 @@ func (tc *testCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfake.Clientset, *cmfake.FakeCustomMetricsClient) {
 		return true, obj, nil
 	})
 
+	fakeScaleClient := &scalefake.FakeScaleClient{}
+	fakeScaleClient.AddReactor("get", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
+		tc.Lock()
+		defer tc.Unlock()
+
+		obj := &autoscalingv1.Scale{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      tc.resource.name,
+				Namespace: namespace,
+			},
+			Spec: autoscalingv1.ScaleSpec{
+				Replicas: tc.initialReplicas,
+			},
+			Status: autoscalingv1.ScaleStatus{
+				Replicas: tc.initialReplicas,
+				Selector: selector,
+			},
+		}
+		return true, obj, nil
+	})
+
+	fakeScaleClient.AddReactor("get", "deployments", func(action core.Action) (handled bool, ret runtime.Object, err error) {
+		tc.Lock()
+		defer tc.Unlock()
+
+		obj := &autoscalingv1.Scale{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      tc.resource.name,
+				Namespace: namespace,
+			},
+			Spec: autoscalingv1.ScaleSpec{
+				Replicas: tc.initialReplicas,
+			},
+			Status: autoscalingv1.ScaleStatus{
+				Replicas: tc.initialReplicas,
+				Selector: selector,
+			},
+		}
+		return true, obj, nil
+	})
+
+	fakeScaleClient.AddReactor("get", "replicasets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
+		tc.Lock()
+		defer tc.Unlock()
+
+		obj := &autoscalingv1.Scale{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      tc.resource.name,
+				Namespace: namespace,
+			},
+			Spec: autoscalingv1.ScaleSpec{
+				Replicas: tc.initialReplicas,
+			},
+			Status: autoscalingv1.ScaleStatus{
+				Replicas: tc.initialReplicas,
+				Selector: selector,
+			},
+		}
+		return true, obj, nil
+	})
+
+	fakeScaleClient.AddReactor("update", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
+		tc.Lock()
+		defer tc.Unlock()
+
+		obj := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale)
+		replicas := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale).Spec.Replicas
+		assert.Equal(t, tc.desiredReplicas, replicas, "the replica count of the RC should be as expected")
+		tc.scaleUpdated = true
+		return true, obj, nil
+	})
+
+	fakeScaleClient.AddReactor("update", "deployments", func(action core.Action) (handled bool, ret runtime.Object, err error) {
+		tc.Lock()
+		defer tc.Unlock()
+
+		obj := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale)
+		replicas := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale).Spec.Replicas
+		assert.Equal(t, tc.desiredReplicas, replicas, "the replica count of the deployment should be as expected")
+		tc.scaleUpdated = true
+		return true, obj, nil
+	})
+
+	fakeScaleClient.AddReactor("update", "replicasets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
+		tc.Lock()
+		defer tc.Unlock()
+
+		obj := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale)
+		replicas := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale).Spec.Replicas
+		assert.Equal(t, tc.desiredReplicas, replicas, "the replica count of the replicaset should be as expected")
+		tc.scaleUpdated = true
+		return true, obj, nil
+	})
+
 	fakeWatch := watch.NewFake()
 	fakeClient.AddWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil))
 
@@ -427,7 +429,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfake.Clientset, *cmfake.FakeCustomMetricsClient) {
 				ObjectMeta: metav1.ObjectMeta{
 					Name:      fmt.Sprintf("%s-%d", podNamePrefix, i),
 					Namespace: namespace,
-					Labels:    selector,
+					Labels:    labelSet,
 				},
 				Timestamp:  metav1.Time{Time: time.Now()},
 				Containers: []metricsapi.ContainerMetrics{
@@ -522,7 +524,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfake.Clientset, *cmfake.FakeCustomMetricsClient) {
 		return true, metrics, nil
 	})
 
-	return fakeClient, fakeMetricsClient, fakeCMClient
+	return fakeClient, fakeMetricsClient, fakeCMClient, fakeScaleClient
 }
 
 func (tc *testCase) verifyResults(t *testing.T) {
@@ -537,7 +539,7 @@ func (tc *testCase) verifyResults(t *testing.T) {
 }
 
 func (tc *testCase) setupController(t *testing.T) (*HorizontalController, informers.SharedInformerFactory) {
-	testClient, testMetricsClient, testCMClient := tc.prepareTestClient(t)
+	testClient, testMetricsClient, testCMClient, testScaleClient := tc.prepareTestClient(t)
 	if tc.testClient != nil {
 		testClient = tc.testClient
 	}
@@ -547,6 +549,9 @@ func (tc *testCase) setupController(t *testing.T) (*HorizontalController, informers.SharedInformerFactory) {
 	if tc.testCMClient != nil {
 		testCMClient = tc.testCMClient
 	}
+	if tc.testScaleClient != nil {
+		testScaleClient = tc.testScaleClient
+	}
 	metricsClient := metrics.NewRESTMetricsClient(
 		testMetricsClient.MetricsV1beta1(),
 		testCMClient,
@@ -587,8 +592,9 @@ func (tc *testCase) setupController(t *testing.T) (*HorizontalController, informers.SharedInformerFactory) {
 
 	hpaController := NewHorizontalController(
 		eventClient.Core(),
-		testClient.Extensions(),
+		testScaleClient,
 		testClient.Autoscaling(),
+		legacyscheme.Registry.RESTMapper(),
 		replicaCalc,
 		informerFactory.Autoscaling().V1().HorizontalPodAutoscalers(),
 		controller.NoResyncPeriodFunc(),
@@ -692,7 +698,7 @@ func TestScaleUpDeployment(t *testing.T) {
 		resource: &fakeResource{
 			name:       "test-dep",
 			apiVersion: "extensions/v1beta1",
-			kind:       "deployments",
+			kind:       "Deployment",
 		},
 	}
 	tc.runTest(t)
@@ -712,7 +718,7 @@ func TestScaleUpReplicaSet(t *testing.T) {
 		resource: &fakeResource{
 			name:       "test-replicaset",
 			apiVersion: "extensions/v1beta1",
-			kind:       "replicasets",
+			kind:       "ReplicaSet",
 		},
 	}
 	tc.runTest(t)
@@ -1267,18 +1273,18 @@ func TestConditionInvalidSelectorMissing(t *testing.T) {
 		},
 	}
 
-	testClient, _, _ := tc.prepareTestClient(t)
-	tc.testClient = testClient
+	_, _, _, testScaleClient := tc.prepareTestClient(t)
+	tc.testScaleClient = testScaleClient
 
-	testClient.PrependReactor("get", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
-		obj := &extensions.Scale{
+	testScaleClient.PrependReactor("get", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
+		obj := &autoscalingv1.Scale{
 			ObjectMeta: metav1.ObjectMeta{
 				Name: tc.resource.name,
 			},
-			Spec: extensions.ScaleSpec{
+			Spec: autoscalingv1.ScaleSpec{
 				Replicas: tc.initialReplicas,
 			},
-			Status: extensions.ScaleStatus{
+			Status: autoscalingv1.ScaleStatus{
 				Replicas: tc.initialReplicas,
 			},
 		}
@@ -1312,20 +1318,20 @@ func TestConditionInvalidSelectorUnparsable(t *testing.T) {
 		},
 	}
 
-	testClient, _, _ := tc.prepareTestClient(t)
-	tc.testClient = testClient
+	_, _, _, testScaleClient := tc.prepareTestClient(t)
+	tc.testScaleClient = testScaleClient
 
-	testClient.PrependReactor("get", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
-		obj := &extensions.Scale{
+	testScaleClient.PrependReactor("get", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
+		obj := &autoscalingv1.Scale{
 			ObjectMeta: metav1.ObjectMeta{
 				Name: tc.resource.name,
 			},
-			Spec: extensions.ScaleSpec{
+			Spec: autoscalingv1.ScaleSpec{
 				Replicas: tc.initialReplicas,
 			},
-			Status: extensions.ScaleStatus{
-				Replicas:       tc.initialReplicas,
-				TargetSelector: "cheddar cheese",
+			Status: autoscalingv1.ScaleStatus{
+				Replicas: tc.initialReplicas,
+				Selector: "cheddar cheese",
 			},
 		}
 		return true, obj, nil
@@ -1373,7 +1379,7 @@ func TestConditionFailedGetMetrics(t *testing.T) {
 		reportedCPURequests: []resource.Quantity{resource.MustParse("0.1"), resource.MustParse("0.1"), resource.MustParse("0.1")},
 		useMetricsAPI:       true,
 	}
-	_, testMetricsClient, testCMClient := tc.prepareTestClient(t)
+	_, testMetricsClient, testCMClient, _ := tc.prepareTestClient(t)
 	tc.testMetricsClient = testMetricsClient
 	tc.testCMClient = testCMClient
 
@@ -1446,11 +1452,11 @@ func TestConditionFailedGetScale(t *testing.T) {
 		},
 	}
 
-	testClient, _, _ := tc.prepareTestClient(t)
-	tc.testClient = testClient
+	_, _, _, testScaleClient := tc.prepareTestClient(t)
+	tc.testScaleClient = testScaleClient
 
-	testClient.PrependReactor("get", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
-		return true, &extensions.Scale{}, fmt.Errorf("something went wrong")
+	testScaleClient.PrependReactor("get", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
+		return true, &autoscalingv1.Scale{}, fmt.Errorf("something went wrong")
 	})
 
 	tc.runTest(t)
@@ -1473,11 +1479,11 @@ func TestConditionFailedUpdateScale(t *testing.T) {
 		}),
 	}
 
-	testClient, _, _ := tc.prepareTestClient(t)
-	tc.testClient = testClient
+	_, _, _, testScaleClient := tc.prepareTestClient(t)
+	tc.testScaleClient = testScaleClient
 
-	testClient.PrependReactor("update", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
-		return true, &extensions.Scale{}, fmt.Errorf("something went wrong")
+	testScaleClient.PrependReactor("update", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
+		return true, &autoscalingv1.Scale{}, fmt.Errorf("something went wrong")
 	})
 
 	tc.runTest(t)
@@ -1659,7 +1665,7 @@ func TestAvoidUncessaryUpdates(t *testing.T) {
 		reportedPodReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
 		useMetricsAPI:        true,
 	}
-	testClient, _, _ := tc.prepareTestClient(t)
+	testClient, _, _, _ := tc.prepareTestClient(t)
 	tc.testClient = testClient
 	var savedHPA *autoscalingv1.HorizontalPodAutoscaler
 	testClient.PrependReactor("list", "horizontalpodautoscalers", func(action core.Action) (handled bool, ret runtime.Object, err error) {

@@ -30,16 +30,18 @@ import (
 	autoscalingv1 "k8s.io/api/autoscaling/v1"
 	autoscalingv2 "k8s.io/api/autoscaling/v2beta1"
 	"k8s.io/api/core/v1"
-	extensions "k8s.io/api/extensions/v1beta1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/watch"
 	"k8s.io/client-go/informers"
 	"k8s.io/client-go/kubernetes/fake"
 	clientfake "k8s.io/client-go/kubernetes/fake"
 	restclient "k8s.io/client-go/rest"
+	scalefake "k8s.io/client-go/scale/fake"
 	core "k8s.io/client-go/testing"
+	"k8s.io/kubernetes/pkg/api/legacyscheme"
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
 
@@ -48,6 +50,7 @@ import (
 
 	"github.com/stretchr/testify/assert"
 
 	_ "k8s.io/kubernetes/pkg/api/install"
+	_ "k8s.io/kubernetes/pkg/apis/autoscaling/install"
 	_ "k8s.io/kubernetes/pkg/apis/extensions/install"
 )
@@ -114,12 +117,12 @@ func (tc *legacyTestCase) computeCPUCurrent() {
 	tc.CPUCurrent = int32(100 * reported / requested)
 }
 
-func (tc *legacyTestCase) prepareTestClient(t *testing.T) *fake.Clientset {
+func (tc *legacyTestCase) prepareTestClient(t *testing.T) (*fake.Clientset, *scalefake.FakeScaleClient) {
 	namespace := "test-namespace"
 	hpaName := "test-hpa"
 	podNamePrefix := "test-pod"
-	// TODO: also test with TargetSelector
-	selector := map[string]string{"name": podNamePrefix}
+	labelSet := map[string]string{"name": podNamePrefix}
+	selector := labels.SelectorFromSet(labelSet).String()
 
 	tc.Lock()
 
@@ -131,13 +134,11 @@ func (tc *legacyTestCase) prepareTestClient(t *testing.T) *fake.Clientset {
 		tc.computeCPUCurrent()
 	}
 
-	// TODO(madhusudancs): HPA only supports resources in extensions/v1beta1 right now. Add
-	// tests for "v1" replicationcontrollers when HPA adds support for cross-group scale.
 	if tc.resource == nil {
 		tc.resource = &fakeResource{
 			name:       "test-rc",
-			apiVersion: "extensions/v1beta1",
-			kind:       "replicationcontrollers",
+			apiVersion: "v1",
+			kind:       "ReplicationController",
 		}
 	}
 	tc.Unlock()
@@ -208,66 +209,6 @@ func (tc *legacyTestCase) prepareTestClient(t *testing.T) *fake.Clientset {
 		return true, objv1, nil
 	})
 
-	fakeClient.AddReactor("get", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
-		tc.Lock()
-		defer tc.Unlock()
-
-		obj := &extensions.Scale{
-			ObjectMeta: metav1.ObjectMeta{
-				Name:      tc.resource.name,
-				Namespace: namespace,
-			},
-			Spec: extensions.ScaleSpec{
-				Replicas: tc.initialReplicas,
-			},
-			Status: extensions.ScaleStatus{
-				Replicas: tc.initialReplicas,
-				Selector: selector,
-			},
-		}
-		return true, obj, nil
-	})
-
-	fakeClient.AddReactor("get", "deployments", func(action core.Action) (handled bool, ret runtime.Object, err error) {
-		tc.Lock()
-		defer tc.Unlock()
-
-		obj := &extensions.Scale{
-			ObjectMeta: metav1.ObjectMeta{
-				Name:      tc.resource.name,
-				Namespace: namespace,
-			},
-			Spec: extensions.ScaleSpec{
-				Replicas: tc.initialReplicas,
-			},
-			Status: extensions.ScaleStatus{
-				Replicas: tc.initialReplicas,
-				Selector: selector,
-			},
-		}
-		return true, obj, nil
-	})
-
-	fakeClient.AddReactor("get", "replicasets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
-		tc.Lock()
-		defer tc.Unlock()
-
-		obj := &extensions.Scale{
-			ObjectMeta: metav1.ObjectMeta{
-				Name:      tc.resource.name,
-				Namespace: namespace,
-			},
-			Spec: extensions.ScaleSpec{
-				Replicas: tc.initialReplicas,
-			},
-			Status: extensions.ScaleStatus{
-				Replicas: tc.initialReplicas,
-				Selector: selector,
-			},
-		}
-		return true, obj, nil
-	})
-
 	fakeClient.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
 		tc.Lock()
 		defer tc.Unlock()
@@ -386,39 +327,6 @@ func (tc *legacyTestCase) prepareTestClient(t *testing.T) *fake.Clientset {
 		return true, newFakeResponseWrapper(heapsterRawMemResponse), nil
 	})
 
-	fakeClient.AddReactor("update", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
-		tc.Lock()
-		defer tc.Unlock()
-
-		obj := action.(core.UpdateAction).GetObject().(*extensions.Scale)
-		replicas := action.(core.UpdateAction).GetObject().(*extensions.Scale).Spec.Replicas
-		assert.Equal(t, tc.desiredReplicas, replicas, "the replica count of the RC should be as expected")
-		tc.scaleUpdated = true
-		return true, obj, nil
-	})
-
-	fakeClient.AddReactor("update", "deployments", func(action core.Action) (handled bool, ret runtime.Object, err error) {
-		tc.Lock()
-		defer tc.Unlock()
-
-		obj := action.(core.UpdateAction).GetObject().(*extensions.Scale)
-		replicas := action.(core.UpdateAction).GetObject().(*extensions.Scale).Spec.Replicas
-		assert.Equal(t, tc.desiredReplicas, replicas, "the replica count of the deployment should be as expected")
-		tc.scaleUpdated = true
-		return true, obj, nil
-	})
-
-	fakeClient.AddReactor("update", "replicasets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
-		tc.Lock()
-		defer tc.Unlock()
-
-		obj := action.(core.UpdateAction).GetObject().(*extensions.Scale)
-		replicas := action.(core.UpdateAction).GetObject().(*extensions.Scale).Spec.Replicas
-		assert.Equal(t, tc.desiredReplicas, replicas, "the replica count of the replicaset should be as expected")
-		tc.scaleUpdated = true
-		return true, obj, nil
-	})
-
 	fakeClient.AddReactor("update", "horizontalpodautoscalers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
 		tc.Lock()
 		defer tc.Unlock()
@@ -428,8 +336,9 @@ func (tc *legacyTestCase) prepareTestClient(t *testing.T) *fake.Clientset {
 		assert.Equal(t, hpaName, obj.Name, "the HPA name should be as expected")
 		assert.Equal(t, tc.desiredReplicas, obj.Status.DesiredReplicas, "the desired replica count reported in the object status should be as expected")
 		if tc.verifyCPUCurrent {
-			assert.NotNil(t, obj.Status.CurrentCPUUtilizationPercentage, "the reported CPU utilization percentage should be non-nil")
-			assert.Equal(t, tc.CPUCurrent, *obj.Status.CurrentCPUUtilizationPercentage, "the report CPU utilization percentage should be as expected")
+			if assert.NotNil(t, obj.Status.CurrentCPUUtilizationPercentage, "the reported CPU utilization percentage should be non-nil") {
+				assert.Equal(t, tc.CPUCurrent, *obj.Status.CurrentCPUUtilizationPercentage, "the report CPU utilization percentage should be as expected")
+			}
 		}
 		tc.statusUpdated = true
 		// Every time we reconcile HPA object we are updating status.
@@ -437,10 +346,104 @@ func (tc *legacyTestCase) prepareTestClient(t *testing.T) *fake.Clientset {
 		return true, obj, nil
 	})
 
+	fakeScaleClient := &scalefake.FakeScaleClient{}
+	fakeScaleClient.AddReactor("get", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
+		tc.Lock()
+		defer tc.Unlock()
+
+		obj := &autoscalingv1.Scale{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      tc.resource.name,
+				Namespace: namespace,
+			},
+			Spec: autoscalingv1.ScaleSpec{
+				Replicas: tc.initialReplicas,
+			},
+			Status: autoscalingv1.ScaleStatus{
+				Replicas: tc.initialReplicas,
+				Selector: selector,
+			},
+		}
+		return true, obj, nil
+	})
+
+	fakeScaleClient.AddReactor("get", "deployments", func(action core.Action) (handled bool, ret runtime.Object, err error) {
+		tc.Lock()
+		defer tc.Unlock()
+
+		obj := &autoscalingv1.Scale{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      tc.resource.name,
+				Namespace: namespace,
+			},
+			Spec: autoscalingv1.ScaleSpec{
+				Replicas: tc.initialReplicas,
+			},
+			Status: autoscalingv1.ScaleStatus{
+				Replicas: tc.initialReplicas,
+				Selector: selector,
+			},
+		}
+		return true, obj, nil
+	})
+
+	fakeScaleClient.AddReactor("get", "replicasets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
+		tc.Lock()
+		defer tc.Unlock()
+
+		obj := &autoscalingv1.Scale{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      tc.resource.name,
+				Namespace: namespace,
+			},
+			Spec: autoscalingv1.ScaleSpec{
+				Replicas: tc.initialReplicas,
+			},
+			Status: autoscalingv1.ScaleStatus{
+				Replicas: tc.initialReplicas,
+				Selector: selector,
+			},
+		}
+		return true, obj, nil
+	})
+
+	fakeScaleClient.AddReactor("update", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
+		tc.Lock()
+		defer tc.Unlock()
+
+		obj := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale)
+		replicas := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale).Spec.Replicas
+		assert.Equal(t, tc.desiredReplicas, replicas, "the replica count of the RC should be as expected")
+		tc.scaleUpdated = true
+		return true, obj, nil
+	})
+
+	fakeScaleClient.AddReactor("update", "deployments", func(action core.Action) (handled bool, ret runtime.Object, err error) {
+		tc.Lock()
+		defer tc.Unlock()
+
+		obj := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale)
+		replicas := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale).Spec.Replicas
+		assert.Equal(t, tc.desiredReplicas, replicas, "the replica count of the deployment should be as expected")
+		tc.scaleUpdated = true
+		return true, obj, nil
+	})
+
+	fakeScaleClient.AddReactor("update", "replicasets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
+		tc.Lock()
+		defer tc.Unlock()
+
+		obj := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale)
+		replicas := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale).Spec.Replicas
+		assert.Equal(t, tc.desiredReplicas, replicas, "the replica count of the replicaset should be as expected")
+		tc.scaleUpdated = true
+		return true, obj, nil
+	})
+
 	fakeWatch := watch.NewFake()
 	fakeClient.AddWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil))
 
-	return fakeClient
+	return fakeClient, fakeScaleClient
 }
 
 func (tc *legacyTestCase) verifyResults(t *testing.T) {
@@ -455,7 +458,7 @@ func (tc *legacyTestCase) verifyResults(t *testing.T) {
 }
 
 func (tc *legacyTestCase) runTest(t *testing.T) {
-	testClient := tc.prepareTestClient(t)
+	testClient, testScaleClient := tc.prepareTestClient(t)
 	metricsClient := metrics.NewHeapsterMetricsClient(testClient, metrics.DefaultHeapsterNamespace, metrics.DefaultHeapsterScheme, metrics.DefaultHeapsterService, metrics.DefaultHeapsterPort)
 
 	eventClient := &clientfake.Clientset{}
@@ -493,8 +496,9 @@ func (tc *legacyTestCase) runTest(t *testing.T) {
 
 	hpaController := NewHorizontalController(
 		eventClient.Core(),
-		testClient.Extensions(),
+		testScaleClient,
 		testClient.Autoscaling(),
+		legacyscheme.Registry.RESTMapper(),
 		replicaCalc,
 		informerFactory.Autoscaling().V1().HorizontalPodAutoscalers(),
 		controller.NoResyncPeriodFunc(),
@@ -584,7 +588,7 @@ func LegacyTestScaleUpDeployment(t *testing.T) {
 		resource: &fakeResource{
 			name:       "test-dep",
 			apiVersion: "extensions/v1beta1",
-			kind:       "deployments",
+			kind:       "Deployment",
 		},
 	}
 	tc.runTest(t)
@@ -604,7 +608,7 @@ func LegacyTestScaleUpReplicaSet(t *testing.T) {
 		resource: &fakeResource{
 			name:       "test-replicaset",
 			apiVersion: "extensions/v1beta1",
-			kind:       "replicasets",
+			kind:       "ReplicaSet",
 		},
 	}
 	tc.runTest(t)