mirror of https://github.com/k3s-io/k3s
commit f899afb896
@@ -161,9 +161,9 @@ func calculateSumFromLatestSample(metrics heapster.MetricResultList) (uint64, in
 	for _, metrics := range metrics.Items {
 		var newest *heapster.MetricPoint
 		newest = nil
-		for _, metricPoint := range metrics.Metrics {
+		for i, metricPoint := range metrics.Metrics {
 			if newest == nil || newest.Timestamp.Before(metricPoint.Timestamp) {
-				newest = &metricPoint
+				newest = &metrics.Metrics[i]
 			}
 		}
 		if newest != nil {
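
The first hunk fixes a classic Go aliasing bug in the Heapster metrics client: metricPoint is the range loop variable, a single variable that is reused on every iteration (under the loop semantics of Go versions before 1.22), so newest = &metricPoint leaves newest pointing at whatever value the final iteration stored there rather than at the newest sample. Taking the address of the slice element itself, newest = &metrics.Metrics[i], is the standard fix. A minimal standalone sketch of the bug and the fix (hypothetical Point type, not from this commit):

package main

import "fmt"

type Point struct{ Level int }

func main() {
	points := []Point{{1}, {2}, {3}}

	// Buggy: &p takes the address of the loop variable, which is
	// reused across iterations (pre-Go 1.22 semantics), so by the
	// time the loop ends the pointer refers to the last value seen.
	var buggy *Point
	for _, p := range points {
		if p.Level == 1 {
			buggy = &p
		}
	}
	fmt.Println(buggy.Level) // prints 3 before Go 1.22, not the expected 1

	// Fixed, mirroring the commit: address the slice element itself.
	var fixed *Point
	for i, p := range points {
		if p.Level == 1 {
			fixed = &points[i]
		}
	}
	fmt.Println(fixed.Level) // prints 1
}
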
@@ -19,136 +19,350 @@ package metrics
 import (
 	"encoding/json"
 	"fmt"
-	"net/http"
-	"net/http/httptest"
+	"io"
 	"testing"
 	"time"

 	"k8s.io/kubernetes/pkg/api"
 	_ "k8s.io/kubernetes/pkg/api/latest"
-	"k8s.io/kubernetes/pkg/api/testapi"
+	"k8s.io/kubernetes/pkg/apis/experimental"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
+	"k8s.io/kubernetes/pkg/client/unversioned/testclient"
 	"k8s.io/kubernetes/pkg/runtime"
-	"k8s.io/kubernetes/pkg/util"

 	heapster "k8s.io/heapster/api/v1/types"

-	"github.com/golang/glog"
 	"github.com/stretchr/testify/assert"
 )

-const (
-	namespace          = "test-namespace"
-	podName            = "pod1"
-	podListHandler     = "podlisthandler"
-	heapsterCpuHandler = "heapstercpuhandler"
-	heapsterMemHandler = "heapstermemhandler"
-	cpu                = 650
-	memory             = 20000000
-)
-
-type serverResponse struct {
-	statusCode int
-	obj        interface{}
+func (w fakeResponseWrapper) DoRaw() ([]byte, error) {
+	return w.raw, nil
 }

-func makeTestServer(t *testing.T, responses map[string]*serverResponse) (*httptest.Server, map[string]*util.FakeHandler) {
-	handlers := map[string]*util.FakeHandler{}
-	mux := http.NewServeMux()
-
-	mkHandler := func(url string, response serverResponse) *util.FakeHandler {
-		handler := util.FakeHandler{
-			StatusCode:   response.statusCode,
-			ResponseBody: runtime.EncodeOrDie(testapi.Experimental.Codec(), response.obj.(runtime.Object)),
-		}
-		mux.Handle(url, &handler)
-		glog.Infof("Will handle %s", url)
-		return &handler
-	}
-
-	mkRawHandler := func(url string, response serverResponse) *util.FakeHandler {
-		handler := util.FakeHandler{
-			StatusCode:   response.statusCode,
-			ResponseBody: *response.obj.(*string),
-		}
-		mux.Handle(url, &handler)
-		glog.Infof("Will handle %s", url)
-		return &handler
-	}
-
-	if responses[podListHandler] != nil {
-		handlers[podListHandler] = mkHandler(fmt.Sprintf("/api/v1/namespaces/%s/pods", namespace), *responses[podListHandler])
-	}
-
-	if responses[heapsterCpuHandler] != nil {
-		handlers[heapsterCpuHandler] = mkRawHandler(
-			fmt.Sprintf("/api/v1/proxy/namespaces/kube-system/services/monitoring-heapster/api/v1/model/namespaces/%s/pod-list/%s/metrics/cpu-usage",
-				namespace, podName), *responses[heapsterCpuHandler])
-	}
-
-	if responses[heapsterMemHandler] != nil {
-		handlers[heapsterMemHandler] = mkRawHandler(
-			fmt.Sprintf("/api/v1/proxy/namespaces/kube-system/services/monitoring-heapster/api/v1/model/namespaces/%s/pod-list/%s/metrics/memory-usage",
-				namespace, podName), *responses[heapsterMemHandler])
-	}
-
-	mux.HandleFunc("/", func(res http.ResponseWriter, req *http.Request) {
-		t.Errorf("unexpected request: %v", req.RequestURI)
-		res.WriteHeader(http.StatusNotFound)
-	})
-	return httptest.NewServer(mux), handlers
+func (w fakeResponseWrapper) Stream() (io.ReadCloser, error) {
+	return nil, nil
 }

-func TestHeapsterResourceConsumptionGet(t *testing.T) {
-	podListResponse := serverResponse{http.StatusOK, &api.PodList{
-		Items: []api.Pod{
-			{
+func newFakeResponseWrapper(raw []byte) fakeResponseWrapper {
+	return fakeResponseWrapper{raw: raw}
+}
+
+type fakeResponseWrapper struct {
+	raw []byte
+}
+
+// timestamp is used for establishing order on metricPoints
+type metricPoint struct {
+	level     uint64
+	timestamp int
+}
+
+type testCase struct {
+	replicas              int
+	desiredValue          int64
+	desiredError          error
+	targetResource        api.ResourceName
+	reportedMetricsPoints [][]metricPoint
+	namespace             string
+	selector              map[string]string
+}
+
+func (tc *testCase) prepareTestClient(t *testing.T) *testclient.Fake {
+	namespace := "test-namespace"
+	tc.namespace = namespace
+	podNamePrefix := "test-pod"
+	selector := map[string]string{"name": podNamePrefix}
+	tc.selector = selector
+
+	fakeClient := &testclient.Fake{}
+
+	fakeClient.AddReactor("list", "pods", func(action testclient.Action) (handled bool, ret runtime.Object, err error) {
+		obj := &api.PodList{}
+		for i := 0; i < tc.replicas; i++ {
+			podName := fmt.Sprintf("%s-%d", podNamePrefix, i)
+			pod := api.Pod{
+				Status: api.PodStatus{
+					Phase: api.PodRunning,
+				},
 				ObjectMeta: api.ObjectMeta{
 					Name:      podName,
 					Namespace: namespace,
+					Labels:    selector,
 				},
-			}}}}
+			}
+			obj.Items = append(obj.Items, pod)
+		}
+		return true, obj, nil
+	})

-	timestamp := time.Now()
-	metricsCpu := heapster.MetricResultList{
-		Items: []heapster.MetricResult{{
-			Metrics:         []heapster.MetricPoint{{timestamp, cpu}},
-			LatestTimestamp: timestamp,
-		}}}
-	heapsterRawCpuResponse, _ := json.Marshal(&metricsCpu)
-	heapsterStrCpuResponse := string(heapsterRawCpuResponse)
-	heapsterCpuResponse := serverResponse{http.StatusOK, &heapsterStrCpuResponse}
+	fakeClient.AddProxyReactor("services", func(action testclient.Action) (handled bool, ret client.ResponseWrapper, err error) {
+		metrics := heapster.MetricResultList{}
+		firstTimestamp := time.Now()
+		var latestTimestamp time.Time
+		for _, reportedMetricPoints := range tc.reportedMetricsPoints {
+			var heapsterMetricPoints []heapster.MetricPoint
+			for _, reportedMetricPoint := range reportedMetricPoints {
+				timestamp := firstTimestamp.Add(time.Duration(reportedMetricPoint.timestamp) * time.Minute)
+				if latestTimestamp.Before(timestamp) {
+					latestTimestamp = timestamp
+				}
+				heapsterMetricPoint := heapster.MetricPoint{timestamp, reportedMetricPoint.level}
+				heapsterMetricPoints = append(heapsterMetricPoints, heapsterMetricPoint)
+			}
+			metric := heapster.MetricResult{
+				Metrics:         heapsterMetricPoints,
+				LatestTimestamp: latestTimestamp,
+			}
+			metrics.Items = append(metrics.Items, metric)
+		}
+		heapsterRawMemResponse, _ := json.Marshal(&metrics)
+		return true, newFakeResponseWrapper(heapsterRawMemResponse), nil
+	})

-	metricsMem := heapster.MetricResultList{
-		Items: []heapster.MetricResult{{
-			Metrics:         []heapster.MetricPoint{{timestamp, memory}},
-			LatestTimestamp: timestamp,
-		}}}
-	heapsterRawMemResponse, _ := json.Marshal(&metricsMem)
-	heapsterStrMemResponse := string(heapsterRawMemResponse)
-	heapsterMemResponse := serverResponse{http.StatusOK, &heapsterStrMemResponse}
-
-	testServer, _ := makeTestServer(t,
-		map[string]*serverResponse{
-			heapsterCpuHandler: &heapsterCpuResponse,
-			heapsterMemHandler: &heapsterMemResponse,
-			podListHandler:     &podListResponse,
-		})
+	return fakeClient
+}
+
+func (tc *testCase) verifyResults(t *testing.T, val *experimental.ResourceConsumption, err error) {
+	assert.Equal(t, tc.desiredError, err)
+	if tc.desiredError != nil {
+		return
+	}
+	if tc.targetResource == api.ResourceCPU {
+		assert.Equal(t, tc.desiredValue, val.Quantity.MilliValue())
+	}
+	if tc.targetResource == api.ResourceMemory {
+		assert.Equal(t, tc.desiredValue, val.Quantity.Value())
+	}
+}

-	defer testServer.Close()
-	kubeClient := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Experimental.Version()})
-
-	metricsClient := NewHeapsterMetricsClient(kubeClient)
-
-	val, err := metricsClient.ResourceConsumption(namespace).Get(api.ResourceCPU, map[string]string{"app": "test"})
-	if err != nil {
-		t.Fatalf("Error while getting consumption: %v", err)
-	}
-	assert.Equal(t, int64(cpu), val.Quantity.MilliValue())
-
-	val, err = metricsClient.ResourceConsumption(namespace).Get(api.ResourceMemory, map[string]string{"app": "test"})
-	if err != nil {
-		t.Fatalf("Error while getting consumption: %v", err)
-	}
-	assert.Equal(t, int64(memory), val.Quantity.Value())
+func (tc *testCase) runTest(t *testing.T) {
+	testClient := tc.prepareTestClient(t)
+	metricsClient := NewHeapsterMetricsClient(testClient)
+	val, err := metricsClient.ResourceConsumption(tc.namespace).Get(tc.targetResource, tc.selector)
+	tc.verifyResults(t, val, err)
+}
+
+func TestCPU(t *testing.T) {
+	tc := testCase{
+		replicas:              3,
+		desiredValue:          5000,
+		targetResource:        api.ResourceCPU,
+		reportedMetricsPoints: [][]metricPoint{{{5000, 1}}, {{5000, 1}}, {{5000, 1}}},
+	}
+	tc.runTest(t)
+}
+
+func TestMemory(t *testing.T) {
+	tc := testCase{
+		replicas:              3,
+		desiredValue:          5000,
+		targetResource:        api.ResourceMemory,
+		reportedMetricsPoints: [][]metricPoint{{{5000, 1}}, {{5000, 2}}, {{5000, 4}}},
+	}
+	tc.runTest(t)
+}
+
+func TestCPUSumEqualZero(t *testing.T) {
+	tc := testCase{
+		replicas:              3,
+		desiredValue:          0,
+		targetResource:        api.ResourceCPU,
+		reportedMetricsPoints: [][]metricPoint{{{0, 0}}, {{0, 0}}, {{0, 0}}},
+	}
+	tc.runTest(t)
+}
+
+func TestMemorySumEqualZero(t *testing.T) {
+	tc := testCase{
+		replicas:              3,
+		desiredValue:          0,
+		targetResource:        api.ResourceMemory,
+		reportedMetricsPoints: [][]metricPoint{{{0, 0}}, {{0, 0}}, {{0, 0}}},
+	}
+	tc.runTest(t)
+}
+
+func TestCPUMoreMetrics(t *testing.T) {
+	tc := testCase{
+		replicas:       5,
+		desiredValue:   5000,
+		targetResource: api.ResourceCPU,
+		reportedMetricsPoints: [][]metricPoint{
+			{{0, 3}, {0, 6}, {5, 4}, {9000, 10}},
+			{{5000, 2}, {10, 5}, {66, 1}, {0, 10}},
+			{{5000, 3}, {80, 5}, {6000, 10}},
+			{{5000, 3}, {40, 3}, {0, 9}, {200, 2}, {8000, 10}},
+			{{5000, 2}, {20, 2}, {2000, 10}}},
+	}
+	tc.runTest(t)
+}
+
+func TestMemoryMoreMetrics(t *testing.T) {
+	tc := testCase{
+		replicas:       5,
+		desiredValue:   5000,
+		targetResource: api.ResourceMemory,
+		reportedMetricsPoints: [][]metricPoint{
+			{{0, 3}, {0, 6}, {5, 4}, {9000, 10}},
+			{{5000, 2}, {10, 5}, {66, 1}, {0, 10}},
+			{{5000, 3}, {80, 5}, {6000, 10}},
+			{{5000, 3}, {40, 3}, {0, 9}, {200, 2}, {8000, 10}},
+			{{5000, 2}, {20, 2}, {2000, 10}}},
+	}
+	tc.runTest(t)
+}
+
+func TestCPUResultIsFloat(t *testing.T) {
+	tc := testCase{
+		replicas:              6,
+		desiredValue:          4783,
+		targetResource:        api.ResourceCPU,
+		reportedMetricsPoints: [][]metricPoint{{{4000, 4}}, {{9500, 4}}, {{3000, 4}}, {{7000, 4}}, {{3200, 4}}, {{2000, 4}}},
+	}
+	tc.runTest(t)
+}
+
+func TestMemoryResultIsFloat(t *testing.T) {
+	tc := testCase{
+		replicas:              6,
+		desiredValue:          4783,
+		targetResource:        api.ResourceMemory,
+		reportedMetricsPoints: [][]metricPoint{{{4000, 4}}, {{9500, 4}}, {{3000, 4}}, {{7000, 4}}, {{3200, 4}}, {{2000, 4}}},
+	}
+	tc.runTest(t)
+}
+
+func TestCPUSamplesWithRandomTimestamps(t *testing.T) {
+	tc := testCase{
+		replicas:       3,
+		desiredValue:   3000,
+		targetResource: api.ResourceCPU,
+		reportedMetricsPoints: [][]metricPoint{
+			{{1, 1}, {3000, 3}, {2, 2}},
+			{{2, 2}, {1, 1}, {3000, 3}},
+			{{3000, 3}, {1, 1}, {2, 2}}},
+	}
+	tc.runTest(t)
+}
+
+func TestMemorySamplesWithRandomTimestamps(t *testing.T) {
+	tc := testCase{
+		replicas:       3,
+		desiredValue:   3000,
+		targetResource: api.ResourceMemory,
+		reportedMetricsPoints: [][]metricPoint{
+			{{1, 1}, {3000, 3}, {2, 2}},
+			{{2, 2}, {1, 1}, {3000, 3}},
+			{{3000, 3}, {1, 1}, {2, 2}}},
+	}
+	tc.runTest(t)
+}
+
+func TestErrorMetricNotDefined(t *testing.T) {
+	tc := testCase{
+		replicas:              1,
+		desiredError:          fmt.Errorf("heapster metric not defined for "),
+		reportedMetricsPoints: [][]metricPoint{{{4000, 4}}},
+	}
+	tc.runTest(t)
+}
+
+func TestCPUMissingMetrics(t *testing.T) {
+	tc := testCase{
+		replicas:              3,
+		targetResource:        api.ResourceCPU,
+		desiredError:          fmt.Errorf("metrics obtained for 1/3 of pods"),
+		reportedMetricsPoints: [][]metricPoint{{{4000, 4}}},
+	}
+	tc.runTest(t)
+}
+
+func TestMemoryMissingMetrics(t *testing.T) {
+	tc := testCase{
+		replicas:              3,
+		targetResource:        api.ResourceMemory,
+		desiredError:          fmt.Errorf("metrics obtained for 1/3 of pods"),
+		reportedMetricsPoints: [][]metricPoint{{{4000, 4}}},
+	}
+	tc.runTest(t)
+}
+
+func TestCPUSuperfluousMetrics(t *testing.T) {
+	tc := testCase{
+		replicas:              3,
+		targetResource:        api.ResourceCPU,
+		desiredError:          fmt.Errorf("metrics obtained for 6/3 of pods"),
+		reportedMetricsPoints: [][]metricPoint{{{1000, 1}}, {{2000, 4}}, {{2000, 1}}, {{4000, 5}}, {{2000, 1}}, {{4000, 4}}},
+	}
+	tc.runTest(t)
+}
+
+func TestMemorySuperfluousMetrics(t *testing.T) {
+	tc := testCase{
+		replicas:              3,
+		targetResource:        api.ResourceMemory,
+		desiredError:          fmt.Errorf("metrics obtained for 6/3 of pods"),
+		reportedMetricsPoints: [][]metricPoint{{{1000, 1}}, {{2000, 4}}, {{2000, 1}}, {{4000, 5}}, {{2000, 1}}, {{4000, 4}}},
+	}
+	tc.runTest(t)
+}
+
+func TestCPUEmptyMetrics(t *testing.T) {
+	tc := testCase{
+		replicas:              3,
+		targetResource:        api.ResourceCPU,
+		desiredError:          fmt.Errorf("metrics obtained for 0/3 of pods"),
+		reportedMetricsPoints: [][]metricPoint{},
+	}
+	tc.runTest(t)
+}
+
+func TestMemoryEmptyMetrics(t *testing.T) {
+	tc := testCase{
+		replicas:              3,
+		targetResource:        api.ResourceMemory,
+		desiredError:          fmt.Errorf("metrics obtained for 0/3 of pods"),
+		reportedMetricsPoints: [][]metricPoint{},
+	}
+	tc.runTest(t)
+}
+
+func TestCPUZeroReplicas(t *testing.T) {
+	tc := testCase{
+		replicas:              0,
+		targetResource:        api.ResourceCPU,
+		desiredValue:          0,
+		reportedMetricsPoints: [][]metricPoint{},
+	}
+	tc.runTest(t)
+}
+
+func TestMemoryZeroReplicas(t *testing.T) {
+	tc := testCase{
+		replicas:              0,
+		targetResource:        api.ResourceMemory,
+		desiredValue:          0,
+		reportedMetricsPoints: [][]metricPoint{},
+	}
+	tc.runTest(t)
+}
+
+func TestCPUEmptyMetricsForOnePod(t *testing.T) {
+	tc := testCase{
+		replicas:              3,
+		targetResource:        api.ResourceCPU,
+		desiredError:          fmt.Errorf("metrics obtained for 2/3 of pods"),
+		reportedMetricsPoints: [][]metricPoint{{}, {{100, 1}}, {{400, 2}, {300, 3}}},
+	}
+	tc.runTest(t)
+}
+
+func TestMemoryEmptyMetricsForOnePod(t *testing.T) {
+	tc := testCase{
+		replicas:              3,
+		targetResource:        api.ResourceMemory,
+		desiredError:          fmt.Errorf("metrics obtained for 2/3 of pods"),
+		reportedMetricsPoints: [][]metricPoint{{}, {{100, 1}}, {{400, 2}, {300, 3}}},
+	}
+	tc.runTest(t)
 }
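
The second hunk replaces the httptest-based harness with kubernetes' fake client: instead of spinning up an HTTP server with canned util.FakeHandler responses, the rewritten test intercepts the pod list via fakeClient.AddReactor and the proxied Heapster call via fakeClient.AddProxyReactor, handing the canned JSON back through fakeResponseWrapper, a minimal stub of the client's ResponseWrapper interface whose Stream method can return nil because the code under test only calls DoRaw. The same stub pattern in isolation, as a sketch against a hypothetical ResponseWrapper interface of the same two-method shape (not the kubernetes one):

package main

import (
	"encoding/json"
	"fmt"
	"io"
)

// ResponseWrapper stands in for the interface being stubbed; it is a
// hypothetical copy of the two-method shape used in the test above.
type ResponseWrapper interface {
	DoRaw() ([]byte, error)
	Stream() (io.ReadCloser, error)
}

// fakeResponseWrapper returns canned bytes instead of performing a
// network round trip. Stream is unused by the consumer, so nil is fine.
type fakeResponseWrapper struct{ raw []byte }

func (w fakeResponseWrapper) DoRaw() ([]byte, error)         { return w.raw, nil }
func (w fakeResponseWrapper) Stream() (io.ReadCloser, error) { return nil, nil }

func main() {
	payload, _ := json.Marshal(map[string]uint64{"cpu-usage": 650})
	var rw ResponseWrapper = fakeResponseWrapper{raw: payload}
	raw, err := rw.DoRaw()
	fmt.Println(string(raw), err) // {"cpu-usage":650} <nil>
}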
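
The new tests are table-driven: each case is a single testCase struct, with one inner slice of reportedMetricsPoints per pod, and prepareTestClient turns every {level, timestamp} pair into a heapster.MetricPoint stamped that many minutes after a common start time. The expected values imply that the metrics client averages each pod's latest sample: TestCPUResultIsFloat reports one sample for each of six pods and expects (4000+9500+3000+7000+3200+2000)/6 = 4783, truncated from 4783.3. A standalone sketch of that aggregation under those assumptions, not the actual metrics client code:

package main

import "fmt"

// metricPoint mirrors the test helper: a level plus a relative
// timestamp that only establishes ordering.
type metricPoint struct {
	level     uint64
	timestamp int
}

// averageOfLatest picks each pod's newest sample and averages the
// levels, the behavior the test expectations above suggest.
func averageOfLatest(pods [][]metricPoint) int64 {
	var sum uint64
	for _, samples := range pods {
		var newest *metricPoint
		for i := range samples {
			if newest == nil || newest.timestamp < samples[i].timestamp {
				newest = &samples[i] // address the element, as in the first hunk's fix
			}
		}
		if newest != nil {
			sum += newest.level
		}
	}
	return int64(float64(sum) / float64(len(pods)))
}

func main() {
	// TestCPUResultIsFloat's data: (4000+9500+3000+7000+3200+2000)/6 = 4783
	pods := [][]metricPoint{{{4000, 4}}, {{9500, 4}}, {{3000, 4}}, {{7000, 4}}, {{3200, 4}}, {{2000, 4}}}
	fmt.Println(averageOfLatest(pods)) // 4783
}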