Merge pull request #74647 from danielqsj/e2ein

Fix golint failures for e2e/instrumentation/...
pull/564/head
Kubernetes Prow Robot 2019-03-05 06:51:10 -08:00 committed by GitHub
commit e24e26fac8
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
10 changed files with 95 additions and 81 deletions

View File

@ -642,8 +642,6 @@ test/e2e/common
test/e2e/framework test/e2e/framework
test/e2e/framework/providers/gce test/e2e/framework/providers/gce
test/e2e/framework/providers/kubemark test/e2e/framework/providers/kubemark
test/e2e/instrumentation/logging
test/e2e/instrumentation/monitoring
test/e2e/lifecycle test/e2e/lifecycle
test/e2e/lifecycle/bootstrap test/e2e/lifecycle/bootstrap
test/e2e/network test/e2e/network

View File

@ -23,8 +23,8 @@ import (
"sync" "sync"
"time" "time"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
"k8s.io/api/core/v1" "k8s.io/api/core/v1"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/config" "k8s.io/kubernetes/test/e2e/framework/config"
@ -51,15 +51,15 @@ var _ = instrumentation.SIGDescribe("Logging soak [Performance] [Slow] [Disrupti
// This can expose problems in your docker configuration (logging), log searching infrastructure, to tune deployments to match high load // This can expose problems in your docker configuration (logging), log searching infrastructure, to tune deployments to match high load
// scenarios. TODO jayunit100 add this to the kube CI in a follow on infra patch. // scenarios. TODO jayunit100 add this to the kube CI in a follow on infra patch.
It(fmt.Sprintf("should survive logging 1KB every %v seconds, for a duration of %v", kbRateInSeconds, totalLogTime), func() { ginkgo.It(fmt.Sprintf("should survive logging 1KB every %v seconds, for a duration of %v", kbRateInSeconds, totalLogTime), func() {
By(fmt.Sprintf("scaling up to %v pods per node", loggingSoak.Scale)) ginkgo.By(fmt.Sprintf("scaling up to %v pods per node", loggingSoak.Scale))
defer GinkgoRecover() defer ginkgo.GinkgoRecover()
var wg sync.WaitGroup var wg sync.WaitGroup
wg.Add(loggingSoak.Scale) wg.Add(loggingSoak.Scale)
for i := 0; i < loggingSoak.Scale; i++ { for i := 0; i < loggingSoak.Scale; i++ {
go func() { go func() {
defer wg.Done() defer wg.Done()
defer GinkgoRecover() defer ginkgo.GinkgoRecover()
wave := fmt.Sprintf("wave%v", strconv.Itoa(i)) wave := fmt.Sprintf("wave%v", strconv.Itoa(i))
framework.Logf("Starting logging soak, wave = %v", wave) framework.Logf("Starting logging soak, wave = %v", wave)
RunLogPodsWithSleepOf(f, kbRateInSeconds, wave, totalLogTime) RunLogPodsWithSleepOf(f, kbRateInSeconds, wave, totalLogTime)
@ -79,7 +79,7 @@ func RunLogPodsWithSleepOf(f *framework.Framework, sleep time.Duration, podname
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet) nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
totalPods := len(nodes.Items) totalPods := len(nodes.Items)
Expect(totalPods).NotTo(Equal(0)) gomega.Expect(totalPods).NotTo(gomega.Equal(0))
kilobyte := strings.Repeat("logs-123", 128) // 8*128=1024 = 1KB of text. kilobyte := strings.Repeat("logs-123", 128) // 8*128=1024 = 1KB of text.

View File

@ -17,6 +17,6 @@ limitations under the License.
package logging package logging
import ( import (
_ "k8s.io/kubernetes/test/e2e/instrumentation/logging/elasticsearch" _ "k8s.io/kubernetes/test/e2e/instrumentation/logging/elasticsearch" // for elasticsearch provider
_ "k8s.io/kubernetes/test/e2e/instrumentation/logging/stackdriver" _ "k8s.io/kubernetes/test/e2e/instrumentation/logging/stackdriver" // for stackdriver provider
) )

View File

@ -21,7 +21,7 @@ import (
"os" "os"
"time" "time"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
"golang.org/x/oauth2/google" "golang.org/x/oauth2/google"
gcm "google.golang.org/api/monitoring/v3" gcm "google.golang.org/api/monitoring/v3"
"k8s.io/api/core/v1" "k8s.io/api/core/v1"
@ -43,20 +43,20 @@ var acceleratorMetrics = []string{
} }
var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() { var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() {
BeforeEach(func() { ginkgo.BeforeEach(func() {
framework.SkipUnlessProviderIs("gce", "gke") framework.SkipUnlessProviderIs("gce", "gke")
}) })
f := framework.NewDefaultFramework("stackdriver-monitoring") f := framework.NewDefaultFramework("stackdriver-monitoring")
It("should have accelerator metrics [Feature:StackdriverAcceleratorMonitoring]", func() { ginkgo.It("should have accelerator metrics [Feature:StackdriverAcceleratorMonitoring]", func() {
testStackdriverAcceleratorMonitoring(f) testStackdriverAcceleratorMonitoring(f)
}) })
}) })
func testStackdriverAcceleratorMonitoring(f *framework.Framework) { func testStackdriverAcceleratorMonitoring(f *framework.Framework) {
projectId := framework.TestContext.CloudConfig.ProjectID projectID := framework.TestContext.CloudConfig.ProjectID
ctx := context.Background() ctx := context.Background()
client, err := google.DefaultClient(ctx, gcm.CloudPlatformScope) client, err := google.DefaultClient(ctx, gcm.CloudPlatformScope)
@ -97,7 +97,7 @@ func testStackdriverAcceleratorMonitoring(f *framework.Framework) {
}) })
metricsMap := map[string]bool{} metricsMap := map[string]bool{}
pollingFunction := checkForAcceleratorMetrics(projectId, gcmService, time.Now(), metricsMap) pollingFunction := checkForAcceleratorMetrics(projectID, gcmService, time.Now(), metricsMap)
err = wait.Poll(pollFrequency, pollTimeout, pollingFunction) err = wait.Poll(pollFrequency, pollTimeout, pollingFunction)
if err != nil { if err != nil {
framework.Logf("Missing metrics: %+v", metricsMap) framework.Logf("Missing metrics: %+v", metricsMap)
@ -105,7 +105,7 @@ func testStackdriverAcceleratorMonitoring(f *framework.Framework) {
framework.ExpectNoError(err) framework.ExpectNoError(err)
} }
func checkForAcceleratorMetrics(projectId string, gcmService *gcm.Service, start time.Time, metricsMap map[string]bool) func() (bool, error) { func checkForAcceleratorMetrics(projectID string, gcmService *gcm.Service, start time.Time, metricsMap map[string]bool) func() (bool, error) {
return func() (bool, error) { return func() (bool, error) {
counter := 0 counter := 0
for _, metric := range acceleratorMetrics { for _, metric := range acceleratorMetrics {
@ -113,7 +113,7 @@ func checkForAcceleratorMetrics(projectId string, gcmService *gcm.Service, start
} }
for _, metric := range acceleratorMetrics { for _, metric := range acceleratorMetrics {
// TODO: check only for metrics from this cluster // TODO: check only for metrics from this cluster
ts, err := fetchTimeSeries(projectId, gcmService, metric, start, time.Now()) ts, err := fetchTimeSeries(projectID, gcmService, metric, start, time.Now())
framework.ExpectNoError(err) framework.ExpectNoError(err)
if len(ts) > 0 { if len(ts) > 0 {
counter = counter + 1 counter = counter + 1

View File

@ -26,7 +26,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework/config" "k8s.io/kubernetes/test/e2e/framework/config"
instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common" instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
) )
var cadvisor struct { var cadvisor struct {
@ -39,14 +39,15 @@ var _ = instrumentation.SIGDescribe("Cadvisor", func() {
f := framework.NewDefaultFramework("cadvisor") f := framework.NewDefaultFramework("cadvisor")
It("should be healthy on every node.", func() { ginkgo.It("should be healthy on every node.", func() {
CheckCadvisorHealthOnAllNodes(f.ClientSet, 5*time.Minute) CheckCadvisorHealthOnAllNodes(f.ClientSet, 5*time.Minute)
}) })
}) })
// CheckCadvisorHealthOnAllNodes check cadvisor health via kubelet endpoint
func CheckCadvisorHealthOnAllNodes(c clientset.Interface, timeout time.Duration) { func CheckCadvisorHealthOnAllNodes(c clientset.Interface, timeout time.Duration) {
// It should be OK to list unschedulable Nodes here. // It should be OK to list unschedulable Nodes here.
By("getting list of nodes") ginkgo.By("getting list of nodes")
nodeList, err := c.CoreV1().Nodes().List(metav1.ListOptions{}) nodeList, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
var errors []error var errors []error
@ -58,7 +59,7 @@ func CheckCadvisorHealthOnAllNodes(c clientset.Interface, timeout time.Duration)
// cadvisor is not accessible directly unless its port (4194 by default) is exposed. // cadvisor is not accessible directly unless its port (4194 by default) is exposed.
// Here, we access '/stats/' REST endpoint on the kubelet which polls cadvisor internally. // Here, we access '/stats/' REST endpoint on the kubelet which polls cadvisor internally.
statsResource := fmt.Sprintf("api/v1/nodes/%s/proxy/stats/", node.Name) statsResource := fmt.Sprintf("api/v1/nodes/%s/proxy/stats/", node.Name)
By(fmt.Sprintf("Querying stats from node %s using url %s", node.Name, statsResource)) ginkgo.By(fmt.Sprintf("Querying stats from node %s using url %s", node.Name, statsResource))
_, err = c.CoreV1().RESTClient().Get().AbsPath(statsResource).Timeout(timeout).Do().Raw() _, err = c.CoreV1().RESTClient().Get().AbsPath(statsResource).Timeout(timeout).Do().Raw()
if err != nil { if err != nil {
errors = append(errors, err) errors = append(errors, err)

View File

@ -18,9 +18,8 @@ package monitoring
import ( import (
"fmt" "fmt"
"strings"
"os/exec" "os/exec"
"strings"
gcm "google.golang.org/api/monitoring/v3" gcm "google.golang.org/api/monitoring/v3"
appsv1 "k8s.io/api/apps/v1" appsv1 "k8s.io/api/apps/v1"
@ -31,10 +30,15 @@ import (
) )
var ( var (
CustomMetricName = "foo" // CustomMetricName is the metrics name used in test cases.
UnusedMetricName = "unused" CustomMetricName = "foo"
CustomMetricValue = int64(448) // UnusedMetricName is the unused metrics name used in test cases.
UnusedMetricValue = int64(446) UnusedMetricName = "unused"
// CustomMetricValue is the value for CustomMetricName.
CustomMetricValue = int64(448)
// UnusedMetricValue is the value for UnusedMetricName.
UnusedMetricValue = int64(446)
// StackdriverExporter is exporter name.
StackdriverExporter = "stackdriver-exporter" StackdriverExporter = "stackdriver-exporter"
// HPAPermissions is a ClusterRoleBinding that grants unauthenticated user permissions granted for // HPAPermissions is a ClusterRoleBinding that grants unauthenticated user permissions granted for
// HPA for testing purposes, i.e. it should grant permission to read custom metrics. // HPA for testing purposes, i.e. it should grant permission to read custom metrics.
@ -55,11 +59,16 @@ var (
}, },
}, },
} }
// StagingDeploymentsLocation is the location where the adapter deployment files are stored.
StagingDeploymentsLocation = "https://raw.githubusercontent.com/GoogleCloudPlatform/k8s-stackdriver/master/custom-metrics-stackdriver-adapter/deploy/staging/" StagingDeploymentsLocation = "https://raw.githubusercontent.com/GoogleCloudPlatform/k8s-stackdriver/master/custom-metrics-stackdriver-adapter/deploy/staging/"
// AdapterForOldResourceModel is file name for the old resource model.
AdapterForOldResourceModel = "adapter_old_resource_model.yaml" AdapterForOldResourceModel = "adapter_old_resource_model.yaml"
// AdapterForNewResourceModel is file name for the new resource model.
AdapterForNewResourceModel = "adapter_new_resource_model.yaml" AdapterForNewResourceModel = "adapter_new_resource_model.yaml"
AdapterDefault = AdapterForOldResourceModel // AdapterDefault is the default model.
ClusterAdminBinding = "e2e-test-cluster-admin-binding" AdapterDefault = AdapterForOldResourceModel
// ClusterAdminBinding is the cluster rolebinding name for test cases.
ClusterAdminBinding = "e2e-test-cluster-admin-binding"
) )
// CustomMetricContainerSpec allows to specify a config for StackdriverExporterDeployment // CustomMetricContainerSpec allows to specify a config for StackdriverExporterDeployment
@ -273,8 +282,8 @@ func createClusterAdminBinding() error {
} }
// CreateDescriptors creates descriptors for metrics: CustomMetricName and UnusedMetricName. // CreateDescriptors creates descriptors for metrics: CustomMetricName and UnusedMetricName.
func CreateDescriptors(service *gcm.Service, projectId string) error { func CreateDescriptors(service *gcm.Service, projectID string) error {
_, err := service.Projects.MetricDescriptors.Create(fmt.Sprintf("projects/%s", projectId), &gcm.MetricDescriptor{ _, err := service.Projects.MetricDescriptors.Create(fmt.Sprintf("projects/%s", projectID), &gcm.MetricDescriptor{
Name: CustomMetricName, Name: CustomMetricName,
ValueType: "INT64", ValueType: "INT64",
Type: "custom.googleapis.com/" + CustomMetricName, Type: "custom.googleapis.com/" + CustomMetricName,
@ -283,7 +292,7 @@ func CreateDescriptors(service *gcm.Service, projectId string) error {
if err != nil { if err != nil {
return err return err
} }
_, err = service.Projects.MetricDescriptors.Create(fmt.Sprintf("projects/%s", projectId), &gcm.MetricDescriptor{ _, err = service.Projects.MetricDescriptors.Create(fmt.Sprintf("projects/%s", projectID), &gcm.MetricDescriptor{
Name: UnusedMetricName, Name: UnusedMetricName,
ValueType: "INT64", ValueType: "INT64",
Type: "custom.googleapis.com/" + UnusedMetricName, Type: "custom.googleapis.com/" + UnusedMetricName,
@ -294,12 +303,12 @@ func CreateDescriptors(service *gcm.Service, projectId string) error {
// CleanupDescriptors deletes descriptors for metrics: CustomMetricName and UnusedMetricName. // CleanupDescriptors deletes descriptors for metrics: CustomMetricName and UnusedMetricName.
// TODO: Cleanup time series as well // TODO: Cleanup time series as well
func CleanupDescriptors(service *gcm.Service, projectId string) { func CleanupDescriptors(service *gcm.Service, projectID string) {
_, err := service.Projects.MetricDescriptors.Delete(fmt.Sprintf("projects/%s/metricDescriptors/custom.googleapis.com/%s", projectId, CustomMetricName)).Do() _, err := service.Projects.MetricDescriptors.Delete(fmt.Sprintf("projects/%s/metricDescriptors/custom.googleapis.com/%s", projectID, CustomMetricName)).Do()
if err != nil { if err != nil {
framework.Logf("Failed to delete descriptor for metric '%s': %v", CustomMetricName, err) framework.Logf("Failed to delete descriptor for metric '%s': %v", CustomMetricName, err)
} }
_, err = service.Projects.MetricDescriptors.Delete(fmt.Sprintf("projects/%s/metricDescriptors/custom.googleapis.com/%s", projectId, UnusedMetricName)).Do() _, err = service.Projects.MetricDescriptors.Delete(fmt.Sprintf("projects/%s/metricDescriptors/custom.googleapis.com/%s", projectID, UnusedMetricName)).Do()
if err != nil { if err != nil {
framework.Logf("Failed to delete descriptor for metric '%s': %v", CustomMetricName, err) framework.Logf("Failed to delete descriptor for metric '%s': %v", CustomMetricName, err)
} }

View File

@ -23,7 +23,7 @@ import (
"golang.org/x/oauth2/google" "golang.org/x/oauth2/google"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common" instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
@ -47,13 +47,13 @@ const (
) )
var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() { var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() {
BeforeEach(func() { ginkgo.BeforeEach(func() {
framework.SkipUnlessProviderIs("gce", "gke") framework.SkipUnlessProviderIs("gce", "gke")
}) })
f := framework.NewDefaultFramework("stackdriver-monitoring") f := framework.NewDefaultFramework("stackdriver-monitoring")
It("should run Custom Metrics - Stackdriver Adapter for old resource model [Feature:StackdriverCustomMetrics]", func() { ginkgo.It("should run Custom Metrics - Stackdriver Adapter for old resource model [Feature:StackdriverCustomMetrics]", func() {
kubeClient := f.ClientSet kubeClient := f.ClientSet
config, err := framework.LoadConfig() config, err := framework.LoadConfig()
if err != nil { if err != nil {
@ -68,7 +68,7 @@ var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() {
testCustomMetrics(f, kubeClient, customMetricsClient, discoveryClient, AdapterForOldResourceModel) testCustomMetrics(f, kubeClient, customMetricsClient, discoveryClient, AdapterForOldResourceModel)
}) })
It("should run Custom Metrics - Stackdriver Adapter for new resource model [Feature:StackdriverCustomMetrics]", func() { ginkgo.It("should run Custom Metrics - Stackdriver Adapter for new resource model [Feature:StackdriverCustomMetrics]", func() {
kubeClient := f.ClientSet kubeClient := f.ClientSet
config, err := framework.LoadConfig() config, err := framework.LoadConfig()
if err != nil { if err != nil {
@ -83,7 +83,7 @@ var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() {
testCustomMetrics(f, kubeClient, customMetricsClient, discoveryClient, AdapterForNewResourceModel) testCustomMetrics(f, kubeClient, customMetricsClient, discoveryClient, AdapterForNewResourceModel)
}) })
It("should run Custom Metrics - Stackdriver Adapter for external metrics [Feature:StackdriverExternalMetrics]", func() { ginkgo.It("should run Custom Metrics - Stackdriver Adapter for external metrics [Feature:StackdriverExternalMetrics]", func() {
kubeClient := f.ClientSet kubeClient := f.ClientSet
config, err := framework.LoadConfig() config, err := framework.LoadConfig()
if err != nil { if err != nil {
@ -95,7 +95,7 @@ var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() {
}) })
func testCustomMetrics(f *framework.Framework, kubeClient clientset.Interface, customMetricsClient customclient.CustomMetricsClient, discoveryClient *discovery.DiscoveryClient, adapterDeployment string) { func testCustomMetrics(f *framework.Framework, kubeClient clientset.Interface, customMetricsClient customclient.CustomMetricsClient, discoveryClient *discovery.DiscoveryClient, adapterDeployment string) {
projectId := framework.TestContext.CloudConfig.ProjectID projectID := framework.TestContext.CloudConfig.ProjectID
ctx := context.Background() ctx := context.Background()
client, err := google.DefaultClient(ctx, gcm.CloudPlatformScope) client, err := google.DefaultClient(ctx, gcm.CloudPlatformScope)
@ -106,11 +106,11 @@ func testCustomMetrics(f *framework.Framework, kubeClient clientset.Interface, c
} }
// Set up a cluster: create a custom metric and set up k8s-sd adapter // Set up a cluster: create a custom metric and set up k8s-sd adapter
err = CreateDescriptors(gcmService, projectId) err = CreateDescriptors(gcmService, projectID)
if err != nil { if err != nil {
framework.Failf("Failed to create metric descriptor: %s", err) framework.Failf("Failed to create metric descriptor: %s", err)
} }
defer CleanupDescriptors(gcmService, projectId) defer CleanupDescriptors(gcmService, projectID)
err = CreateAdapter(adapterDeployment) err = CreateAdapter(adapterDeployment)
if err != nil { if err != nil {
@ -141,7 +141,7 @@ func testCustomMetrics(f *framework.Framework, kubeClient clientset.Interface, c
// TODO(kawych): migrate this test to new resource model // TODO(kawych): migrate this test to new resource model
func testExternalMetrics(f *framework.Framework, kubeClient clientset.Interface, externalMetricsClient externalclient.ExternalMetricsClient) { func testExternalMetrics(f *framework.Framework, kubeClient clientset.Interface, externalMetricsClient externalclient.ExternalMetricsClient) {
projectId := framework.TestContext.CloudConfig.ProjectID projectID := framework.TestContext.CloudConfig.ProjectID
ctx := context.Background() ctx := context.Background()
client, err := google.DefaultClient(ctx, gcm.CloudPlatformScope) client, err := google.DefaultClient(ctx, gcm.CloudPlatformScope)
@ -152,11 +152,11 @@ func testExternalMetrics(f *framework.Framework, kubeClient clientset.Interface,
} }
// Set up a cluster: create a custom metric and set up k8s-sd adapter // Set up a cluster: create a custom metric and set up k8s-sd adapter
err = CreateDescriptors(gcmService, projectId) err = CreateDescriptors(gcmService, projectID)
if err != nil { if err != nil {
framework.Failf("Failed to create metric descriptor: %s", err) framework.Failf("Failed to create metric descriptor: %s", err)
} }
defer CleanupDescriptors(gcmService, projectId) defer CleanupDescriptors(gcmService, projectID)
// Both deployments - for old and new resource model - expose External Metrics API. // Both deployments - for old and new resource model - expose External Metrics API.
err = CreateAdapter(AdapterForOldResourceModel) err = CreateAdapter(AdapterForOldResourceModel)

View File

@ -25,7 +25,7 @@ import (
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/common" "k8s.io/kubernetes/test/e2e/common"
@ -47,24 +47,24 @@ const (
) )
var _ = instrumentation.SIGDescribe("[Feature:PrometheusMonitoring] Prometheus", func() { var _ = instrumentation.SIGDescribe("[Feature:PrometheusMonitoring] Prometheus", func() {
BeforeEach(func() { ginkgo.BeforeEach(func() {
framework.SkipUnlessPrometheusMonitoringIsEnabled() framework.SkipUnlessPrometheusMonitoringIsEnabled()
}) })
f := framework.NewDefaultFramework("prometheus-monitoring") f := framework.NewDefaultFramework("prometheus-monitoring")
It("should scrape container metrics from all nodes.", func() { ginkgo.It("should scrape container metrics from all nodes.", func() {
expectedNodes, err := getAllNodes(f.ClientSet) expectedNodes, err := getAllNodes(f.ClientSet)
framework.ExpectNoError(err) framework.ExpectNoError(err)
retryUntilSucceeds(func() error { retryUntilSucceeds(func() error {
return validateMetricAvailableForAllNodes(f.ClientSet, `container_cpu_usage_seconds_total`, expectedNodes) return validateMetricAvailableForAllNodes(f.ClientSet, `container_cpu_usage_seconds_total`, expectedNodes)
}, prometheusTestTimeout) }, prometheusTestTimeout)
}) })
It("should successfully scrape all targets", func() { ginkgo.It("should successfully scrape all targets", func() {
retryUntilSucceeds(func() error { retryUntilSucceeds(func() error {
return validateAllActiveTargetsAreHealthy(f.ClientSet) return validateAllActiveTargetsAreHealthy(f.ClientSet)
}, prometheusTestTimeout) }, prometheusTestTimeout)
}) })
It("should contain correct container CPU metric.", func() { ginkgo.It("should contain correct container CPU metric.", func() {
query := prometheusCPUQuery(f.Namespace.Name, "prometheus-cpu-consumer", prometheusRate) query := prometheusCPUQuery(f.Namespace.Name, "prometheus-cpu-consumer", prometheusRate)
consumer := consumeCPUResources(f, "prometheus-cpu-consumer", targetCPUUsage*1000) consumer := consumeCPUResources(f, "prometheus-cpu-consumer", targetCPUUsage*1000)
defer consumer.CleanUp() defer consumer.CleanUp()
@ -72,7 +72,7 @@ var _ = instrumentation.SIGDescribe("[Feature:PrometheusMonitoring] Prometheus",
return validateQueryReturnsCorrectValues(f.ClientSet, query, targetCPUUsage, 3, prometheusMetricErrorTolerance) return validateQueryReturnsCorrectValues(f.ClientSet, query, targetCPUUsage, 3, prometheusMetricErrorTolerance)
}, prometheusTestTimeout) }, prometheusTestTimeout)
}) })
It("should scrape metrics from annotated pods.", func() { ginkgo.It("should scrape metrics from annotated pods.", func() {
query := prometheusPodCustomMetricQuery(f.Namespace.Name, "prometheus-custom-pod-metric") query := prometheusPodCustomMetricQuery(f.Namespace.Name, "prometheus-custom-pod-metric")
consumer := exportCustomMetricFromPod(f, "prometheus-custom-pod-metric", customMetricValue) consumer := exportCustomMetricFromPod(f, "prometheus-custom-pod-metric", customMetricValue)
defer consumer.CleanUp() defer consumer.CleanUp()
@ -80,7 +80,7 @@ var _ = instrumentation.SIGDescribe("[Feature:PrometheusMonitoring] Prometheus",
return validateQueryReturnsCorrectValues(f.ClientSet, query, customMetricValue, 1, prometheusMetricErrorTolerance) return validateQueryReturnsCorrectValues(f.ClientSet, query, customMetricValue, 1, prometheusMetricErrorTolerance)
}, prometheusTestTimeout) }, prometheusTestTimeout)
}) })
It("should scrape metrics from annotated services.", func() { ginkgo.It("should scrape metrics from annotated services.", func() {
query := prometheusServiceCustomMetricQuery(f.Namespace.Name, "prometheus-custom-service-metric") query := prometheusServiceCustomMetricQuery(f.Namespace.Name, "prometheus-custom-service-metric")
consumer := exportCustomMetricFromService(f, "prometheus-custom-service-metric", customMetricValue) consumer := exportCustomMetricFromService(f, "prometheus-custom-service-metric", customMetricValue)
defer consumer.CleanUp() defer consumer.CleanUp()
@ -251,10 +251,13 @@ type promTargetsResponse struct {
Data TargetDiscovery `json:"data"` Data TargetDiscovery `json:"data"`
} }
// TargetDiscovery has all the active targets.
type TargetDiscovery struct { type TargetDiscovery struct {
ActiveTargets []*Target `json:"activeTargets"` ActiveTargets []*Target `json:"activeTargets"`
DroppedTargets []*DroppedTarget `json:"droppedTargets"` DroppedTargets []*DroppedTarget `json:"droppedTargets"`
} }
// Target has the information for one target.
type Target struct { type Target struct {
DiscoveredLabels map[string]string `json:"discoveredLabels"` DiscoveredLabels map[string]string `json:"discoveredLabels"`
Labels map[string]string `json:"labels"` Labels map[string]string `json:"labels"`
@ -266,17 +269,20 @@ type Target struct {
Health TargetHealth `json:"health"` Health TargetHealth `json:"health"`
} }
// DroppedTarget has the information for one target that was dropped during relabelling.
type DroppedTarget struct { type DroppedTarget struct {
// Labels before any processing. // Labels before any processing.
DiscoveredLabels map[string]string `json:"discoveredLabels"` DiscoveredLabels map[string]string `json:"discoveredLabels"`
} }
// The possible health states of a target based on the last performed scrape.
const ( const (
HealthUnknown TargetHealth = "unknown" HealthUnknown TargetHealth = "unknown"
HealthGood TargetHealth = "up" HealthGood TargetHealth = "up"
HealthBad TargetHealth = "down" HealthBad TargetHealth = "down"
) )
// TargetHealth describes the health state of a target.
type TargetHealth string type TargetHealth string
func queryPrometheus(c clientset.Interface, query string, start, end time.Time, step time.Duration) (model.Value, error) { func queryPrometheus(c clientset.Interface, query string, start, end time.Time, step time.Duration) (model.Value, error) {

View File

@ -25,7 +25,7 @@ import (
"golang.org/x/oauth2/google" "golang.org/x/oauth2/google"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
"k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/common" "k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
@ -59,20 +59,20 @@ var (
) )
var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() { var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() {
BeforeEach(func() { ginkgo.BeforeEach(func() {
framework.SkipUnlessProviderIs("gce", "gke") framework.SkipUnlessProviderIs("gce", "gke")
}) })
f := framework.NewDefaultFramework("stackdriver-monitoring") f := framework.NewDefaultFramework("stackdriver-monitoring")
It("should have cluster metrics [Feature:StackdriverMonitoring]", func() { ginkgo.It("should have cluster metrics [Feature:StackdriverMonitoring]", func() {
testStackdriverMonitoring(f, 1, 100, 200) testStackdriverMonitoring(f, 1, 100, 200)
}) })
}) })
func testStackdriverMonitoring(f *framework.Framework, pods, allPodsCPU int, perPodCPU int64) { func testStackdriverMonitoring(f *framework.Framework, pods, allPodsCPU int, perPodCPU int64) {
projectId := framework.TestContext.CloudConfig.ProjectID projectID := framework.TestContext.CloudConfig.ProjectID
ctx := context.Background() ctx := context.Background()
client, err := google.DefaultClient(ctx, gcm.CloudPlatformScope) client, err := google.DefaultClient(ctx, gcm.CloudPlatformScope)
@ -107,7 +107,7 @@ func testStackdriverMonitoring(f *framework.Framework, pods, allPodsCPU int, per
rc.WaitForReplicas(pods, 15*time.Minute) rc.WaitForReplicas(pods, 15*time.Minute)
metricsMap := map[string]bool{} metricsMap := map[string]bool{}
pollingFunction := checkForMetrics(projectId, gcmService, time.Now(), metricsMap, allPodsCPU, perPodCPU) pollingFunction := checkForMetrics(projectID, gcmService, time.Now(), metricsMap, allPodsCPU, perPodCPU)
err = wait.Poll(pollFrequency, pollTimeout, pollingFunction) err = wait.Poll(pollFrequency, pollTimeout, pollingFunction)
if err != nil { if err != nil {
framework.Logf("Missing metrics: %+v\n", metricsMap) framework.Logf("Missing metrics: %+v\n", metricsMap)
@ -115,7 +115,7 @@ func testStackdriverMonitoring(f *framework.Framework, pods, allPodsCPU int, per
framework.ExpectNoError(err) framework.ExpectNoError(err)
} }
func checkForMetrics(projectId string, gcmService *gcm.Service, start time.Time, metricsMap map[string]bool, cpuUsed int, cpuLimit int64) func() (bool, error) { func checkForMetrics(projectID string, gcmService *gcm.Service, start time.Time, metricsMap map[string]bool, cpuUsed int, cpuLimit int64) func() (bool, error) {
return func() (bool, error) { return func() (bool, error) {
counter := 0 counter := 0
correctUtilization := false correctUtilization := false
@ -124,7 +124,7 @@ func checkForMetrics(projectId string, gcmService *gcm.Service, start time.Time,
} }
for _, metric := range stackdriverMetrics { for _, metric := range stackdriverMetrics {
// TODO: check only for metrics from this cluster // TODO: check only for metrics from this cluster
ts, err := fetchTimeSeries(projectId, gcmService, metric, start, time.Now()) ts, err := fetchTimeSeries(projectID, gcmService, metric, start, time.Now())
framework.ExpectNoError(err) framework.ExpectNoError(err)
if len(ts) > 0 { if len(ts) > 0 {
counter = counter + 1 counter = counter + 1
@ -134,7 +134,7 @@ func checkForMetrics(projectId string, gcmService *gcm.Service, start time.Time,
framework.Logf("No timeseries for metric %v\n", metric) framework.Logf("No timeseries for metric %v\n", metric)
} }
var sum float64 = 0 var sum float64
switch metric { switch metric {
case "cpu/utilization": case "cpu/utilization":
for _, t := range ts { for _, t := range ts {
@ -154,9 +154,8 @@ func checkForMetrics(projectId string, gcmService *gcm.Service, start time.Time,
framework.Logf("Most recent cpu/utilization sum*cpu/limit: %v\n", sum*float64(cpuLimit)) framework.Logf("Most recent cpu/utilization sum*cpu/limit: %v\n", sum*float64(cpuLimit))
if math.Abs(sum*float64(cpuLimit)-float64(cpuUsed)) > tolerance*float64(cpuUsed) { if math.Abs(sum*float64(cpuLimit)-float64(cpuUsed)) > tolerance*float64(cpuUsed) {
return false, nil return false, nil
} else {
correctUtilization = true
} }
correctUtilization = true
} }
} }
if counter < 9 || !correctUtilization { if counter < 9 || !correctUtilization {
@ -166,14 +165,14 @@ func checkForMetrics(projectId string, gcmService *gcm.Service, start time.Time,
} }
} }
func createMetricFilter(metric string, container_name string) string { func createMetricFilter(metric string, containerName string) string {
return fmt.Sprintf(`metric.type="container.googleapis.com/container/%s" AND return fmt.Sprintf(`metric.type="container.googleapis.com/container/%s" AND
resource.label.container_name="%s"`, metric, container_name) resource.label.container_name="%s"`, metric, containerName)
} }
func fetchTimeSeries(projectId string, gcmService *gcm.Service, metric string, start time.Time, end time.Time) ([]*gcm.TimeSeries, error) { func fetchTimeSeries(projectID string, gcmService *gcm.Service, metric string, start time.Time, end time.Time) ([]*gcm.TimeSeries, error) {
response, err := gcmService.Projects.TimeSeries. response, err := gcmService.Projects.TimeSeries.
List(fullProjectName(projectId)). List(fullProjectName(projectID)).
Filter(createMetricFilter(metric, rcName)). Filter(createMetricFilter(metric, rcName)).
IntervalStartTime(start.Format(time.RFC3339)). IntervalStartTime(start.Format(time.RFC3339)).
IntervalEndTime(end.Format(time.RFC3339)). IntervalEndTime(end.Format(time.RFC3339)).

View File

@ -17,52 +17,51 @@ limitations under the License.
package monitoring package monitoring
import ( import (
"time"
"golang.org/x/oauth2/google"
clientset "k8s.io/client-go/kubernetes"
"context" "context"
"encoding/json" "encoding/json"
"fmt" "fmt"
. "github.com/onsi/ginkgo"
"io/ioutil" "io/ioutil"
"reflect"
"time"
"github.com/onsi/ginkgo"
"golang.org/x/oauth2/google"
"k8s.io/api/core/v1" "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common" instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
"reflect"
) )
const ( const (
// Time to wait after a pod creation for it's metadata to be exported // Time to wait after a pod creation for it's metadata to be exported
metadataWaitTime = 120 * time.Second metadataWaitTime = 120 * time.Second
// Scope for Stackdriver Metadata API // MonitoringScope is the scope for Stackdriver Metadata API
MonitoringScope = "https://www.googleapis.com/auth/monitoring" MonitoringScope = "https://www.googleapis.com/auth/monitoring"
) )
// Registers the Stackdriver Metadata Agent e2e test with the
// instrumentation SIG suite.
var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() {
	ginkgo.BeforeEach(func() {
		// Stackdriver integration is only available on GCE/GKE clusters.
		framework.SkipUnlessProviderIs("gce", "gke")
	})

	f := framework.NewDefaultFramework("stackdriver-monitoring")
	var kubeClient clientset.Interface

	ginkgo.It("should run Stackdriver Metadata Agent [Feature:StackdriverMetadataAgent]", func() {
		// The client is only available once the framework has built the
		// test namespace, so it is captured inside the It body.
		kubeClient = f.ClientSet
		testAgent(f, kubeClient)
	})
})
func testAgent(f *framework.Framework, kubeClient clientset.Interface) { func testAgent(f *framework.Framework, kubeClient clientset.Interface) {
projectId := framework.TestContext.CloudConfig.ProjectID projectID := framework.TestContext.CloudConfig.ProjectID
resourceType := "k8s_container" resourceType := "k8s_container"
uniqueContainerName := fmt.Sprintf("test-container-%v", time.Now().Unix()) uniqueContainerName := fmt.Sprintf("test-container-%v", time.Now().Unix())
endpoint := fmt.Sprintf( endpoint := fmt.Sprintf(
"https://stackdriver.googleapis.com/v1beta2/projects/%v/resourceMetadata?filter=resource.type%%3D%v+AND+resource.label.container_name%%3D%v", "https://stackdriver.googleapis.com/v1beta2/projects/%v/resourceMetadata?filter=resource.type%%3D%v+AND+resource.label.container_name%%3D%v",
projectId, projectID,
resourceType, resourceType,
uniqueContainerName) uniqueContainerName)
@ -101,10 +100,12 @@ func testAgent(f *framework.Framework, kubeClient clientset.Interface) {
} }
} }
// Metadata has the information fetched from Stackdriver metadata API.
type Metadata struct {
	// Results holds one decoded JSON object per resource returned by the
	// resourceMetadata list call.
	Results []map[string]interface{}
}
// Resource contains the resource type and labels from Stackdriver metadata API.
type Resource struct {
	// resourceType is the Stackdriver monitored-resource type
	// (e.g. "k8s_container", as queried by testAgent).
	resourceType string
	// resourceLabels maps resource label names to their values; at minimum
	// "container_name" is present — presumably other k8s_container labels
	// too, but that is not visible here (TODO confirm against the API).
	resourceLabels map[string]string
}
@ -135,7 +136,7 @@ func verifyPodExists(response []byte, containerName string) (bool, error) {
} }
func parseResource(resource interface{}) (*Resource, error) { func parseResource(resource interface{}) (*Resource, error) {
var labels map[string]string = map[string]string{} labels := map[string]string{}
resourceMap, ok := resource.(map[string]interface{}) resourceMap, ok := resource.(map[string]interface{})
if !ok { if !ok {
return nil, fmt.Errorf("Resource entry is of type %s, expected map[string]interface{}", reflect.TypeOf(resource)) return nil, fmt.Errorf("Resource entry is of type %s, expected map[string]interface{}", reflect.TypeOf(resource))