Modularized implementation of the prompush JSON, with e2e running/pending push gateway gauges.

pull/6/head
jayunit100 2015-07-09 13:53:17 -04:00
parent 7645513d2a
commit 739e79dd9f
6 changed files with 157 additions and 30 deletions

@@ -29,18 +29,23 @@ Looks open enough :).
 ## Prometheus
-You can launch prometheus easily, by simply running.
+You can launch prometheus, promdash, and the pushgateway easily, by simply running:
 `kubectl create -f contrib/prometheus/prometheus-all.json`
-Then (edit the publicIP field in prometheus-service to be a public ip on one of your kubelets),
-and run
-`kubectl create -f contrib/prometheus/prometheus-service.json`
+This launches the pushgateway so that prometheus collects metrics from it automatically.
+NOTE: If you plan to use the push gateway to push metrics (for example, we use it in the e2e tests),
+you will want a stable IP at which the push gateway can be reached, and you may need to adjust the NodePort settings for your cloud provider.
+Then create the corresponding services. Note that the pushgateway service may or may not be functional, but we leave it in for good measure:
+`kubectl create -f contrib/prometheus/prometheus-service.json ; kubectl create -f contrib/prometheus/pushgateway-service.json`
 Now, you can access the service (e.g. `wget 10.0.1.89:9090`) and build graphs.
 ## How it works
 This is a v1-API-based, containerized prometheus ReplicationController, which scrapes endpoints that are readable on the KUBERNETES service (the internal kubernetes service running in the default namespace, which is visible to all pods).
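Since the NOTE above is about pushing your own metrics at the gateway, here is a minimal sketch of what such a push can look like from Go, mirroring the pre-1.0 `client_golang` `prometheus.Push` call that this commit's `test/e2e/prompush.go` uses (register a gauge, set it, push the default registry). The gauge name and the gateway address below are placeholders, not values from this change.

```go
package main

import (
	"log"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// A throwaway gauge in the default registry; prometheus.Push sends
	// what is registered there to the push gateway in one shot.
	demo := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "e2e_demo_gauge", // placeholder metric name
		Help: "Example gauge pushed to the gateway",
	})
	prometheus.MustRegister(demo)
	demo.Set(42)

	// "10.0.1.89:9091" is a placeholder; substitute whatever address the
	// pushgateway service is reachable at in your cluster.
	if err := prometheus.Push("e2e-demo", "none", "10.0.1.89:9091"); err != nil {
		log.Fatalf("push to gateway failed: %v", err)
	}
}
```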
@@ -73,7 +78,6 @@ at port 9090.
 - We should publish this image into the kube/ namespace.
 - Possibly use postgres or mysql as a promdash database.
-- push gateway (https://github.com/prometheus/pushgateway) setup.
 - stop using kubectl to make a local proxy faking the old RO port and build in
 real auth capabilities.
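The "How it works" note and the last TODO item refer to the kubectl sidecar that the pod below runs (`kubectl proxy -p 8001 --api-prefix=/`), which stands in for the old read-only port. As a hedged sketch of what that proxy exposes from inside the pod, the snippet below just lists endpoints through it; the exact API path your `run_prometheus.sh` template actually scrapes is an assumption.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
)

func main() {
	// From inside the pod, the kubectl sidecar serves the API on
	// localhost:8001 without auth. The endpoints listing is one example path.
	resp, err := http.Get("http://localhost:8001/api/v1/endpoints")
	if err != nil {
		log.Fatalf("local kubectl proxy not reachable: %v", err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Fatalf("reading response failed: %v", err)
	}
	fmt.Printf("status %s, %d bytes of endpoints data\n", resp.Status, len(body))
}
```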

@@ -21,8 +21,8 @@
 "spec": {
 "containers": [
 {
-"name":"kube-promdash",
 "image": "prom/promdash",
+"name": "kube-promdash",
 "env": [
@@ -44,15 +44,14 @@
 ]
 },
 {
-"command": ["./run_prometheus.sh", "-t", "KUBERNETES_RO", "-d", "/var/prometheus/"],
+"command": [
+"./run_prometheus.sh",
+"-t",
+"KUBERNETES_RO,PUSHGATEWAY",
+"-d",
+"/var/prometheus/"
+],
 "image": "jayunit100/kube-prometheus",
 "name": "kube-prometheus",
-"ports": [
-{
-"containerPort": 9090,
-"hostPort": 9090,
-"protocol": "TCP"
-}
-],
 "env": [
 {
@@ -62,6 +61,21 @@
 {
 "name": "KUBERNETES_RO_SERVICE_PORT",
 "value": "8001"
+},
+{
+"name": "PUSHGATEWAY_SERVICE_HOST",
+"value": "localhost"
+},
+{
+"name": "PUSHGATEWAY_SERVICE_PORT",
+"value": "9091"
 }
 ],
+"ports": [
+{
+"containerPort": 9090,
+"hostPort": 9090,
+"protocol": "TCP"
+}
+],
 "volumeMounts": [
@@ -72,12 +86,26 @@
 ]
 },
 {
-"name": "kubectl",
-"image": "gcr.io/google_containers/kubectl:v0.18.0-350-gfb3305edcf6c1a",
-"args": [
-"proxy", "-p", "8001", "--api-prefix=/"
+"image": "jayunit100/prompush:0.2.0",
+"name": "pushgateway",
+"ports": [
+{
+"containerPort": 9091,
+"hostPort": 9091,
+"protocol": "TCP"
+}
+]
+},
+{
+"image": "gcr.io/google_containers/kubectl:v0.18.0-350-gfb3305edcf6c1a",
+"name": "kubectl",
+"args": [
+"proxy",
+"-p",
+"8001",
+"--api-prefix=/"
 ]
 }
 ],
 "volumes": [
 {
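A note on the env block above: the pod hands prometheus the pushgateway's address through `PUSHGATEWAY_SERVICE_HOST`/`PUSHGATEWAY_SERVICE_PORT` (localhost:9091, since the gateway container runs in the same pod), matching the `-t KUBERNETES_RO,PUSHGATEWAY` target list passed to `run_prometheus.sh`. How that script actually templates its scrape config is not shown in this diff, so the helper below is only a sketch of the naming convention.

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// targetAddr assembles "host:port" for a target name such as PUSHGATEWAY,
// following the <NAME>_SERVICE_HOST / <NAME>_SERVICE_PORT convention used in
// the pod spec above. This is a sketch, not the real run_prometheus.sh logic.
func targetAddr(name string) string {
	return os.Getenv(name+"_SERVICE_HOST") + ":" + os.Getenv(name+"_SERVICE_PORT")
}

func main() {
	for _, name := range strings.Split("KUBERNETES_RO,PUSHGATEWAY", ",") {
		fmt.Printf("%s -> %s\n", name, targetAddr(name))
	}
}
```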

@@ -0,0 +1,22 @@
{
  "kind":"Service",
  "apiVersion":"v1",
  "metadata":{
    "name":"pushgateway",
    "labels":{
      "name":"pushgateway"
    }
  },
  "spec":{
    "type": "NodePort",
    "ports": [
      {
        "port":9091,
        "targetPort":9091,
        "protocol":"TCP"
      }],
    "selector":{
      "name":"kube-prometheus"
    }
  }
}
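Because this service is `type: NodePort` without a pinned `nodePort`, the API server allocates the node port for you; look it up first (for example with `kubectl get service pushgateway`) before trying to reach the gateway from outside the cluster. A rough connectivity check, with the node IP and node port below as placeholders:

```go
package main

import (
	"fmt"
	"log"
	"net/http"
)

func main() {
	// Placeholders: substitute a real node IP and the node port that was
	// actually allocated for the pushgateway service.
	const nodeIP = "10.0.1.89"
	const nodePort = 30091

	// The pushgateway serves its own metrics (and anything pushed to it)
	// on /metrics, so a successful GET here means prometheus can scrape it.
	resp, err := http.Get(fmt.Sprintf("http://%s:%d/metrics", nodeIP, nodePort))
	if err != nil {
		log.Fatalf("pushgateway not reachable through the NodePort: %v", err)
	}
	defer resp.Body.Close()
	fmt.Println("pushgateway answered:", resp.Status)
}
```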

@@ -80,6 +80,7 @@ func init() {
 	flag.StringVar(&cloudConfig.ClusterTag, "cluster-tag", "", "Tag used to identify resources. Only required if provider is aws.")
 	flag.IntVar(&testContext.MinStartupPods, "minStartupPods", 0, "The number of pods which we need to see in 'Running' state with a 'Ready' condition of true, before we try running tests. This is useful in any cluster which needs some base pod-based services running before it can be used.")
 	flag.StringVar(&testContext.UpgradeTarget, "upgrade-target", "latest_ci", "Version to upgrade to (e.g. 'latest_stable', 'latest_release', 'latest_ci', '0.19.1', '0.19.1-669-gabac8c8') if doing an upgrade test.")
+	flag.StringVar(&testContext.PrometheusPushGateway, "prom-push-gateway", "", "The URL of the prometheus push gateway, so that metrics can be pushed during e2e runs and scraped by prometheus. Typically something like 127.0.0.1:9091.")
 }
 
 func TestE2E(t *testing.T) {
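The new `-prom-push-gateway` flag expects a bare `host:port` (e.g. `127.0.0.1:9091`) rather than a full URL. A hypothetical helper, not part of this commit, that sanity-checks the value before the suite starts pushing:

```go
package main

import (
	"fmt"
	"log"
	"net"
)

// checkGatewayAddr is a hypothetical validator for the -prom-push-gateway
// value; it only confirms the string splits into a host and a port.
func checkGatewayAddr(addr string) error {
	host, port, err := net.SplitHostPort(addr)
	if err != nil {
		return fmt.Errorf("expected host:port, got %q: %v", addr, err)
	}
	fmt.Printf("e2e metrics will be pushed to %s on port %s\n", host, port)
	return nil
}

func main() {
	if err := checkGatewayAddr("127.0.0.1:9091"); err != nil {
		log.Fatal(err)
	}
}
```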

test/e2e/prompush.go (new file)

@@ -0,0 +1,68 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// This is a utility for prometheus pushing functionality.
package e2e

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

// Prometheus stuff: Setup metrics.
var runningMetric = prometheus.NewGauge(prometheus.GaugeOpts{
	Name: "e2e_running",
	Help: "The num of running pods",
})

var pendingMetric = prometheus.NewGauge(prometheus.GaugeOpts{
	Name: "e2e_pending",
	Help: "The num of pending pods",
})

// Turn this to true after we register.
var prom_registered = false

// Reusable function for pushing metrics to prometheus. Handles initialization and so on.
func promPushRunningPending(running, pending int) error {
	if testContext.PrometheusPushGateway == "" {
		Logf("Ignoring prom push, push gateway unavailable")
		return nil
	} else {
		// Register metrics if necessary
		if !prom_registered && testContext.PrometheusPushGateway != "" {
			prometheus.Register(runningMetric)
			prometheus.Register(pendingMetric)
			prom_registered = true
		}

		// Update metric values
		runningMetric.Set(float64(running))
		pendingMetric.Set(float64(pending))

		// Push them to the push gateway. This will be scraped by prometheus
		// provided you launch it with the pushgateway as an endpoint.
		if err := prometheus.Push(
			"e2e",
			"none",
			testContext.PrometheusPushGateway, // i.e. "127.0.0.1:9091"
		); err != nil {
			fmt.Println("failed at pushing to pushgateway ", err)
			return err
		}
	}
	return nil
}
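promPushRunningPending only needs an HTTP listener on the other end, so it can be exercised without a real push gateway. Below is a sketch of a test that could sit next to prompush.go in the e2e package; it assumes, as the flag help suggests, that a bare host:port is accepted, and it only asserts that a push request arrives, not how the gateway's response is handled.

```go
package e2e

import (
	"net/http"
	"net/http/httptest"
	"strings"
	"testing"
)

func TestPromPushRunningPending(t *testing.T) {
	got := make(chan string, 1)
	fake := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		select {
		case got <- r.Method + " " + r.URL.Path:
		default:
		}
	}))
	defer fake.Close()

	// Hand the helper the fake gateway as host:port, the same shape the
	// -prom-push-gateway flag documents.
	testContext.PrometheusPushGateway = strings.TrimPrefix(fake.URL, "http://")

	// Ignore the returned error: the fake does not mimic the gateway's
	// status codes, we only care that a push was attempted.
	_ = promPushRunningPending(3, 1)

	select {
	case req := <-got:
		t.Logf("fake gateway saw: %s", req)
	default:
		t.Fatal("no push request reached the fake gateway")
	}
}
```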

@@ -103,18 +103,19 @@ type CloudConfig struct {
 }
 
 type TestContextType struct {
-	KubeConfig     string
-	KubeContext    string
-	CertDir        string
-	Host           string
-	RepoRoot       string
-	Provider       string
-	CloudConfig    CloudConfig
-	KubectlPath    string
-	OutputDir      string
-	prefix         string
-	MinStartupPods int
-	UpgradeTarget  string
+	KubeConfig            string
+	KubeContext           string
+	CertDir               string
+	Host                  string
+	RepoRoot              string
+	Provider              string
+	CloudConfig           CloudConfig
+	KubectlPath           string
+	OutputDir             string
+	prefix                string
+	MinStartupPods        int
+	UpgradeTarget         string
+	PrometheusPushGateway string
 }
 
 var testContext TestContextType
@@ -1141,6 +1142,9 @@ func RunRC(config RCConfig) error {
 		Logf("%v %v Pods: %d out of %d created, %d running, %d pending, %d waiting, %d inactive, %d unknown ",
 			time.Now(), rc.Name, len(pods), config.Replicas, running, pending, waiting, inactive, unknown)
+		promPushRunningPending(running, pending)
 		if config.PodStatusFile != nil {
 			fmt.Fprintf(config.PodStatusFile, "%s, %d, running, %d, pending, %d, waiting, %d, inactive, %d, unknown\n", time.Now(), running, pending, waiting, inactive, unknown)
 		}