mirror of https://github.com/prometheus/prometheus
Merge branch 'master' into dev-2.0
commit 397f001ac5
@@ -246,6 +246,11 @@ func resolveFilepaths(baseDir string, cfg *Config) {
         mcfg.TLSConfig.CertFile = join(mcfg.TLSConfig.CertFile)
         mcfg.TLSConfig.KeyFile = join(mcfg.TLSConfig.KeyFile)
     }
+    for _, consulcfg := range cfg.ConsulSDConfigs {
+        consulcfg.TLSConfig.CAFile = join(consulcfg.TLSConfig.CAFile)
+        consulcfg.TLSConfig.CertFile = join(consulcfg.TLSConfig.CertFile)
+        consulcfg.TLSConfig.KeyFile = join(consulcfg.TLSConfig.KeyFile)
+    }
 }

 for _, cfg := range cfg.ScrapeConfigs {
@@ -584,7 +589,7 @@ func (c *AlertingConfig) UnmarshalYAML(unmarshal func(interface{}) error) error
     return nil
 }

-// AlertmanagersConfig configures how Alertmanagers can be discovered and communicated with.
+// AlertmanagerConfig configures how Alertmanagers can be discovered and communicated with.
 type AlertmanagerConfig struct {
     // We cannot do proper Go type embedding below as the parser will then parse
     // values arbitrarily into the overflow maps of further-down types.
@@ -824,6 +829,7 @@ type ConsulSDConfig struct {
     // Defaults to all services if empty.
     Services []string `yaml:"services"`

+    TLSConfig TLSConfig `yaml:"tls_config,omitempty"`
     // Catches all undefined fields and must be empty after parsing.
     XXX map[string]interface{} `yaml:",inline"`
 }
@@ -949,8 +955,10 @@ func (c *MarathonSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
     return nil
 }

+// KubernetesRole is role of the service in Kubernetes.
 type KubernetesRole string

+// The valid options for KubernetesRole.
 const (
     KubernetesRoleNode = "node"
     KubernetesRolePod  = "pod"
@@ -958,6 +966,7 @@ const (
     KubernetesRoleEndpoint = "endpoints"
 )

+// UnmarshalYAML implements the yaml.Unmarshaler interface.
 func (c *KubernetesRole) UnmarshalYAML(unmarshal func(interface{}) error) error {
     if err := unmarshal((*string)(c)); err != nil {
         return err
@@ -1226,6 +1235,17 @@ func (c *RelabelConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
     if c.Action == RelabelHashMod && !model.LabelName(c.TargetLabel).IsValid() {
         return fmt.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action)
     }
+
+    if c.Action == RelabelLabelDrop || c.Action == RelabelLabelKeep {
+        if c.SourceLabels != nil ||
+            c.TargetLabel != DefaultRelabelConfig.TargetLabel ||
+            c.Modulus != DefaultRelabelConfig.Modulus ||
+            c.Separator != DefaultRelabelConfig.Separator ||
+            c.Replacement != DefaultRelabelConfig.Replacement {
+            return fmt.Errorf("%s action requires only 'regex', and no other fields", c.Action)
+        }
+    }
+
     return nil
 }
@@ -247,7 +247,13 @@ var expectedConf = &Config{
             Server:       "localhost:1234",
             Services:     []string{"nginx", "cache", "mysql"},
             TagSeparator: DefaultConsulSDConfig.TagSeparator,
-            Scheme:       DefaultConsulSDConfig.Scheme,
+            Scheme:       "https",
+            TLSConfig: TLSConfig{
+                CertFile:           "testdata/valid_cert_file",
+                KeyFile:            "testdata/valid_key_file",
+                CAFile:             "testdata/valid_ca_file",
+                InsecureSkipVerify: false,
+            },
         },
     },
 },
@@ -538,6 +544,36 @@ var expectedErrors = []struct {
     }, {
         filename: "modulus_missing.bad.yml",
         errMsg:   "relabel configuration for hashmod requires non-zero modulus",
+    }, {
+        filename: "labelkeep.bad.yml",
+        errMsg:   "labelkeep action requires only 'regex', and no other fields",
+    }, {
+        filename: "labelkeep2.bad.yml",
+        errMsg:   "labelkeep action requires only 'regex', and no other fields",
+    }, {
+        filename: "labelkeep3.bad.yml",
+        errMsg:   "labelkeep action requires only 'regex', and no other fields",
+    }, {
+        filename: "labelkeep4.bad.yml",
+        errMsg:   "labelkeep action requires only 'regex', and no other fields",
+    }, {
+        filename: "labelkeep5.bad.yml",
+        errMsg:   "labelkeep action requires only 'regex', and no other fields",
+    }, {
+        filename: "labeldrop.bad.yml",
+        errMsg:   "labeldrop action requires only 'regex', and no other fields",
+    }, {
+        filename: "labeldrop2.bad.yml",
+        errMsg:   "labeldrop action requires only 'regex', and no other fields",
+    }, {
+        filename: "labeldrop3.bad.yml",
+        errMsg:   "labeldrop action requires only 'regex', and no other fields",
+    }, {
+        filename: "labeldrop4.bad.yml",
+        errMsg:   "labeldrop action requires only 'regex', and no other fields",
+    }, {
+        filename: "labeldrop5.bad.yml",
+        errMsg:   "labeldrop action requires only 'regex', and no other fields",
     }, {
         filename: "rules.bad.yml",
         errMsg:   "invalid rule file path",
@@ -114,6 +114,12 @@ scrape_configs:
   consul_sd_configs:
   - server: 'localhost:1234'
     services: ['nginx', 'cache', 'mysql']
+    scheme: https
+    tls_config:
+      ca_file: valid_ca_file
+      cert_file: valid_cert_file
+      key_file: valid_key_file
+      insecure_skip_verify: false

   relabel_configs:
   - source_labels: [__meta_sd_consul_tags]
@@ -0,0 +1,5 @@
+scrape_configs:
+- job_name: prometheus
+  relabel_configs:
+  - source_labels: [abcdef]
+    action: labeldrop
@@ -0,0 +1,5 @@
+scrape_configs:
+- job_name: prometheus
+  relabel_configs:
+  - modulus: 8
+    action: labeldrop
@@ -0,0 +1,5 @@
+scrape_configs:
+- job_name: prometheus
+  relabel_configs:
+  - separator: ','
+    action: labeldrop
@@ -0,0 +1,5 @@
+scrape_configs:
+- job_name: prometheus
+  relabel_configs:
+  - replacement: yolo-{1}
+    action: labeldrop
@@ -0,0 +1,5 @@
+scrape_configs:
+- job_name: prometheus
+  relabel_configs:
+  - target_label: yolo
+    action: labeldrop
@@ -0,0 +1,5 @@
+scrape_configs:
+- job_name: prometheus
+  relabel_configs:
+  - source_labels: [abcdef]
+    action: labelkeep
@@ -0,0 +1,5 @@
+scrape_configs:
+- job_name: prometheus
+  relabel_configs:
+  - modulus: 8
+    action: labelkeep
@@ -0,0 +1,5 @@
+scrape_configs:
+- job_name: prometheus
+  relabel_configs:
+  - separator: ','
+    action: labelkeep
@@ -0,0 +1,5 @@
+scrape_configs:
+- job_name: prometheus
+  relabel_configs:
+  - replacement: yolo-{1}
+    action: labelkeep
@@ -0,0 +1,5 @@
+scrape_configs:
+- job_name: prometheus
+  relabel_configs:
+  - target_label: yolo
+    action: labelkeep
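Each of the ten new fixtures above pairs action: labeldrop or action: labelkeep with exactly one extra field and is expected to be rejected by the validation added in config.go. For contrast, a configuration that the new check accepts sets only regex next to the action (illustrative example, not a file added by this commit):

scrape_configs:
- job_name: prometheus
  relabel_configs:
  - regex: 'temporary_.*'
    action: labeldrop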
@@ -16,6 +16,7 @@ package consul
 import (
     "fmt"
     "net"
+    "net/http"
     "strconv"
     "strings"
     "time"
@@ -24,9 +25,9 @@ import (
     "github.com/prometheus/client_golang/prometheus"
     "github.com/prometheus/common/log"
     "github.com/prometheus/common/model"
-    "golang.org/x/net/context"

     "github.com/prometheus/prometheus/config"
+    "github.com/prometheus/prometheus/util/httputil"
+    "golang.org/x/net/context"
 )

 const (
@@ -92,6 +93,13 @@ type Discovery struct {

 // NewDiscovery returns a new Discovery for the given config.
 func NewDiscovery(conf *config.ConsulSDConfig) (*Discovery, error) {
+    tls, err := httputil.NewTLSConfig(conf.TLSConfig)
+    if err != nil {
+        return nil, err
+    }
+    transport := &http.Transport{TLSClientConfig: tls}
+    wrapper := &http.Client{Transport: transport}
+
     clientConf := &consul.Config{
         Address: conf.Server,
         Scheme:  conf.Scheme,
@@ -101,6 +109,7 @@ func NewDiscovery(conf *config.ConsulSDConfig) (*Discovery, error) {
             Username: conf.Username,
             Password: conf.Password,
         },
+        HttpClient: wrapper,
     }
     client, err := consul.NewClient(clientConf)
     if err != nil {
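The Consul changes above thread the new tls_config block through to the HTTP client used for the Consul API: httputil.NewTLSConfig turns the parsed TLSConfig into a *tls.Config, and the wrapped http.Client is handed to consul.Config as HttpClient. A rough standalone sketch of the same wiring using only the standard library — the file names are reused from the test fixtures, everything else is hypothetical and not part of this commit:

package main

import (
    "crypto/tls"
    "crypto/x509"
    "io/ioutil"
    "log"
    "net/http"
)

func main() {
    // CA bundle used to verify the Consul server certificate (ca_file).
    caCert, err := ioutil.ReadFile("testdata/valid_ca_file")
    if err != nil {
        log.Fatal(err)
    }
    pool := x509.NewCertPool()
    pool.AppendCertsFromPEM(caCert)

    // Client certificate and key (cert_file / key_file).
    cert, err := tls.LoadX509KeyPair("testdata/valid_cert_file", "testdata/valid_key_file")
    if err != nil {
        log.Fatal(err)
    }

    // The discovery code above builds an equivalent client and passes it to the
    // Consul API via consul.Config.HttpClient.
    client := &http.Client{
        Transport: &http.Transport{
            TLSClientConfig: &tls.Config{
                RootCAs:      pool,
                Certificates: []tls.Certificate{cert},
            },
        },
    }
    _ = client
}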
@@ -30,6 +30,7 @@ import (
     "github.com/prometheus/common/model"
     "github.com/prometheus/prometheus/config"
     "github.com/prometheus/prometheus/util/httputil"
+    "github.com/prometheus/prometheus/util/strutil"
 )

 const (
@@ -45,6 +46,11 @@ const (
     // taskLabel contains the mesos task name of the app instance.
     taskLabel model.LabelName = metaLabelPrefix + "task"

+    // portMappingLabelPrefix is the prefix for the application portMappings labels.
+    portMappingLabelPrefix = metaLabelPrefix + "port_mapping_label_"
+    // portDefinitionLabelPrefix is the prefix for the application portDefinitions labels.
+    portDefinitionLabelPrefix = metaLabelPrefix + "port_definition_label_"
+
     // Constants for instrumentation.
     namespace = "prometheus"
 )
@@ -188,9 +194,15 @@ type Task struct {
     Ports []uint32 `json:"ports"`
 }

+// PortMappings describes in which port the process are binding inside the docker container.
+type PortMappings struct {
+    Labels map[string]string `json:"labels"`
+}
+
 // DockerContainer describes a container which uses the docker runtime.
 type DockerContainer struct {
     Image string `json:"image"`
+    PortMappings []PortMappings `json:"portMappings"`
 }

 // Container describes the runtime an app in running in.
@@ -198,13 +210,19 @@ type Container struct {
     Docker DockerContainer `json:"docker"`
 }

+// PortDefinitions describes which load balancer port you should access to access the service.
+type PortDefinitions struct {
+    Labels map[string]string `json:"labels"`
+}
+
 // App describes a service running on Marathon.
 type App struct {
     ID           string            `json:"id"`
     Tasks        []Task            `json:"tasks"`
     RunningTasks int               `json:"tasksRunning"`
     Labels       map[string]string `json:"labels"`
     Container    Container         `json:"container"`
+    PortDefinitions []PortDefinitions `json:"portDefinitions"`
 }

 // AppList is a list of Marathon apps.
@@ -285,7 +303,7 @@ func createTargetGroup(app *App) *config.TargetGroup {
     }

     for ln, lv := range app.Labels {
-        ln = appLabelPrefix + ln
+        ln = appLabelPrefix + strutil.SanitizeLabelName(ln)
         tg.Labels[model.LabelName(ln)] = model.LabelValue(lv)
     }

@@ -298,15 +316,30 @@ func targetsForApp(app *App) []model.LabelSet {
         if len(t.Ports) == 0 {
             continue
         }
-        target := targetForTask(&t)
-        targets = append(targets, model.LabelSet{
-            model.AddressLabel: model.LabelValue(target),
-            taskLabel:          model.LabelValue(t.ID),
-        })
+        for i := 0; i < len(t.Ports); i++ {
+            targetAddress := targetForTask(&t, i)
+            target := model.LabelSet{
+                model.AddressLabel: model.LabelValue(targetAddress),
+                taskLabel:          model.LabelValue(t.ID),
+            }
+            if i < len(app.PortDefinitions) {
+                for ln, lv := range app.PortDefinitions[i].Labels {
+                    ln = portDefinitionLabelPrefix + strutil.SanitizeLabelName(ln)
+                    target[model.LabelName(ln)] = model.LabelValue(lv)
+                }
+            }
+            if i < len(app.Container.Docker.PortMappings) {
+                for ln, lv := range app.Container.Docker.PortMappings[i].Labels {
+                    ln = portMappingLabelPrefix + strutil.SanitizeLabelName(ln)
+                    target[model.LabelName(ln)] = model.LabelValue(lv)
+                }
+            }
+            targets = append(targets, target)
+        }
     }
     return targets
 }

-func targetForTask(task *Task) string {
-    return net.JoinHostPort(task.Host, fmt.Sprintf("%d", task.Ports[0]))
+func targetForTask(task *Task, index int) string {
+    return net.JoinHostPort(task.Host, fmt.Sprintf("%d", task.Ports[index]))
 }
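With the per-port loop above, a single Marathon task now yields one target per declared port, and each target picks up the labels of the portDefinition and portMapping at the same index. A sketch of the label sets this produces for a two-port task, written as a fragment of the marathon package; the values are hypothetical and mirror the test fixtures that follow:

// Sketch only: what targetsForApp is expected to emit for a task on
// mesos-slave1 exposing ports 31000 and 32000, where the first portMapping
// and the second portDefinition each carry a "prometheus: yes" label.
var expectedTwoPortTargets = []model.LabelSet{
    {
        model.AddressLabel: "mesos-slave1:31000",
        taskLabel:          "test-task-1",
        model.LabelName(portMappingLabelPrefix + "prometheus"): "yes",
    },
    {
        model.AddressLabel: "mesos-slave1:32000",
        taskLabel:          "test-task-1",
        model.LabelName(portDefinitionLabelPrefix + "prometheus"): "yes",
    },
}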
@@ -80,7 +80,12 @@ func marathonTestAppList(labels map[string]string, runningTasks int) *AppList {
             Host:  "mesos-slave1",
             Ports: []uint32{31000},
         }
-        docker = DockerContainer{Image: "repo/image:tag"}
+        docker = DockerContainer{
+            Image: "repo/image:tag",
+            PortMappings: []PortMappings{
+                {Labels: labels},
+            },
+        }
         container = Container{Docker: docker}
         app       = App{
             ID: "test-service",
@@ -88,6 +93,9 @@ func marathonTestAppList(labels map[string]string, runningTasks int) *AppList {
             RunningTasks: runningTasks,
             Labels:       labels,
             Container:    container,
+            PortDefinitions: []PortDefinitions{
+                {Labels: make(map[string]string)},
+            },
         }
     )
     return &AppList{
@@ -119,6 +127,12 @@ func TestMarathonSDSendGroup(t *testing.T) {
         if tgt[model.AddressLabel] != "mesos-slave1:31000" {
             t.Fatalf("Wrong target address: %s", tgt[model.AddressLabel])
         }
+        if tgt[model.LabelName(portMappingLabelPrefix+"prometheus")] != "yes" {
+            t.Fatalf("Wrong first portMappings label from the first port: %s", tgt[model.AddressLabel])
+        }
+        if tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")] != "" {
+            t.Fatalf("Wrong first portDefinitions label from the first port: %s", tgt[model.AddressLabel])
+        }
     default:
         t.Fatal("Did not get a target group.")
     }
@@ -189,6 +203,83 @@ func TestMarathonSDRunAndStop(t *testing.T) {
     }
 }

+func marathonTestAppListWithMutiplePorts(labels map[string]string, runningTasks int) *AppList {
+    var (
+        task = Task{
+            ID:    "test-task-1",
+            Host:  "mesos-slave1",
+            Ports: []uint32{31000, 32000},
+        }
+        docker = DockerContainer{
+            Image: "repo/image:tag",
+            PortMappings: []PortMappings{
+                {Labels: labels},
+                {Labels: make(map[string]string)},
+            },
+        }
+        container = Container{Docker: docker}
+        app       = App{
+            ID:           "test-service",
+            Tasks:        []Task{task},
+            RunningTasks: runningTasks,
+            Labels:       labels,
+            Container:    container,
+            PortDefinitions: []PortDefinitions{
+                {Labels: make(map[string]string)},
+                {Labels: labels},
+            },
+        }
+    )
+    return &AppList{
+        Apps: []App{app},
+    }
+}
+
+func TestMarathonSDSendGroupWithMutiplePort(t *testing.T) {
+    var (
+        ch     = make(chan []*config.TargetGroup, 1)
+        client = func(client *http.Client, url, token string) (*AppList, error) {
+            return marathonTestAppListWithMutiplePorts(marathonValidLabel, 1), nil
+        }
+    )
+    if err := testUpdateServices(client, ch); err != nil {
+        t.Fatalf("Got error: %s", err)
+    }
+    select {
+    case tgs := <-ch:
+        tg := tgs[0]
+
+        if tg.Source != "test-service" {
+            t.Fatalf("Wrong target group name: %s", tg.Source)
+        }
+        if len(tg.Targets) != 2 {
+            t.Fatalf("Wrong number of targets: %v", tg.Targets)
+        }
+        tgt := tg.Targets[0]
+        if tgt[model.AddressLabel] != "mesos-slave1:31000" {
+            t.Fatalf("Wrong target address: %s", tgt[model.AddressLabel])
+        }
+        if tgt[model.LabelName(portMappingLabelPrefix+"prometheus")] != "yes" {
+            t.Fatalf("Wrong first portMappings label from the first port: %s", tgt[model.AddressLabel])
+        }
+        if tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")] != "" {
+            t.Fatalf("Wrong first portDefinitions label from the first port: %s", tgt[model.AddressLabel])
+        }
+        tgt = tg.Targets[1]
+        if tgt[model.AddressLabel] != "mesos-slave1:32000" {
+            t.Fatalf("Wrong target address: %s", tgt[model.AddressLabel])
+        }
+        if tgt[model.LabelName(portMappingLabelPrefix+"prometheus")] != "" {
+            t.Fatalf("Wrong portMappings label from the second port: %s", tgt[model.AddressLabel])
+        }
+        if tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")] != "yes" {
+            t.Fatalf("Wrong portDefinitions label from the second port: %s", tgt[model.AddressLabel])
+        }
+    default:
+        t.Fatal("Did not get a target group.")
+    }
+}
+
 func marathonTestZeroTaskPortAppList(labels map[string]string, runningTasks int) *AppList {
     var (
         task = Task{
@@ -235,3 +326,149 @@ func TestMarathonZeroTaskPorts(t *testing.T) {
         t.Fatal("Did not get a target group.")
     }
 }
+
+func marathonTestAppListWithoutPortMappings(labels map[string]string, runningTasks int) *AppList {
+    var (
+        task = Task{
+            ID:    "test-task-1",
+            Host:  "mesos-slave1",
+            Ports: []uint32{31000, 32000},
+        }
+        docker = DockerContainer{
+            Image: "repo/image:tag",
+        }
+        container = Container{Docker: docker}
+        app       = App{
+            ID:           "test-service",
+            Tasks:        []Task{task},
+            RunningTasks: runningTasks,
+            Labels:       labels,
+            Container:    container,
+            PortDefinitions: []PortDefinitions{
+                {Labels: make(map[string]string)},
+                {Labels: labels},
+            },
+        }
+    )
+    return &AppList{
+        Apps: []App{app},
+    }
+}
+
+func TestMarathonSDSendGroupWithoutPortMappings(t *testing.T) {
+    var (
+        ch     = make(chan []*config.TargetGroup, 1)
+        client = func(client *http.Client, url, token string) (*AppList, error) {
+            return marathonTestAppListWithoutPortMappings(marathonValidLabel, 1), nil
+        }
+    )
+    if err := testUpdateServices(client, ch); err != nil {
+        t.Fatalf("Got error: %s", err)
+    }
+    select {
+    case tgs := <-ch:
+        tg := tgs[0]
+
+        if tg.Source != "test-service" {
+            t.Fatalf("Wrong target group name: %s", tg.Source)
+        }
+        if len(tg.Targets) != 2 {
+            t.Fatalf("Wrong number of targets: %v", tg.Targets)
+        }
+        tgt := tg.Targets[0]
+        if tgt[model.AddressLabel] != "mesos-slave1:31000" {
+            t.Fatalf("Wrong target address: %s", tgt[model.AddressLabel])
+        }
+        if tgt[model.LabelName(portMappingLabelPrefix+"prometheus")] != "" {
+            t.Fatalf("Wrong first portMappings label from the first port: %s", tgt[model.AddressLabel])
+        }
+        if tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")] != "" {
+            t.Fatalf("Wrong first portDefinitions label from the first port: %s", tgt[model.AddressLabel])
+        }
+        tgt = tg.Targets[1]
+        if tgt[model.AddressLabel] != "mesos-slave1:32000" {
+            t.Fatalf("Wrong target address: %s", tgt[model.AddressLabel])
+        }
+        if tgt[model.LabelName(portMappingLabelPrefix+"prometheus")] != "" {
+            t.Fatalf("Wrong portMappings label from the second port: %s", tgt[model.AddressLabel])
+        }
+        if tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")] != "yes" {
+            t.Fatalf("Wrong portDefinitions label from the second port: %s", tgt[model.AddressLabel])
+        }
+    default:
+        t.Fatal("Did not get a target group.")
+    }
+}
+
+func marathonTestAppListWithoutPortDefinitions(labels map[string]string, runningTasks int) *AppList {
+    var (
+        task = Task{
+            ID:    "test-task-1",
+            Host:  "mesos-slave1",
+            Ports: []uint32{31000, 32000},
+        }
+        docker = DockerContainer{
+            Image: "repo/image:tag",
+            PortMappings: []PortMappings{
+                {Labels: labels},
+                {Labels: make(map[string]string)},
+            },
+        }
+        container = Container{Docker: docker}
+        app       = App{
+            ID:           "test-service",
+            Tasks:        []Task{task},
+            RunningTasks: runningTasks,
+            Labels:       labels,
+            Container:    container,
+        }
+    )
+    return &AppList{
+        Apps: []App{app},
+    }
+}
+
+func TestMarathonSDSendGroupWithoutPortDefinitions(t *testing.T) {
+    var (
+        ch     = make(chan []*config.TargetGroup, 1)
+        client = func(client *http.Client, url, token string) (*AppList, error) {
+            return marathonTestAppListWithoutPortDefinitions(marathonValidLabel, 1), nil
+        }
+    )
+    if err := testUpdateServices(client, ch); err != nil {
+        t.Fatalf("Got error: %s", err)
+    }
+    select {
+    case tgs := <-ch:
+        tg := tgs[0]
+
+        if tg.Source != "test-service" {
+            t.Fatalf("Wrong target group name: %s", tg.Source)
+        }
+        if len(tg.Targets) != 2 {
+            t.Fatalf("Wrong number of targets: %v", tg.Targets)
+        }
+        tgt := tg.Targets[0]
+        if tgt[model.AddressLabel] != "mesos-slave1:31000" {
+            t.Fatalf("Wrong target address: %s", tgt[model.AddressLabel])
+        }
+        if tgt[model.LabelName(portMappingLabelPrefix+"prometheus")] != "yes" {
+            t.Fatalf("Wrong first portMappings label from the first port: %s", tgt[model.AddressLabel])
+        }
+        if tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")] != "" {
+            t.Fatalf("Wrong first portDefinitions label from the first port: %s", tgt[model.AddressLabel])
+        }
+        tgt = tg.Targets[1]
+        if tgt[model.AddressLabel] != "mesos-slave1:32000" {
+            t.Fatalf("Wrong target address: %s", tgt[model.AddressLabel])
+        }
+        if tgt[model.LabelName(portMappingLabelPrefix+"prometheus")] != "" {
+            t.Fatalf("Wrong portMappings label from the second port: %s", tgt[model.AddressLabel])
+        }
+        if tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")] != "" {
+            t.Fatalf("Wrong portDefinitions label from the second port: %s", tgt[model.AddressLabel])
+        }
+    default:
+        t.Fatal("Did not get a target group.")
+    }
+}
@@ -87,13 +87,13 @@ func relabel(labels model.LabelSet, cfg *config.RelabelConfig) model.LabelSet {
         }
         labels = out
     case config.RelabelLabelDrop:
-        for ln, _ := range labels {
+        for ln := range labels {
             if cfg.Regex.MatchString(string(ln)) {
                 delete(labels, ln)
             }
         }
     case config.RelabelLabelKeep:
-        for ln, _ := range labels {
+        for ln := range labels {
             if !cfg.Regex.MatchString(string(ln)) {
                 delete(labels, ln)
             }
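The labeldrop and labelkeep branches above differ only in negating the match: labeldrop deletes every label whose name matches the configured regex, labelkeep deletes every label whose name does not. A small standalone sketch of that behaviour with the standard regexp package (the real code goes through the anchored config.Regexp wrapper):

package main

import (
    "fmt"
    "regexp"
)

func main() {
    labels := map[string]string{
        "job":       "prometheus",
        "instance":  "localhost:9090",
        "temp_head": "a",
    }
    // labeldrop with regex "temp_.*" (anchored by hand here).
    re := regexp.MustCompile("^temp_.*$")
    for ln := range labels {
        if re.MatchString(ln) {
            delete(labels, ln)
        }
    }
    fmt.Println(labels) // map[instance:localhost:9090 job:prometheus]
}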
@@ -0,0 +1,66 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package remote
+
+import (
+    "sync"
+    "sync/atomic"
+    "time"
+)
+
+// ewmaRate tracks an exponentially weighted moving average of a per-second rate.
+type ewmaRate struct {
+    newEvents int64
+    alpha     float64
+    interval  time.Duration
+    lastRate  float64
+    init      bool
+    mutex     sync.Mutex
+}
+
+func newEWMARate(alpha float64, interval time.Duration) ewmaRate {
+    return ewmaRate{
+        alpha:    alpha,
+        interval: interval,
+    }
+}
+
+// rate returns the per-second rate.
+func (r *ewmaRate) rate() float64 {
+    r.mutex.Lock()
+    defer r.mutex.Unlock()
+    return r.lastRate
+}
+
+// tick assumes to be called every r.interval.
+func (r *ewmaRate) tick() {
+    newEvents := atomic.LoadInt64(&r.newEvents)
+    atomic.AddInt64(&r.newEvents, -newEvents)
+    instantRate := float64(newEvents) / r.interval.Seconds()
+
+    r.mutex.Lock()
+    defer r.mutex.Unlock()
+
+    if r.init {
+        r.lastRate += r.alpha * (instantRate - r.lastRate)
+    } else {
+        r.init = true
+        r.lastRate = instantRate
+    }
+}
+
+// inc counts one event.
+func (r *ewmaRate) incr(incr int64) {
+    atomic.AddInt64(&r.newEvents, incr)
+}
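ewma.go is a new file: incr counts events, tick is expected once per interval and folds the instantaneous rate into the moving average with weight alpha, and rate reads the smoothed per-second value. A usage sketch with made-up numbers, written as a throwaway helper in the same package (it would additionally need the fmt import):

func ewmaRateUsageSketch() {
    // alpha = ewmaWeight (0.2), interval = shardUpdateDuration (10s).
    r := newEWMARate(0.2, 10*time.Second)

    // First interval: 5000 events over 10s -> instant rate 500/s, adopted as-is.
    r.incr(5000)
    r.tick()
    fmt.Println(r.rate()) // 500

    // Second interval: 10000 events -> instant rate 1000/s.
    // Smoothed: 500 + 0.2*(1000-500) = 600.
    r.incr(10000)
    r.tick()
    fmt.Println(r.rate()) // 600
}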
@@ -14,6 +14,7 @@
 package remote

 import (
+    "math"
     "sync"
     "time"

@@ -32,13 +33,28 @@ const (
     subsystem = "remote_storage"
     queue     = "queue"

-    defaultShards            = 10
+    // With a maximum of 1000 shards, assuming an average of 100ms remote write
+    // time and 100 samples per batch, we will be able to push 1M samples/s.
+    defaultMaxShards         = 1000
     defaultMaxSamplesPerSend = 100
-    // The queue capacity is per shard.
-    defaultQueueCapacity     = 100 * 1024 / defaultShards
+
+    // defaultQueueCapacity is per shard - at 1000 shards, this will buffer
+    // 100M samples. It is configured to buffer 1000 batches, which at 100ms
+    // per batch is 1:40mins.
+    defaultQueueCapacity = defaultMaxSamplesPerSend * 1000
+
     defaultBatchSendDeadline = 5 * time.Second
-    logRateLimit             = 0.1 // Limit to 1 log event every 10s
-    logBurst                 = 10
+
+    // We track samples in/out and how long pushes take using an Exponentially
+    // Weighted Moving Average.
+    ewmaWeight          = 0.2
+    shardUpdateDuration = 10 * time.Second
+
+    // Allow 30% too many shards before scaling down.
+    shardToleranceFraction = 0.3
+
+    // Limit to 1 log event every 10s
+    logRateLimit = 0.1
+    logBurst     = 10
 )

 var (
@@ -97,6 +113,15 @@ var (
         },
         []string{queue},
     )
+    numShards = prometheus.NewGaugeVec(
+        prometheus.GaugeOpts{
+            Namespace: namespace,
+            Subsystem: subsystem,
+            Name:      "shards",
+            Help:      "The number of shards used for parallel sending to the remote storage.",
+        },
+        []string{queue},
+    )
 )

 func init() {
@@ -106,6 +131,7 @@ func init() {
     prometheus.MustRegister(sentBatchDuration)
     prometheus.MustRegister(queueLength)
     prometheus.MustRegister(queueCapacity)
+    prometheus.MustRegister(numShards)
 }

 // StorageClient defines an interface for sending a batch of samples to an
@@ -120,7 +146,7 @@ type StorageClient interface {
 // QueueManagerConfig configures a storage queue.
 type QueueManagerConfig struct {
     QueueCapacity     int           // Number of samples to buffer per shard before we start dropping them.
-    Shards            int           // Number of shards, i.e. amount of concurrency.
+    MaxShards         int           // Max number of shards, i.e. amount of concurrency.
     MaxSamplesPerSend int           // Maximum number of samples per send.
     BatchSendDeadline time.Duration // Maximum time sample will wait in buffer.
     ExternalLabels    model.LabelSet
@@ -132,11 +158,18 @@ type QueueManagerConfig struct {
 // indicated by the provided StorageClient.
 type QueueManager struct {
     cfg        QueueManagerConfig
-    shards     []chan *model.Sample
-    wg         sync.WaitGroup
-    done       chan struct{}
     queueName  string
     logLimiter *rate.Limiter
+
+    shardsMtx   sync.Mutex
+    shards      *shards
+    numShards   int
+    reshardChan chan int
+    quit        chan struct{}
+    wg          sync.WaitGroup
+
+    samplesIn, samplesOut, samplesOutDuration ewmaRate
+    integralAccumulator                       float64
 }

 // NewQueueManager builds a new QueueManager.
@@ -144,8 +177,8 @@ func NewQueueManager(cfg QueueManagerConfig) *QueueManager {
     if cfg.QueueCapacity == 0 {
         cfg.QueueCapacity = defaultQueueCapacity
     }
-    if cfg.Shards == 0 {
-        cfg.Shards = defaultShards
+    if cfg.MaxShards == 0 {
+        cfg.MaxShards = defaultMaxShards
     }
     if cfg.MaxSamplesPerSend == 0 {
         cfg.MaxSamplesPerSend = defaultMaxSamplesPerSend
@@ -154,21 +187,22 @@ func NewQueueManager(cfg QueueManagerConfig) *QueueManager {
         cfg.BatchSendDeadline = defaultBatchSendDeadline
     }

-    shards := make([]chan *model.Sample, cfg.Shards)
-    for i := 0; i < cfg.Shards; i++ {
-        shards[i] = make(chan *model.Sample, cfg.QueueCapacity)
-    }
-
     t := &QueueManager{
         cfg:         cfg,
-        shards:      shards,
-        done:        make(chan struct{}),
         queueName:   cfg.Client.Name(),
         logLimiter:  rate.NewLimiter(logRateLimit, logBurst),
+        numShards:   1,
+        reshardChan: make(chan int),
+        quit:        make(chan struct{}),
+
+        samplesIn:          newEWMARate(ewmaWeight, shardUpdateDuration),
+        samplesOut:         newEWMARate(ewmaWeight, shardUpdateDuration),
+        samplesOutDuration: newEWMARate(ewmaWeight, shardUpdateDuration),
     }
+    t.shards = t.newShards(t.numShards)
+    numShards.WithLabelValues(t.queueName).Set(float64(t.numShards))
     queueCapacity.WithLabelValues(t.queueName).Set(float64(t.cfg.QueueCapacity))
-    t.wg.Add(cfg.Shards)
+
     return t
 }

@@ -193,13 +227,13 @@ func (t *QueueManager) Append(s *model.Sample) error {
         return nil
     }

-    fp := snew.Metric.FastFingerprint()
-    shard := uint64(fp) % uint64(t.cfg.Shards)
+    t.shardsMtx.Lock()
+    enqueued := t.shards.enqueue(&snew)
+    t.shardsMtx.Unlock()

-    select {
-    case t.shards[shard] <- &snew:
+    if enqueued {
         queueLength.WithLabelValues(t.queueName).Inc()
-    default:
+    } else {
         droppedSamplesTotal.WithLabelValues(t.queueName).Inc()
         if t.logLimiter.Allow() {
             log.Warn("Remote storage queue full, discarding sample. Multiple subsequent messages of this kind may be suppressed.")
@@ -218,25 +252,186 @@ func (*QueueManager) NeedsThrottling() bool {
 // Start the queue manager sending samples to the remote storage.
 // Does not block.
 func (t *QueueManager) Start() {
-    for i := 0; i < t.cfg.Shards; i++ {
-        go t.runShard(i)
-    }
+    t.wg.Add(2)
+    go t.updateShardsLoop()
+    go t.reshardLoop()
+
+    t.shardsMtx.Lock()
+    defer t.shardsMtx.Unlock()
+    t.shards.start()
 }

 // Stop stops sending samples to the remote storage and waits for pending
 // sends to complete.
 func (t *QueueManager) Stop() {
     log.Infof("Stopping remote storage...")
-    for _, shard := range t.shards {
-        close(shard)
-    }
+    close(t.quit)
     t.wg.Wait()
+
+    t.shardsMtx.Lock()
+    defer t.shardsMtx.Unlock()
+    t.shards.stop()
     log.Info("Remote storage stopped.")
 }

-func (t *QueueManager) runShard(i int) {
+func (t *QueueManager) updateShardsLoop() {
     defer t.wg.Done()
-    shard := t.shards[i]

+    ticker := time.Tick(shardUpdateDuration)
+    for {
+        select {
+        case <-ticker:
+            t.calculateDesiredShards()
+        case <-t.quit:
+            return
+        }
+    }
+}
+
+func (t *QueueManager) calculateDesiredShards() {
+    t.samplesIn.tick()
+    t.samplesOut.tick()
+    t.samplesOutDuration.tick()
+
+    // We use the number of incoming samples as a prediction of how much work we
+    // will need to do next iteration. We add to this any pending samples
+    // (received - send) so we can catch up with any backlog. We use the average
+    // outgoing batch latency to work out how many shards we need.
+    var (
+        samplesIn          = t.samplesIn.rate()
+        samplesOut         = t.samplesOut.rate()
+        samplesPending     = samplesIn - samplesOut
+        samplesOutDuration = t.samplesOutDuration.rate()
+    )
+
+    // We use an integral accumulator, like in a PID, to help dampen oscillation.
+    t.integralAccumulator = t.integralAccumulator + (samplesPending * 0.1)
+
+    if samplesOut <= 0 {
+        return
+    }
+
+    var (
+        timePerSample = samplesOutDuration / samplesOut
+        desiredShards = (timePerSample * (samplesIn + samplesPending + t.integralAccumulator)) / float64(time.Second)
+    )
+    log.Debugf("QueueManager.caclulateDesiredShards samplesIn=%f, samplesOut=%f, samplesPending=%f, desiredShards=%f",
+        samplesIn, samplesOut, samplesPending, desiredShards)
+
+    // Changes in the number of shards must be greater than shardToleranceFraction.
+    var (
+        lowerBound = float64(t.numShards) * (1. - shardToleranceFraction)
+        upperBound = float64(t.numShards) * (1. + shardToleranceFraction)
+    )
+    log.Debugf("QueueManager.updateShardsLoop %f <= %f <= %f", lowerBound, desiredShards, upperBound)
+    if lowerBound <= desiredShards && desiredShards <= upperBound {
+        return
+    }
+
+    numShards := int(math.Ceil(desiredShards))
+    if numShards > t.cfg.MaxShards {
+        numShards = t.cfg.MaxShards
+    }
+    if numShards == t.numShards {
+        return
+    }
+
+    // Resharding can take some time, and we want this loop
+    // to stay close to shardUpdateDuration.
+    select {
+    case t.reshardChan <- numShards:
+        log.Infof("Remote storage resharding from %d to %d shards.", t.numShards, numShards)
+        t.numShards = numShards
+    default:
+        log.Infof("Currently resharding, skipping.")
+    }
+}
+
+func (t *QueueManager) reshardLoop() {
+    defer t.wg.Done()
+
+    for {
+        select {
+        case numShards := <-t.reshardChan:
+            t.reshard(numShards)
+        case <-t.quit:
+            return
+        }
+    }
+}
+
+func (t *QueueManager) reshard(n int) {
+    numShards.WithLabelValues(t.queueName).Set(float64(n))
+
+    t.shardsMtx.Lock()
+    newShards := t.newShards(n)
+    oldShards := t.shards
+    t.shards = newShards
+    t.shardsMtx.Unlock()
+
+    oldShards.stop()
+
+    // We start the newShards after we have stopped (the therefore completely
+    // flushed) the oldShards, to guarantee we only every deliver samples in
+    // order.
+    newShards.start()
+}
+
+type shards struct {
+    qm     *QueueManager
+    queues []chan *model.Sample
+    done   chan struct{}
+    wg     sync.WaitGroup
+}
+
+func (t *QueueManager) newShards(numShards int) *shards {
+    queues := make([]chan *model.Sample, numShards)
+    for i := 0; i < numShards; i++ {
+        queues[i] = make(chan *model.Sample, t.cfg.QueueCapacity)
+    }
+    s := &shards{
+        qm:     t,
+        queues: queues,
+        done:   make(chan struct{}),
+    }
+    s.wg.Add(numShards)
+    return s
+}
+
+func (s *shards) len() int {
+    return len(s.queues)
+}
+
+func (s *shards) start() {
+    for i := 0; i < len(s.queues); i++ {
+        go s.runShard(i)
+    }
+}
+
+func (s *shards) stop() {
+    for _, shard := range s.queues {
+        close(shard)
+    }
+    s.wg.Wait()
+}
+
+func (s *shards) enqueue(sample *model.Sample) bool {
+    s.qm.samplesIn.incr(1)
+
+    fp := sample.Metric.FastFingerprint()
+    shard := uint64(fp) % uint64(len(s.queues))
+
+    select {
+    case s.queues[shard] <- sample:
+        return true
+    default:
+        return false
+    }
+}
+
+func (s *shards) runShard(i int) {
+    defer s.wg.Done()
+    queue := s.queues[i]
+
     // Send batches of at most MaxSamplesPerSend samples to the remote storage.
     // If we have fewer samples than that, flush them out after a deadline
@@ -245,45 +440,48 @@ func (t *QueueManager) runShard(i int) {

     for {
         select {
-        case s, ok := <-shard:
+        case sample, ok := <-queue:
             if !ok {
                 if len(pendingSamples) > 0 {
-                    log.Infof("Flushing %d samples to remote storage...", len(pendingSamples))
-                    t.sendSamples(pendingSamples)
-                    log.Infof("Done flushing.")
+                    log.Debugf("Flushing %d samples to remote storage...", len(pendingSamples))
+                    s.sendSamples(pendingSamples)
+                    log.Debugf("Done flushing.")
                 }
                 return
             }

-            queueLength.WithLabelValues(t.queueName).Dec()
-            pendingSamples = append(pendingSamples, s)
+            queueLength.WithLabelValues(s.qm.queueName).Dec()
+            pendingSamples = append(pendingSamples, sample)

-            for len(pendingSamples) >= t.cfg.MaxSamplesPerSend {
-                t.sendSamples(pendingSamples[:t.cfg.MaxSamplesPerSend])
-                pendingSamples = pendingSamples[t.cfg.MaxSamplesPerSend:]
+            for len(pendingSamples) >= s.qm.cfg.MaxSamplesPerSend {
+                s.sendSamples(pendingSamples[:s.qm.cfg.MaxSamplesPerSend])
+                pendingSamples = pendingSamples[s.qm.cfg.MaxSamplesPerSend:]
             }
-        case <-time.After(t.cfg.BatchSendDeadline):
+        case <-time.After(s.qm.cfg.BatchSendDeadline):
             if len(pendingSamples) > 0 {
-                t.sendSamples(pendingSamples)
+                s.sendSamples(pendingSamples)
                 pendingSamples = pendingSamples[:0]
             }
         }
     }
 }

-func (t *QueueManager) sendSamples(s model.Samples) {
+func (s *shards) sendSamples(samples model.Samples) {
     // Samples are sent to the remote storage on a best-effort basis. If a
     // sample isn't sent correctly the first time, it's simply dropped on the
     // floor.
     begin := time.Now()
-    err := t.cfg.Client.Store(s)
-    duration := time.Since(begin).Seconds()
+    err := s.qm.cfg.Client.Store(samples)
+    duration := time.Since(begin)

     if err != nil {
-        log.Warnf("error sending %d samples to remote storage: %s", len(s), err)
-        failedSamplesTotal.WithLabelValues(t.queueName).Add(float64(len(s)))
+        log.Warnf("error sending %d samples to remote storage: %s", len(samples), err)
+        failedSamplesTotal.WithLabelValues(s.qm.queueName).Add(float64(len(samples)))
     } else {
-        sentSamplesTotal.WithLabelValues(t.queueName).Add(float64(len(s)))
+        sentSamplesTotal.WithLabelValues(s.qm.queueName).Add(float64(len(samples)))
     }
-    sentBatchDuration.WithLabelValues(t.queueName).Observe(duration)
+    sentBatchDuration.WithLabelValues(s.qm.queueName).Observe(duration.Seconds())
+
+    s.qm.samplesOut.incr(int64(len(samples)))
+    s.qm.samplesOutDuration.incr(int64(duration))
 }
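The resharding arithmetic in calculateDesiredShards above can be made concrete with the numbers from the defaultMaxShards comment (a 100-sample batch taking about 100ms, i.e. roughly 1ms of send time per sample). A standalone sketch of the desiredShards formula with assumed rates and the integral term left out:

package main

import (
    "fmt"
    "math"
    "time"
)

func main() {
    var (
        samplesIn      = 200000.0 // incoming rate, samples/s (assumed)
        samplesOut     = 100000.0 // outgoing rate, samples/s (assumed)
        samplesPending = samplesIn - samplesOut
        // ns of send time spent per second: 100k samples/s at ~1ms each.
        samplesOutDuration = 100000.0 * float64(time.Millisecond)
    )
    timePerSample := samplesOutDuration / samplesOut // ~1e6 ns per sample
    desiredShards := timePerSample * (samplesIn + samplesPending) / float64(time.Second)
    // 1e6 * 300000 / 1e9 = 300: the manager would reshard towards 300 shards,
    // capped at MaxShards and subject to the ±30% tolerance band.
    fmt.Println(math.Ceil(desiredShards))
}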
@@ -98,8 +98,8 @@ func TestSampleDelivery(t *testing.T) {
     c.expectSamples(samples[:len(samples)/2])

     m := NewQueueManager(QueueManagerConfig{
         Client: c,
-        Shards: 1,
+        MaxShards: 1,
     })

     // These should be received by the client.
@@ -185,8 +185,10 @@ func (c *TestBlockingStorageClient) Name() string {
 }

 func (t *QueueManager) queueLen() int {
+    t.shardsMtx.Lock()
+    defer t.shardsMtx.Unlock()
     queueLength := 0
-    for _, shard := range t.shards {
+    for _, shard := range t.shards.queues {
         queueLength += len(shard)
     }
     return queueLength
@@ -197,7 +199,7 @@ func TestSpawnNotMoreThanMaxConcurrentSendsGoroutines(t *testing.T) {
     // `MaxSamplesPerSend*Shards` samples should be consumed by the
     // per-shard goroutines, and then another `MaxSamplesPerSend`
     // should be left on the queue.
-    n := defaultMaxSamplesPerSend*defaultShards + defaultMaxSamplesPerSend
+    n := defaultMaxSamplesPerSend*1 + defaultMaxSamplesPerSend

     samples := make(model.Samples, 0, n)
     for i := 0; i < n; i++ {
@@ -214,6 +216,7 @@ func TestSpawnNotMoreThanMaxConcurrentSendsGoroutines(t *testing.T) {
     m := NewQueueManager(QueueManagerConfig{
         Client:        c,
         QueueCapacity: n,
+        MaxShards:     1,
     })

     m.Start()
@@ -250,7 +253,7 @@ func TestSpawnNotMoreThanMaxConcurrentSendsGoroutines(t *testing.T) {
     }

     numCalls := c.NumCalls()
-    if numCalls != uint64(defaultShards) {
-        t.Errorf("Saw %d concurrent sends, expected %d", numCalls, defaultShards)
+    if numCalls != uint64(1) {
+        t.Errorf("Saw %d concurrent sends, expected 1", numCalls)
     }
 }
@@ -261,7 +261,7 @@ func webUiTemplatesTargetsHtml() (*asset, error) {
         return nil, err
     }

-    info := bindataFileInfo{name: "web/ui/templates/targets.html", size: 2275, mode: os.FileMode(420), modTime: time.Unix(1489763290, 0)}
+    info := bindataFileInfo{name: "web/ui/templates/targets.html", size: 2275, mode: os.FileMode(420), modTime: time.Unix(1490015258, 0)}
     a := &asset{bytes: bytes, info: info}
     return a, nil
 }
@@ -425,8 +425,8 @@ func tmplFuncs(consolesPath string, opts *Options) template_text.FuncMap {
         },
         "consolesPath": func() string { return consolesPath },
         "pathPrefix":   func() string { return opts.ExternalURL.Path },
-        "buildVersion": func() string { return opts.Version.Revision[:7] },
-        "stripLabels": func(lset model.LabelSet, labels ...model.LabelName) model.LabelSet {
+        "buildVersion": func() string { return opts.Version.Revision },
+        "stripLabels": func(lset map[string]string, labels ...string) map[string]string {
             for _, ln := range labels {
                 delete(lset, ln)
             }