2016-04-13 14:08:22 +00:00
|
|
|
// Copyright 2016 The Prometheus Authors
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
|
|
|
|
2015-06-04 16:07:57 +00:00
|
|
|
package v1
|
|
|
|
|
|
|
|
import (
|
2017-10-25 04:21:42 +00:00
|
|
|
"context"
|
2015-06-04 16:07:57 +00:00
|
|
|
"fmt"
|
2016-12-24 23:37:46 +00:00
|
|
|
"math"
|
2017-12-03 05:07:05 +00:00
|
|
|
"math/rand"
|
2015-06-04 16:07:57 +00:00
|
|
|
"net/http"
|
2017-04-25 05:42:33 +00:00
|
|
|
"net/url"
|
2017-12-03 05:07:05 +00:00
|
|
|
"os"
|
|
|
|
"path/filepath"
|
2019-01-17 15:01:06 +00:00
|
|
|
"regexp"
|
2017-10-26 10:44:49 +00:00
|
|
|
"sort"
|
2015-06-04 16:07:57 +00:00
|
|
|
"strconv"
|
2019-11-11 21:42:24 +00:00
|
|
|
"strings"
|
2015-06-04 16:07:57 +00:00
|
|
|
"time"
|
2018-02-07 12:27:57 +00:00
|
|
|
"unsafe"
|
2015-06-04 16:07:57 +00:00
|
|
|
|
2018-07-06 17:44:45 +00:00
|
|
|
"github.com/go-kit/kit/log"
|
|
|
|
"github.com/go-kit/kit/log/level"
|
2018-02-07 12:27:57 +00:00
|
|
|
jsoniter "github.com/json-iterator/go"
|
2019-03-25 23:01:12 +00:00
|
|
|
"github.com/pkg/errors"
|
2018-10-10 23:09:08 +00:00
|
|
|
"github.com/prometheus/client_golang/prometheus"
|
2015-08-20 15:18:46 +00:00
|
|
|
"github.com/prometheus/common/model"
|
2015-09-24 15:07:11 +00:00
|
|
|
"github.com/prometheus/common/route"
|
2015-06-04 16:07:57 +00:00
|
|
|
|
2017-05-11 15:09:24 +00:00
|
|
|
"github.com/prometheus/prometheus/config"
|
2018-09-25 19:07:34 +00:00
|
|
|
"github.com/prometheus/prometheus/pkg/gate"
|
2016-12-29 08:27:30 +00:00
|
|
|
"github.com/prometheus/prometheus/pkg/labels"
|
2018-05-18 07:32:11 +00:00
|
|
|
"github.com/prometheus/prometheus/pkg/textparse"
|
2016-12-30 09:43:44 +00:00
|
|
|
"github.com/prometheus/prometheus/pkg/timestamp"
|
2017-10-23 20:28:17 +00:00
|
|
|
"github.com/prometheus/prometheus/prompb"
|
2015-06-04 16:07:57 +00:00
|
|
|
"github.com/prometheus/prometheus/promql"
|
2018-03-25 16:50:34 +00:00
|
|
|
"github.com/prometheus/prometheus/rules"
|
2018-02-01 09:55:07 +00:00
|
|
|
"github.com/prometheus/prometheus/scrape"
|
2016-12-29 08:27:30 +00:00
|
|
|
"github.com/prometheus/prometheus/storage"
|
2017-10-23 20:28:17 +00:00
|
|
|
"github.com/prometheus/prometheus/storage/remote"
|
2019-11-18 19:53:33 +00:00
|
|
|
"github.com/prometheus/prometheus/tsdb"
|
|
|
|
"github.com/prometheus/prometheus/tsdb/index"
|
2015-09-17 12:49:50 +00:00
|
|
|
"github.com/prometheus/prometheus/util/httputil"
|
2017-02-08 11:58:40 +00:00
|
|
|
"github.com/prometheus/prometheus/util/stats"
|
2015-06-04 16:07:57 +00:00
|
|
|
)
|
|
|
|
|
2018-10-10 23:09:08 +00:00
|
|
|
// Metric namespace and subsystem shared by the metrics this package
// registers (e.g. prometheus_api_remote_read_queries).
const (
	namespace = "prometheus"
	subsystem = "api"
)
|
|
|
|
|
2015-06-04 16:07:57 +00:00
|
|
|
// status is the value of the top-level "status" field in an API response.
type status string

const (
	statusSuccess status = "success"
	statusError   status = "error"
)
|
|
|
|
|
|
|
|
// errorType classifies API errors; it is serialized into the "errorType"
// field of an error response and drives HTTP status code selection.
type errorType string

const (
	errorNone        errorType = ""
	errorTimeout     errorType = "timeout"
	errorCanceled    errorType = "canceled"
	errorExec        errorType = "execution"
	errorBadData     errorType = "bad_data"
	errorInternal    errorType = "internal"
	errorUnavailable errorType = "unavailable"
	errorNotFound    errorType = "not_found"
)
|
|
|
|
|
2018-10-10 23:09:08 +00:00
|
|
|
// remoteReadQueries gauges the number of remote read queries currently in
// flight (executing or queued). It is registered with the default
// Prometheus registry in init.
var remoteReadQueries = prometheus.NewGauge(prometheus.GaugeOpts{
	Namespace: namespace,
	Subsystem: subsystem,
	Name:      "remote_read_queries",
	Help:      "The current number of remote read queries being executed or waiting.",
})
|
|
|
|
|
2015-06-04 16:07:57 +00:00
|
|
|
type apiError struct {
|
|
|
|
typ errorType
|
|
|
|
err error
|
|
|
|
}
|
|
|
|
|
|
|
|
func (e *apiError) Error() string {
|
|
|
|
return fmt.Sprintf("%s: %s", e.typ, e.err)
|
|
|
|
}
|
|
|
|
|
2016-12-02 12:31:43 +00:00
|
|
|
// targetRetriever exposes the scrape targets the server currently knows
// about: active targets and targets dropped during relabeling. The map key
// is presumably the scrape pool/job name — confirm against the scrape manager.
type targetRetriever interface {
	TargetsActive() map[string][]*scrape.Target
	TargetsDropped() map[string][]*scrape.Target
}
|
|
|
|
|
2017-01-13 09:20:11 +00:00
|
|
|
// alertmanagerRetriever exposes the URLs of discovered Alertmanagers,
// both the active set and the dropped set.
type alertmanagerRetriever interface {
	Alertmanagers() []*url.URL
	DroppedAlertmanagers() []*url.URL
}
|
|
|
|
|
2018-06-27 07:15:17 +00:00
|
|
|
// rulesRetriever provides the loaded rule groups and alerting rules,
// backing the /rules and /alerts endpoints registered in Register.
type rulesRetriever interface {
	RuleGroups() []*rules.Group
	AlertingRules() []*rules.AlertingRule
}
|
|
|
|
|
2019-11-02 15:53:32 +00:00
|
|
|
// PrometheusVersion contains build information about Prometheus.
// It is the payload served by the /status/buildinfo endpoint.
type PrometheusVersion struct {
	Version   string `json:"version"`
	Revision  string `json:"revision"`
	Branch    string `json:"branch"`
	BuildUser string `json:"buildUser"`
	BuildDate string `json:"buildDate"`
	GoVersion string `json:"goVersion"`
}
|
|
|
|
|
|
|
|
// RuntimeInfo contains runtime information about Prometheus.
// It is the payload served by the /status/runtimeinfo endpoint and is
// produced by the runtimeInfo callback supplied to NewAPI.
type RuntimeInfo struct {
	StartTime           time.Time `json:"startTime"`
	CWD                 string    `json:"CWD"`
	ReloadConfigSuccess bool      `json:"reloadConfigSuccess"`
	LastConfigTime      time.Time `json:"lastConfigTime"`
	ChunkCount          int64     `json:"chunkCount"`
	TimeSeriesCount     int64     `json:"timeSeriesCount"`
	CorruptionCount     int64     `json:"corruptionCount"`
	GoroutineCount      int       `json:"goroutineCount"`
	GOMAXPROCS          int       `json:"GOMAXPROCS"`
	GOGC                string    `json:"GOGC"`
	GODEBUG             string    `json:"GODEBUG"`
	StorageRetention    string    `json:"storageRetention"`
}
|
|
|
|
|
2015-06-04 16:07:57 +00:00
|
|
|
// response is the generic JSON envelope for every API reply. Data carries
// the endpoint-specific payload; ErrorType/Error are set only on failure,
// Warnings only when the query produced non-fatal warnings.
type response struct {
	Status    status      `json:"status"`
	Data      interface{} `json:"data,omitempty"`
	ErrorType errorType   `json:"errorType,omitempty"`
	Error     string      `json:"error,omitempty"`
	Warnings  []string    `json:"warnings,omitempty"`
}
|
|
|
|
|
|
|
|
// apiFuncResult bundles everything an endpoint handler can produce:
// the response payload, an optional error, non-fatal warnings, and a
// finalizer that the wrap handler runs after the response has been
// written (e.g. to close a query and release its buffers).
type apiFuncResult struct {
	data      interface{}
	err       *apiError
	warnings  storage.Warnings
	finalizer func()
}
|
|
|
|
|
2018-11-30 14:27:12 +00:00
|
|
|
// apiFunc is the signature shared by all v1 API endpoint handlers.
type apiFunc func(r *http.Request) apiFuncResult
|
2015-11-11 19:46:57 +00:00
|
|
|
|
2018-11-15 13:22:16 +00:00
|
|
|
// TSDBAdmin defines the tsdb interfaces used by the v1 API for admin operations.
// It backs the /admin/tsdb/* endpoints and is obtained via the db callback.
type TSDBAdmin interface {
	// CleanTombstones removes previously deleted data from disk.
	CleanTombstones() error
	// Delete marks samples in [mint, maxt] matching ms for deletion.
	Delete(mint, maxt int64, ms ...*labels.Matcher) error
	// Dir returns the on-disk storage directory.
	Dir() string
	// Snapshot writes a snapshot of the database into dir; withHead
	// also includes the in-memory head block.
	Snapshot(dir string, withHead bool) error
	// Head returns the in-memory head block of the TSDB.
	Head() *tsdb.Head
}
|
|
|
|
|
2015-06-04 16:07:57 +00:00
|
|
|
// API can register a set of endpoints in a router and handle
// them using the provided storage and query engine.
type API struct {
	// Queryable provides read access to the stored sample data.
	Queryable storage.Queryable
	// QueryEngine evaluates PromQL expressions.
	QueryEngine *promql.Engine

	targetRetriever       targetRetriever
	alertmanagerRetriever alertmanagerRetriever
	rulesRetriever        rulesRetriever
	// now is injectable for tests; defaults to time.Now.
	now func() time.Time
	// config returns the currently loaded configuration.
	config   func() config.Config
	flagsMap map[string]string
	// ready gates handlers until the server is ready to serve traffic.
	ready func(http.HandlerFunc) http.HandlerFunc

	// db yields the TSDB admin interface; only used when enableAdmin is set.
	db          func() TSDBAdmin
	enableAdmin bool
	logger      log.Logger
	// Limits applied to remote read requests.
	remoteReadSampleLimit     int
	remoteReadMaxBytesInFrame int
	// remoteReadGate bounds the number of concurrent remote read queries.
	remoteReadGate *gate.Gate
	// CORSOrigin matches origins allowed to make cross-origin requests.
	CORSOrigin *regexp.Regexp
	buildInfo  *PrometheusVersion
	runtimeInfo func() (RuntimeInfo, error)
}
|
|
|
|
|
2018-10-10 23:09:08 +00:00
|
|
|
// init registers a custom jsoniter encoder for promql.Point values used
// when marshaling query results, and registers the remote read gauge
// with the default Prometheus registry.
func init() {
	jsoniter.RegisterTypeEncoderFunc("promql.Point", marshalPointJSON, marshalPointJSONIsEmpty)
	prometheus.MustRegister(remoteReadQueries)
}
|
|
|
|
|
2015-11-11 19:46:57 +00:00
|
|
|
// NewAPI returns an initialized API type.
|
2017-10-06 15:20:20 +00:00
|
|
|
func NewAPI(
|
|
|
|
qe *promql.Engine,
|
2018-01-09 16:44:23 +00:00
|
|
|
q storage.Queryable,
|
2017-10-06 15:20:20 +00:00
|
|
|
tr targetRetriever,
|
|
|
|
ar alertmanagerRetriever,
|
|
|
|
configFunc func() config.Config,
|
api: Added v1/status/flags endpoint. (#3864)
Endpoint URL: /api/v1/status/flags
Example Output:
```json
{
"status": "success",
"data": {
"alertmanager.notification-queue-capacity": "10000",
"alertmanager.timeout": "10s",
"completion-bash": "false",
"completion-script-bash": "false",
"completion-script-zsh": "false",
"config.file": "my_cool_prometheus.yaml",
"help": "false",
"help-long": "false",
"help-man": "false",
"log.level": "info",
"query.lookback-delta": "5m",
"query.max-concurrency": "20",
"query.timeout": "2m",
"storage.tsdb.max-block-duration": "36h",
"storage.tsdb.min-block-duration": "2h",
"storage.tsdb.no-lockfile": "false",
"storage.tsdb.path": "data/",
"storage.tsdb.retention": "15d",
"version": "false",
"web.console.libraries": "console_libraries",
"web.console.templates": "consoles",
"web.enable-admin-api": "false",
"web.enable-lifecycle": "false",
"web.external-url": "",
"web.listen-address": "0.0.0.0:9090",
"web.max-connections": "512",
"web.read-timeout": "5m",
"web.route-prefix": "/",
"web.user-assets": ""
}
}
```
Signed-off-by: Bartek Plotka <bwplotka@gmail.com>
2018-02-21 08:49:02 +00:00
|
|
|
flagsMap map[string]string,
|
2017-10-06 15:20:20 +00:00
|
|
|
readyFunc func(http.HandlerFunc) http.HandlerFunc,
|
2018-11-15 13:22:16 +00:00
|
|
|
db func() TSDBAdmin,
|
2017-12-03 05:07:05 +00:00
|
|
|
enableAdmin bool,
|
2018-07-06 17:44:45 +00:00
|
|
|
logger log.Logger,
|
2018-06-27 07:15:17 +00:00
|
|
|
rr rulesRetriever,
|
2018-09-25 19:07:34 +00:00
|
|
|
remoteReadSampleLimit int,
|
|
|
|
remoteReadConcurrencyLimit int,
|
2019-08-19 20:16:10 +00:00
|
|
|
remoteReadMaxBytesInFrame int,
|
2019-01-17 15:01:06 +00:00
|
|
|
CORSOrigin *regexp.Regexp,
|
2019-11-02 15:53:32 +00:00
|
|
|
runtimeInfo func() (RuntimeInfo, error),
|
|
|
|
buildInfo *PrometheusVersion,
|
2017-10-06 15:20:20 +00:00
|
|
|
) *API {
|
2015-11-11 19:46:57 +00:00
|
|
|
return &API{
|
2017-01-13 09:20:11 +00:00
|
|
|
QueryEngine: qe,
|
2017-07-06 12:38:40 +00:00
|
|
|
Queryable: q,
|
2017-01-13 09:20:11 +00:00
|
|
|
targetRetriever: tr,
|
|
|
|
alertmanagerRetriever: ar,
|
2018-09-05 13:50:50 +00:00
|
|
|
|
2019-08-19 20:16:10 +00:00
|
|
|
now: time.Now,
|
|
|
|
config: configFunc,
|
|
|
|
flagsMap: flagsMap,
|
|
|
|
ready: readyFunc,
|
|
|
|
db: db,
|
|
|
|
enableAdmin: enableAdmin,
|
|
|
|
rulesRetriever: rr,
|
|
|
|
remoteReadSampleLimit: remoteReadSampleLimit,
|
|
|
|
remoteReadGate: gate.New(remoteReadConcurrencyLimit),
|
|
|
|
remoteReadMaxBytesInFrame: remoteReadMaxBytesInFrame,
|
|
|
|
logger: logger,
|
|
|
|
CORSOrigin: CORSOrigin,
|
2019-11-02 15:53:32 +00:00
|
|
|
runtimeInfo: runtimeInfo,
|
|
|
|
buildInfo: buildInfo,
|
2015-11-11 19:46:57 +00:00
|
|
|
}
|
2015-06-04 16:07:57 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Register the API's endpoints in the given router.
|
|
|
|
func (api *API) Register(r *route.Router) {
|
2018-03-21 08:16:16 +00:00
|
|
|
wrap := func(f apiFunc) http.HandlerFunc {
|
2015-09-17 12:49:50 +00:00
|
|
|
hf := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
2019-01-17 15:01:06 +00:00
|
|
|
httputil.SetCORS(w, api.CORSOrigin, r)
|
2018-11-30 14:27:12 +00:00
|
|
|
result := f(r)
|
|
|
|
if result.err != nil {
|
|
|
|
api.respondError(w, result.err, result.data)
|
|
|
|
} else if result.data != nil {
|
|
|
|
api.respond(w, result.data, result.warnings)
|
2016-01-26 00:32:46 +00:00
|
|
|
} else {
|
|
|
|
w.WriteHeader(http.StatusNoContent)
|
2015-06-04 16:07:57 +00:00
|
|
|
}
|
2018-11-30 14:27:12 +00:00
|
|
|
if result.finalizer != nil {
|
|
|
|
result.finalizer()
|
Optimise PromQL (#3966)
* Move range logic to 'eval'
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Make aggregegate range aware
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* PromQL is statically typed, so don't eval to find the type.
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Extend rangewrapper to multiple exprs
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Start making function evaluation ranged
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Make instant queries a special case of range queries
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Eliminate evalString
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Evaluate range vector functions one series at a time
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Make unary operators range aware
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Make binops range aware
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Pass time to range-aware functions.
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Make simple _over_time functions range aware
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Reduce allocs when working with matrix selectors
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Add basic benchmark for range evaluation
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Reuse objects for function arguments
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Do dropmetricname and allocating output vector only once.
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Add range-aware support for range vector functions with params
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Optimise holt_winters, cut cpu and allocs by ~25%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Make rate&friends range aware
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Make more functions range aware. Document calling convention.
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Make date functions range aware
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Make simple math functions range aware
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Convert more functions to be range aware
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Make more functions range aware
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Specialcase timestamp() with vector selector arg for range awareness
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Remove transition code for functions
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Remove the rest of the engine transition code
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Remove more obselete code
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Remove the last uses of the eval* functions
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Remove engine finalizers to prevent corruption
The finalizers set by matrixSelector were being called
just before the value they were retruning to the pool
was then being provided to the caller. Thus a concurrent query
could corrupt the data that the user has just been returned.
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Add new benchmark suite for range functinos
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Migrate existing benchmarks to new system
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Expand promql benchmarks
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Simply test by removing unused range code
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* When testing instant queries, check range queries too.
To protect against subsequent steps in a range query being
affected by the previous steps, add a test that evaluates
an instant query that we know works again as a range query
with the tiimestamp we care about not being the first step.
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Reuse ring for matrix iters. Put query results back in pool.
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Reuse buffer when iterating over matrix selectors
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Unary minus should remove metric name
Cut down benchmarks for faster runs.
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Reduce repetition in benchmark test cases
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Work series by series when doing normal vectorSelectors
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Optimise benchmark setup, cuts time by 60%
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Have rangeWrapper use an evalNodeHelper to cache across steps
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Use evalNodeHelper with functions
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Cache dropMetricName within a node evaluation.
This saves both the calculations and allocs done by dropMetricName
across steps.
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Reuse input vectors in rangewrapper
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Reuse the point slices in the matrixes input/output by rangeWrapper
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Make benchmark setup faster using AddFast
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Simplify benchmark code.
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Add caching in VectorBinop
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Use xor to have one-level resultMetric hash key
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Add more benchmarks
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Call Query.Close in apiv1
This allows point slices allocated for the response data
to be reused by later queries, saving allocations.
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Optimise histogram_quantile
It's now 5-10% faster with 97% less garbage generated for 1k steps
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Make the input collection in rangeVector linear rather than quadratic
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Optimise label_replace, for 1k steps 15x fewer allocs and 3x faster
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Optimise label_join, 1.8x faster and 11x less memory for 1k steps
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Expand benchmarks, cleanup comments, simplify numSteps logic.
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Address Fabian's comments
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Comments from Alin.
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Address jrv's comments
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Remove dead code
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Address Simon's comments.
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Rename populateIterators, pre-init some sizes
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Handle case where function has non-matrix args first
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Split rangeWrapper out to rangeEval function, improve comments
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Cleanup and make things more consistent
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Make EvalNodeHelper public
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
* Fabian's comments.
Signed-off-by: Brian Brazil <brian.brazil@robustperception.io>
2018-06-04 13:47:45 +00:00
|
|
|
}
|
2015-06-04 16:07:57 +00:00
|
|
|
})
|
2018-03-21 08:16:16 +00:00
|
|
|
return api.ready(httputil.CompressionHandler{
|
2015-09-18 14:51:53 +00:00
|
|
|
Handler: hf,
|
2018-03-21 08:16:16 +00:00
|
|
|
}.ServeHTTP)
|
2015-06-04 16:07:57 +00:00
|
|
|
}
|
|
|
|
|
2018-03-21 08:16:16 +00:00
|
|
|
r.Options("/*path", wrap(api.options))
|
2016-01-26 00:32:46 +00:00
|
|
|
|
2018-03-21 08:16:16 +00:00
|
|
|
r.Get("/query", wrap(api.query))
|
|
|
|
r.Post("/query", wrap(api.query))
|
|
|
|
r.Get("/query_range", wrap(api.queryRange))
|
|
|
|
r.Post("/query_range", wrap(api.queryRange))
|
2015-06-04 16:07:57 +00:00
|
|
|
|
2018-11-19 10:21:14 +00:00
|
|
|
r.Get("/labels", wrap(api.labelNames))
|
|
|
|
r.Post("/labels", wrap(api.labelNames))
|
2018-03-21 08:16:16 +00:00
|
|
|
r.Get("/label/:name/values", wrap(api.labelValues))
|
2015-06-09 14:09:31 +00:00
|
|
|
|
2018-03-21 08:16:16 +00:00
|
|
|
r.Get("/series", wrap(api.series))
|
2019-04-02 17:00:29 +00:00
|
|
|
r.Post("/series", wrap(api.series))
|
2018-03-21 08:16:16 +00:00
|
|
|
r.Del("/series", wrap(api.dropSeries))
|
2016-12-02 12:31:43 +00:00
|
|
|
|
2018-03-21 08:16:16 +00:00
|
|
|
r.Get("/targets", wrap(api.targets))
|
2018-05-18 07:32:11 +00:00
|
|
|
r.Get("/targets/metadata", wrap(api.targetMetadata))
|
2018-03-21 08:16:16 +00:00
|
|
|
r.Get("/alertmanagers", wrap(api.alertmanagers))
|
2017-05-11 15:09:24 +00:00
|
|
|
|
2019-12-10 14:56:16 +00:00
|
|
|
r.Get("/metadata", wrap(api.metricMetadata))
|
|
|
|
|
2018-03-21 08:16:16 +00:00
|
|
|
r.Get("/status/config", wrap(api.serveConfig))
|
2019-11-02 15:53:32 +00:00
|
|
|
r.Get("/status/runtimeinfo", wrap(api.serveRuntimeInfo))
|
|
|
|
r.Get("/status/buildinfo", wrap(api.serveBuildInfo))
|
2018-03-21 08:16:16 +00:00
|
|
|
r.Get("/status/flags", wrap(api.serveFlags))
|
2019-11-12 10:15:20 +00:00
|
|
|
r.Get("/status/tsdb", wrap(api.serveTSDBStatus))
|
2018-03-21 08:16:16 +00:00
|
|
|
r.Post("/read", api.ready(http.HandlerFunc(api.remoteRead)))
|
2017-12-03 05:07:05 +00:00
|
|
|
|
2018-03-25 16:50:34 +00:00
|
|
|
r.Get("/alerts", wrap(api.alerts))
|
|
|
|
r.Get("/rules", wrap(api.rules))
|
|
|
|
|
2017-12-03 05:07:05 +00:00
|
|
|
// Admin APIs
|
2018-03-21 08:16:16 +00:00
|
|
|
r.Post("/admin/tsdb/delete_series", wrap(api.deleteSeries))
|
|
|
|
r.Post("/admin/tsdb/clean_tombstones", wrap(api.cleanTombstones))
|
|
|
|
r.Post("/admin/tsdb/snapshot", wrap(api.snapshot))
|
2019-03-20 17:33:45 +00:00
|
|
|
|
|
|
|
r.Put("/admin/tsdb/delete_series", wrap(api.deleteSeries))
|
|
|
|
r.Put("/admin/tsdb/clean_tombstones", wrap(api.cleanTombstones))
|
|
|
|
r.Put("/admin/tsdb/snapshot", wrap(api.snapshot))
|
|
|
|
|
2015-06-04 16:07:57 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// queryData is the "data" payload for /query and /query_range responses:
// the PromQL result with its type and, when requested via the "stats"
// form value, the query statistics.
type queryData struct {
	ResultType promql.ValueType  `json:"resultType"`
	Result     promql.Value      `json:"result"`
	Stats      *stats.QueryStats `json:"stats,omitempty"`
}
|
|
|
|
|
2018-11-30 14:27:12 +00:00
|
|
|
func (api *API) options(r *http.Request) apiFuncResult {
|
|
|
|
return apiFuncResult{nil, nil, nil, nil}
|
2016-01-26 00:32:46 +00:00
|
|
|
}
|
|
|
|
|
2018-11-30 14:27:12 +00:00
|
|
|
func (api *API) query(r *http.Request) apiFuncResult {
|
2016-12-24 23:37:46 +00:00
|
|
|
var ts time.Time
|
2015-11-11 19:46:57 +00:00
|
|
|
if t := r.FormValue("time"); t != "" {
|
|
|
|
var err error
|
|
|
|
ts, err = parseTime(t)
|
|
|
|
if err != nil {
|
2019-03-25 23:01:12 +00:00
|
|
|
err = errors.Wrapf(err, "invalid parameter 'time'")
|
2018-11-30 14:27:12 +00:00
|
|
|
return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
|
2015-11-11 19:46:57 +00:00
|
|
|
}
|
|
|
|
} else {
|
|
|
|
ts = api.now()
|
2015-06-04 16:07:57 +00:00
|
|
|
}
|
2015-11-11 19:46:57 +00:00
|
|
|
|
2017-05-02 23:49:29 +00:00
|
|
|
ctx := r.Context()
|
2017-03-06 17:32:21 +00:00
|
|
|
if to := r.FormValue("timeout"); to != "" {
|
|
|
|
var cancel context.CancelFunc
|
|
|
|
timeout, err := parseDuration(to)
|
|
|
|
if err != nil {
|
2019-03-25 23:01:12 +00:00
|
|
|
err = errors.Wrapf(err, "invalid parameter 'timeout'")
|
2018-11-30 14:27:12 +00:00
|
|
|
return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
|
2017-03-06 17:32:21 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
ctx, cancel = context.WithTimeout(ctx, timeout)
|
|
|
|
defer cancel()
|
|
|
|
}
|
|
|
|
|
2018-01-09 16:44:23 +00:00
|
|
|
qry, err := api.QueryEngine.NewInstantQuery(api.Queryable, r.FormValue("query"), ts)
|
2015-06-04 16:07:57 +00:00
|
|
|
if err != nil {
|
2019-03-25 23:01:12 +00:00
|
|
|
err = errors.Wrapf(err, "invalid parameter 'query'")
|
2018-11-30 14:27:12 +00:00
|
|
|
return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
|
2015-06-04 16:07:57 +00:00
|
|
|
}
|
|
|
|
|
2017-03-06 17:32:21 +00:00
|
|
|
res := qry.Exec(ctx)
|
2015-06-04 16:07:57 +00:00
|
|
|
if res.Err != nil {
|
2018-11-30 14:27:12 +00:00
|
|
|
return apiFuncResult{nil, returnAPIError(res.Err), res.Warnings, qry.Close}
|
2015-06-04 16:07:57 +00:00
|
|
|
}
|
2017-11-16 15:30:48 +00:00
|
|
|
|
|
|
|
// Optional stats field in response if parameter "stats" is not empty.
|
|
|
|
var qs *stats.QueryStats
|
|
|
|
if r.FormValue("stats") != "" {
|
|
|
|
qs = stats.NewQueryStats(qry.Stats())
|
|
|
|
}
|
|
|
|
|
2018-11-30 14:27:12 +00:00
|
|
|
return apiFuncResult{&queryData{
|
2015-06-04 16:07:57 +00:00
|
|
|
ResultType: res.Value.Type(),
|
|
|
|
Result: res.Value,
|
2017-11-16 15:30:48 +00:00
|
|
|
Stats: qs,
|
2018-11-30 14:27:12 +00:00
|
|
|
}, nil, res.Warnings, qry.Close}
|
2015-06-04 16:07:57 +00:00
|
|
|
}
|
|
|
|
|
2018-11-30 14:27:12 +00:00
|
|
|
func (api *API) queryRange(r *http.Request) apiFuncResult {
|
2015-06-04 16:07:57 +00:00
|
|
|
start, err := parseTime(r.FormValue("start"))
|
|
|
|
if err != nil {
|
2019-03-25 23:01:12 +00:00
|
|
|
err = errors.Wrapf(err, "invalid parameter 'start'")
|
2018-11-30 14:27:12 +00:00
|
|
|
return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
|
2015-06-04 16:07:57 +00:00
|
|
|
}
|
|
|
|
end, err := parseTime(r.FormValue("end"))
|
|
|
|
if err != nil {
|
2019-03-25 23:01:12 +00:00
|
|
|
err = errors.Wrapf(err, "invalid parameter 'end'")
|
2018-11-30 14:27:12 +00:00
|
|
|
return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
|
2015-06-04 16:07:57 +00:00
|
|
|
}
|
2016-11-01 13:25:34 +00:00
|
|
|
if end.Before(start) {
|
|
|
|
err := errors.New("end timestamp must not be before start time")
|
2018-11-30 14:27:12 +00:00
|
|
|
return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
|
2016-11-01 13:25:34 +00:00
|
|
|
}
|
|
|
|
|
2015-06-04 16:07:57 +00:00
|
|
|
step, err := parseDuration(r.FormValue("step"))
|
|
|
|
if err != nil {
|
2019-03-25 23:01:12 +00:00
|
|
|
err = errors.Wrapf(err, "invalid parameter 'step'")
|
2018-11-30 14:27:12 +00:00
|
|
|
return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
|
2015-06-04 16:07:57 +00:00
|
|
|
}
|
|
|
|
|
2016-08-16 13:10:02 +00:00
|
|
|
if step <= 0 {
|
|
|
|
err := errors.New("zero or negative query resolution step widths are not accepted. Try a positive integer")
|
2018-11-30 14:27:12 +00:00
|
|
|
return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
|
2016-08-16 13:10:02 +00:00
|
|
|
}
|
|
|
|
|
2015-06-04 16:07:57 +00:00
|
|
|
// For safety, limit the number of returned points per timeseries.
|
|
|
|
// This is sufficient for 60s resolution for a week or 1h resolution for a year.
|
|
|
|
if end.Sub(start)/step > 11000 {
|
|
|
|
err := errors.New("exceeded maximum resolution of 11,000 points per timeseries. Try decreasing the query resolution (?step=XX)")
|
2018-11-30 14:27:12 +00:00
|
|
|
return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
|
2015-06-04 16:07:57 +00:00
|
|
|
}
|
|
|
|
|
2017-05-02 23:49:29 +00:00
|
|
|
ctx := r.Context()
|
2017-03-06 17:32:21 +00:00
|
|
|
if to := r.FormValue("timeout"); to != "" {
|
|
|
|
var cancel context.CancelFunc
|
|
|
|
timeout, err := parseDuration(to)
|
|
|
|
if err != nil {
|
2019-03-25 23:01:12 +00:00
|
|
|
err = errors.Wrap(err, "invalid parameter 'timeout'")
|
2018-11-30 14:27:12 +00:00
|
|
|
return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
|
2017-03-06 17:32:21 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
ctx, cancel = context.WithTimeout(ctx, timeout)
|
|
|
|
defer cancel()
|
|
|
|
}
|
|
|
|
|
2018-01-09 16:44:23 +00:00
|
|
|
qry, err := api.QueryEngine.NewRangeQuery(api.Queryable, r.FormValue("query"), start, end, step)
|
2015-06-04 16:07:57 +00:00
|
|
|
if err != nil {
|
2018-11-30 14:27:12 +00:00
|
|
|
return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
|
2015-06-04 16:07:57 +00:00
|
|
|
}
|
|
|
|
|
2017-03-06 17:32:21 +00:00
|
|
|
res := qry.Exec(ctx)
|
2015-06-04 16:07:57 +00:00
|
|
|
if res.Err != nil {
|
2018-11-30 14:27:12 +00:00
|
|
|
return apiFuncResult{nil, returnAPIError(res.Err), res.Warnings, qry.Close}
|
2015-06-04 16:07:57 +00:00
|
|
|
}
|
2016-12-29 08:27:30 +00:00
|
|
|
|
2017-02-08 11:58:40 +00:00
|
|
|
// Optional stats field in response if parameter "stats" is not empty.
|
|
|
|
var qs *stats.QueryStats
|
|
|
|
if r.FormValue("stats") != "" {
|
2017-11-16 15:30:48 +00:00
|
|
|
qs = stats.NewQueryStats(qry.Stats())
|
2017-02-08 11:58:40 +00:00
|
|
|
}
|
|
|
|
|
2018-11-30 14:27:12 +00:00
|
|
|
return apiFuncResult{&queryData{
|
2015-06-04 16:07:57 +00:00
|
|
|
ResultType: res.Value.Type(),
|
|
|
|
Result: res.Value,
|
2017-02-08 11:58:40 +00:00
|
|
|
Stats: qs,
|
2018-11-30 14:27:12 +00:00
|
|
|
}, nil, res.Warnings, qry.Close}
|
2015-06-04 16:07:57 +00:00
|
|
|
}
|
|
|
|
|
2018-11-14 09:55:54 +00:00
|
|
|
func returnAPIError(err error) *apiError {
|
|
|
|
if err == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
switch err.(type) {
|
|
|
|
case promql.ErrQueryCanceled:
|
|
|
|
return &apiError{errorCanceled, err}
|
|
|
|
case promql.ErrQueryTimeout:
|
|
|
|
return &apiError{errorTimeout, err}
|
|
|
|
case promql.ErrStorage:
|
|
|
|
return &apiError{errorInternal, err}
|
|
|
|
}
|
|
|
|
|
|
|
|
return &apiError{errorExec, err}
|
|
|
|
}
|
|
|
|
|
2018-11-30 14:27:12 +00:00
|
|
|
func (api *API) labelNames(r *http.Request) apiFuncResult {
|
2018-11-19 10:21:14 +00:00
|
|
|
q, err := api.Queryable.Querier(r.Context(), math.MinInt64, math.MaxInt64)
|
|
|
|
if err != nil {
|
2018-11-30 14:27:12 +00:00
|
|
|
return apiFuncResult{nil, &apiError{errorExec, err}, nil, nil}
|
2018-11-19 10:21:14 +00:00
|
|
|
}
|
|
|
|
defer q.Close()
|
|
|
|
|
2019-06-17 07:31:17 +00:00
|
|
|
names, warnings, err := q.LabelNames()
|
2018-11-19 10:21:14 +00:00
|
|
|
if err != nil {
|
2019-06-17 07:31:17 +00:00
|
|
|
return apiFuncResult{nil, &apiError{errorExec, err}, warnings, nil}
|
2018-11-19 10:21:14 +00:00
|
|
|
}
|
2019-06-17 07:31:17 +00:00
|
|
|
return apiFuncResult{names, nil, warnings, nil}
|
2018-11-19 10:21:14 +00:00
|
|
|
}
|
|
|
|
|
2018-11-30 14:27:12 +00:00
|
|
|
func (api *API) labelValues(r *http.Request) apiFuncResult {
|
2017-10-04 19:04:15 +00:00
|
|
|
ctx := r.Context()
|
|
|
|
name := route.Param(ctx, "name")
|
2015-06-08 19:19:52 +00:00
|
|
|
|
2015-08-20 15:18:46 +00:00
|
|
|
if !model.LabelNameRE.MatchString(name) {
|
2019-03-25 23:01:12 +00:00
|
|
|
return apiFuncResult{nil, &apiError{errorBadData, errors.Errorf("invalid label name: %q", name)}, nil, nil}
|
2015-06-08 19:19:52 +00:00
|
|
|
}
|
2017-10-04 19:04:15 +00:00
|
|
|
q, err := api.Queryable.Querier(ctx, math.MinInt64, math.MaxInt64)
|
2016-10-12 17:34:22 +00:00
|
|
|
if err != nil {
|
2018-11-30 14:27:12 +00:00
|
|
|
return apiFuncResult{nil, &apiError{errorExec, err}, nil, nil}
|
2016-10-12 17:34:22 +00:00
|
|
|
}
|
|
|
|
defer q.Close()
|
|
|
|
|
2019-06-17 07:31:17 +00:00
|
|
|
vals, warnings, err := q.LabelValues(name)
|
2016-07-11 18:27:25 +00:00
|
|
|
if err != nil {
|
2019-06-17 07:31:17 +00:00
|
|
|
return apiFuncResult{nil, &apiError{errorExec, err}, warnings, nil}
|
2016-07-11 18:27:25 +00:00
|
|
|
}
|
2015-06-04 16:07:57 +00:00
|
|
|
|
2019-06-17 07:31:17 +00:00
|
|
|
return apiFuncResult{vals, nil, warnings, nil}
|
2015-06-04 16:07:57 +00:00
|
|
|
}
|
|
|
|
|
2017-01-16 13:09:59 +00:00
|
|
|
var (
	// minTime and maxTime are the widest open-ended time range the API
	// accepts when "start"/"end" are omitted. The +/-62135596801s offset
	// keeps the values inside time.Time's representable range so that
	// millisecond conversions do not overflow.
	// NOTE(review): 62135596801 appears to be the second count between
	// year 1 and the Unix epoch (plus one) — confirm against timestamp
	// conversion helpers before relying on the exact bound.
	minTime = time.Unix(math.MinInt64/1000+62135596801, 0).UTC()
	maxTime = time.Unix(math.MaxInt64/1000-62135596801, 999999999).UTC()

	// Pre-rendered RFC 3339 forms of the bounds, used for fast comparison/
	// formatting without re-rendering on every request.
	minTimeFormatted = minTime.Format(time.RFC3339Nano)
	maxTimeFormatted = maxTime.Format(time.RFC3339Nano)
)
|
|
|
|
|
2018-11-30 14:27:12 +00:00
|
|
|
// series handles GET/POST /api/v1/series: it returns the label sets of all
// series matching at least one of the supplied match[] selectors within the
// optional [start, end] window (defaulting to the full retention range).
func (api *API) series(r *http.Request) apiFuncResult {
	if err := r.ParseForm(); err != nil {
		return apiFuncResult{nil, &apiError{errorBadData, errors.Wrapf(err, "error parsing form values")}, nil, nil}
	}
	// At least one selector is mandatory; an unconstrained scan is rejected.
	if len(r.Form["match[]"]) == 0 {
		return apiFuncResult{nil, &apiError{errorBadData, errors.New("no match[] parameter provided")}, nil, nil}
	}

	// Missing "start" falls back to the minimum representable time.
	var start time.Time
	if t := r.FormValue("start"); t != "" {
		var err error
		start, err = parseTime(t)
		if err != nil {
			return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
		}
	} else {
		start = minTime
	}

	// Missing "end" falls back to the maximum representable time.
	var end time.Time
	if t := r.FormValue("end"); t != "" {
		var err error
		end, err = parseTime(t)
		if err != nil {
			return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
		}
	} else {
		end = maxTime
	}

	// Parse every match[] expression into its matcher set; any single bad
	// selector fails the whole request.
	var matcherSets [][]*labels.Matcher
	for _, s := range r.Form["match[]"] {
		matchers, err := promql.ParseMetricSelector(s)
		if err != nil {
			return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
		}
		matcherSets = append(matcherSets, matchers)
	}

	q, err := api.Queryable.Querier(r.Context(), timestamp.FromTime(start), timestamp.FromTime(end))
	if err != nil {
		return apiFuncResult{nil, &apiError{errorExec, err}, nil, nil}
	}
	defer q.Close()

	// Run one Select per matcher set, accumulating warnings across all of
	// them; warnings collected before a failure are still returned.
	var sets []storage.SeriesSet
	var warnings storage.Warnings
	for _, mset := range matcherSets {
		s, wrn, err := q.Select(nil, mset...) //TODO
		warnings = append(warnings, wrn...)
		if err != nil {
			return apiFuncResult{nil, &apiError{errorExec, err}, warnings, nil}
		}
		sets = append(sets, s)
	}

	// Merge the per-selector sets so each series appears once, then collect
	// only the label sets (series identity), not samples.
	set := storage.NewMergeSeriesSet(sets, nil)
	metrics := []labels.Labels{}
	for set.Next() {
		metrics = append(metrics, set.At().Labels())
	}
	if set.Err() != nil {
		return apiFuncResult{nil, &apiError{errorExec, set.Err()}, warnings, nil}
	}

	return apiFuncResult{metrics, nil, warnings, nil}
}
|
|
|
|
|
2018-11-30 14:27:12 +00:00
|
|
|
func (api *API) dropSeries(r *http.Request) apiFuncResult {
|
2019-03-25 23:01:12 +00:00
|
|
|
return apiFuncResult{nil, &apiError{errorInternal, errors.New("not implemented")}, nil, nil}
|
2015-06-09 14:09:31 +00:00
|
|
|
}
|
|
|
|
|
2017-03-06 17:51:27 +00:00
|
|
|
// Target has the information for one target.
type Target struct {
	// Labels before any processing.
	DiscoveredLabels map[string]string `json:"discoveredLabels"`
	// Any labels that are added to this target and its metrics.
	Labels map[string]string `json:"labels"`

	// ScrapePool is the name of the scrape pool the target belongs to.
	ScrapePool string `json:"scrapePool"`
	// ScrapeURL is the full URL the target is scraped from.
	ScrapeURL string `json:"scrapeUrl"`

	// LastError holds the message of the most recent scrape error, or ""
	// if the last scrape succeeded.
	LastError string `json:"lastError"`
	// LastScrape is the time of the most recent scrape attempt.
	LastScrape time.Time `json:"lastScrape"`
	// LastScrapeDuration is the duration of that scrape in seconds.
	LastScrapeDuration float64 `json:"lastScrapeDuration"`
	// Health is the current up/down/unknown state of the target.
	Health scrape.TargetHealth `json:"health"`
}
|
|
|
|
|
2018-02-21 17:26:18 +00:00
|
|
|
// DroppedTarget has the information for one target that was dropped during relabelling.
type DroppedTarget struct {
	// Labels before any processing.
	DiscoveredLabels map[string]string `json:"discoveredLabels"`
}
|
|
|
|
|
2017-03-06 11:46:37 +00:00
|
|
|
// TargetDiscovery has all the active targets.
type TargetDiscovery struct {
	// ActiveTargets are targets currently being scraped.
	ActiveTargets []*Target `json:"activeTargets"`
	// DroppedTargets are targets discarded by relabelling.
	DroppedTargets []*DroppedTarget `json:"droppedTargets"`
}
|
|
|
|
|
2018-11-30 14:27:12 +00:00
|
|
|
// targets handles GET /api/v1/targets: it reports active and/or dropped
// scrape targets, optionally filtered by the "state" query parameter
// ("active", "dropped", "any", or empty for both).
func (api *API) targets(r *http.Request) apiFuncResult {
	// sortKeys returns the scrape-pool names in sorted order plus the total
	// number of targets across all pools, so output is deterministic and
	// slices can be pre-sized.
	sortKeys := func(targets map[string][]*scrape.Target) ([]string, int) {
		var n int
		keys := make([]string, 0, len(targets))
		for k := range targets {
			keys = append(keys, k)
			n += len(targets[k])
		}
		sort.Strings(keys)
		return keys, n
	}

	// flatten concatenates all per-pool target slices in sorted pool order.
	flatten := func(targets map[string][]*scrape.Target) []*scrape.Target {
		keys, n := sortKeys(targets)
		res := make([]*scrape.Target, 0, n)
		for _, k := range keys {
			res = append(res, targets[k]...)
		}
		return res
	}

	state := strings.ToLower(r.URL.Query().Get("state"))
	showActive := state == "" || state == "any" || state == "active"
	showDropped := state == "" || state == "any" || state == "dropped"
	res := &TargetDiscovery{}

	if showActive {
		targetsActive := api.targetRetriever.TargetsActive()
		activeKeys, numTargets := sortKeys(targetsActive)
		res.ActiveTargets = make([]*Target, 0, numTargets)

		for _, key := range activeKeys {
			for _, target := range targetsActive[key] {
				// Flatten the last scrape error into a string; "" means
				// the last scrape succeeded.
				lastErrStr := ""
				lastErr := target.LastError()
				if lastErr != nil {
					lastErrStr = lastErr.Error()
				}

				res.ActiveTargets = append(res.ActiveTargets, &Target{
					DiscoveredLabels:   target.DiscoveredLabels().Map(),
					Labels:             target.Labels().Map(),
					ScrapePool:         key,
					ScrapeURL:          target.URL().String(),
					LastError:          lastErrStr,
					LastScrape:         target.LastScrape(),
					LastScrapeDuration: target.LastScrapeDuration().Seconds(),
					Health:             target.Health(),
				})
			}
		}
	} else {
		// Keep the field non-nil so it marshals as [] rather than null.
		res.ActiveTargets = []*Target{}
	}
	if showDropped {
		tDropped := flatten(api.targetRetriever.TargetsDropped())
		res.DroppedTargets = make([]*DroppedTarget, 0, len(tDropped))
		for _, t := range tDropped {
			res.DroppedTargets = append(res.DroppedTargets, &DroppedTarget{
				DiscoveredLabels: t.DiscoveredLabels().Map(),
			})
		}
	} else {
		// Same []-not-null consideration as above.
		res.DroppedTargets = []*DroppedTarget{}
	}
	return apiFuncResult{res, nil, nil, nil}
}
|
|
|
|
|
2018-11-08 14:11:38 +00:00
|
|
|
func matchLabels(lset labels.Labels, matchers []*labels.Matcher) bool {
|
|
|
|
for _, m := range matchers {
|
|
|
|
if !m.Matches(lset.Get(m.Name)) {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
2018-11-30 14:27:12 +00:00
|
|
|
// targetMetadata handles GET /api/v1/targets/metadata: it returns metric
// metadata collected from active targets, optionally restricted by a
// "match_target" label selector, a specific "metric" name, and a "limit"
// on the number of returned entries.
func (api *API) targetMetadata(r *http.Request) apiFuncResult {
	// limit == -1 means unlimited.
	limit := -1
	if s := r.FormValue("limit"); s != "" {
		var err error
		if limit, err = strconv.Atoi(s); err != nil {
			return apiFuncResult{nil, &apiError{errorBadData, errors.New("limit must be a number")}, nil, nil}
		}
	}

	matchTarget := r.FormValue("match_target")

	var matchers []*labels.Matcher

	var err error

	if matchTarget != "" {
		matchers, err = promql.ParseMetricSelector(matchTarget)
		if err != nil {
			return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
		}
	}

	metric := r.FormValue("metric")

	res := []metricMetadata{}
	for _, tt := range api.targetRetriever.TargetsActive() {
		for _, t := range tt {
			// NOTE(review): this break only exits the inner per-pool loop,
			// and a single target's full metadata list is appended without
			// re-checking the limit — so "limit" is an approximate cap, not
			// an exact one. Confirm whether that is intended.
			if limit >= 0 && len(res) >= limit {
				break
			}
			// Filter targets that don't satisfy the label matchers.
			if matchTarget != "" && !matchLabels(t.Labels(), matchers) {
				continue
			}
			// If no metric is specified, get the full list for the target.
			if metric == "" {
				for _, md := range t.MetadataList() {
					res = append(res, metricMetadata{
						Target: t.Labels(),
						Metric: md.Metric,
						Type:   md.Type,
						Help:   md.Help,
						Unit:   md.Unit,
					})
				}
				continue
			}
			// Get metadata for the specified metric.
			if md, ok := t.Metadata(metric); ok {
				res = append(res, metricMetadata{
					Target: t.Labels(),
					Type:   md.Type,
					Help:   md.Help,
					Unit:   md.Unit,
				})
			}
		}
	}

	return apiFuncResult{res, nil, nil, nil}
}
|
|
|
|
|
|
|
|
// metricMetadata is one metadata entry in the /targets/metadata response,
// pairing a target's label set with a metric's metadata.
type metricMetadata struct {
	// Target identifies the scrape target the metadata came from.
	Target labels.Labels `json:"target"`
	// Metric is omitted when the request already named a specific metric.
	Metric string               `json:"metric,omitempty"`
	Type   textparse.MetricType `json:"type"`
	Help   string               `json:"help"`
	Unit   string               `json:"unit"`
}
|
|
|
|
|
2017-03-06 17:51:27 +00:00
|
|
|
// AlertmanagerDiscovery has all the active Alertmanagers.
type AlertmanagerDiscovery struct {
	// ActiveAlertmanagers are the Alertmanagers notifications are sent to.
	ActiveAlertmanagers []*AlertmanagerTarget `json:"activeAlertmanagers"`
	// DroppedAlertmanagers were discarded by relabelling.
	DroppedAlertmanagers []*AlertmanagerTarget `json:"droppedAlertmanagers"`
}
|
|
|
|
|
2017-03-06 17:51:27 +00:00
|
|
|
// AlertmanagerTarget has info on one AM.
type AlertmanagerTarget struct {
	// URL is the full endpoint notifications are posted to.
	URL string `json:"url"`
}
|
|
|
|
|
2018-11-30 14:27:12 +00:00
|
|
|
func (api *API) alertmanagers(r *http.Request) apiFuncResult {
|
2017-01-13 09:20:11 +00:00
|
|
|
urls := api.alertmanagerRetriever.Alertmanagers()
|
2018-02-21 09:00:07 +00:00
|
|
|
droppedURLS := api.alertmanagerRetriever.DroppedAlertmanagers()
|
|
|
|
ams := &AlertmanagerDiscovery{ActiveAlertmanagers: make([]*AlertmanagerTarget, len(urls)), DroppedAlertmanagers: make([]*AlertmanagerTarget, len(droppedURLS))}
|
2017-04-25 05:42:33 +00:00
|
|
|
for i, url := range urls {
|
|
|
|
ams.ActiveAlertmanagers[i] = &AlertmanagerTarget{URL: url.String()}
|
2017-01-13 09:20:11 +00:00
|
|
|
}
|
2018-02-21 09:00:07 +00:00
|
|
|
for i, url := range droppedURLS {
|
|
|
|
ams.DroppedAlertmanagers[i] = &AlertmanagerTarget{URL: url.String()}
|
|
|
|
}
|
2018-11-30 14:27:12 +00:00
|
|
|
return apiFuncResult{ams, nil, nil, nil}
|
2017-01-13 09:20:11 +00:00
|
|
|
}
|
|
|
|
|
2018-06-27 07:15:17 +00:00
|
|
|
// AlertDiscovery has info for all active alerts.
type AlertDiscovery struct {
	Alerts []*Alert `json:"alerts"`
}
|
|
|
|
|
2018-06-27 07:15:17 +00:00
|
|
|
// Alert has info for an alert.
type Alert struct {
	Labels      labels.Labels `json:"labels"`
	Annotations labels.Labels `json:"annotations"`
	// State is the rendered alert state (see rules.AlertState.String()).
	State string `json:"state"`
	// ActiveAt is the time the alert became active; pointer so it can be
	// omitted from JSON when unset.
	ActiveAt *time.Time `json:"activeAt,omitempty"`
	// Value is the sample value rendered in scientific notation.
	Value string `json:"value"`
}
|
|
|
|
|
2018-11-30 14:27:12 +00:00
|
|
|
func (api *API) alerts(r *http.Request) apiFuncResult {
|
2018-06-27 07:15:17 +00:00
|
|
|
alertingRules := api.rulesRetriever.AlertingRules()
|
|
|
|
alerts := []*Alert{}
|
2018-03-25 16:50:34 +00:00
|
|
|
|
2018-06-27 07:15:17 +00:00
|
|
|
for _, alertingRule := range alertingRules {
|
|
|
|
alerts = append(
|
|
|
|
alerts,
|
|
|
|
rulesAlertsToAPIAlerts(alertingRule.ActiveAlerts())...,
|
|
|
|
)
|
|
|
|
}
|
2018-03-25 16:50:34 +00:00
|
|
|
|
2018-06-27 07:15:17 +00:00
|
|
|
res := &AlertDiscovery{Alerts: alerts}
|
|
|
|
|
2018-11-30 14:27:12 +00:00
|
|
|
return apiFuncResult{res, nil, nil, nil}
|
2018-06-27 07:15:17 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func rulesAlertsToAPIAlerts(rulesAlerts []*rules.Alert) []*Alert {
|
|
|
|
apiAlerts := make([]*Alert, len(rulesAlerts))
|
|
|
|
for i, ruleAlert := range rulesAlerts {
|
|
|
|
apiAlerts[i] = &Alert{
|
|
|
|
Labels: ruleAlert.Labels,
|
|
|
|
Annotations: ruleAlert.Annotations,
|
|
|
|
State: ruleAlert.State.String(),
|
|
|
|
ActiveAt: &ruleAlert.ActiveAt,
|
2019-05-21 09:41:54 +00:00
|
|
|
Value: strconv.FormatFloat(ruleAlert.Value, 'e', -1, 64),
|
2018-03-25 16:50:34 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-06-27 07:15:17 +00:00
|
|
|
return apiAlerts
|
2018-03-25 16:50:34 +00:00
|
|
|
}
|
|
|
|
|
2019-12-10 14:56:16 +00:00
|
|
|
// metadata is one distinct (type, help, unit) triple for a metric name in
// the /metadata response. It is comparable so it can serve as a map key for
// deduplication across targets.
type metadata struct {
	Type textparse.MetricType `json:"type"`
	Help string               `json:"help"`
	Unit string               `json:"unit"`
}
|
|
|
|
|
|
|
|
// metricMetadata handles GET /api/v1/metadata: it returns the deduplicated
// metadata (type/help/unit triples) per metric name, aggregated across all
// active targets. "metric" restricts the result to one metric name and
// "limit" caps the number of metric names returned.
func (api *API) metricMetadata(r *http.Request) apiFuncResult {
	// metrics maps metric name -> set of distinct metadata triples, using
	// the comparable metadata struct as a map key for deduplication.
	metrics := map[string]map[metadata]struct{}{}

	// limit == -1 means unlimited.
	limit := -1
	if s := r.FormValue("limit"); s != "" {
		var err error
		if limit, err = strconv.Atoi(s); err != nil {
			return apiFuncResult{nil, &apiError{errorBadData, errors.New("limit must be a number")}, nil, nil}
		}
	}

	metric := r.FormValue("metric")

	for _, tt := range api.targetRetriever.TargetsActive() {
		for _, t := range tt {

			// No metric filter: fold the target's full metadata list in.
			if metric == "" {
				for _, mm := range t.MetadataList() {
					m := metadata{Type: mm.Type, Help: mm.Help, Unit: mm.Unit}
					ms, ok := metrics[mm.Metric]

					if !ok {
						ms = map[metadata]struct{}{}
						metrics[mm.Metric] = ms
					}
					ms[m] = struct{}{}
				}
				continue
			}

			// Metric filter set: only this target's entry for that metric.
			if md, ok := t.Metadata(metric); ok {
				m := metadata{Type: md.Type, Help: md.Help, Unit: md.Unit}
				ms, ok := metrics[md.Metric]

				if !ok {
					ms = map[metadata]struct{}{}
					metrics[md.Metric] = ms
				}
				ms[m] = struct{}{}
			}
		}
	}

	// Put the elements from the pseudo-set into a slice for marshaling.
	// Note: map iteration order is random, so with a limit the choice of
	// which metric names are returned is nondeterministic.
	res := map[string][]metadata{}

	for name, set := range metrics {
		if limit >= 0 && len(res) >= limit {
			break
		}

		s := []metadata{}
		for metadata := range set {
			s = append(s, metadata)
		}
		res[name] = s
	}

	return apiFuncResult{res, nil, nil, nil}
}
|
|
|
|
|
2018-06-27 07:15:17 +00:00
|
|
|
// RuleDiscovery has info for all rules.
type RuleDiscovery struct {
	RuleGroups []*RuleGroup `json:"groups"`
}
|
|
|
|
|
2018-06-27 07:15:17 +00:00
|
|
|
// RuleGroup has info for rules which are part of a group.
type RuleGroup struct {
	Name string `json:"name"`
	// File is the rule file the group was loaded from.
	File string `json:"file"`
	// In order to preserve rule ordering, while exposing type (alerting or recording)
	// specific properties, both alerting and recording rules are exposed in the
	// same array.
	Rules []rule `json:"rules"`
	// Interval is the group's evaluation interval in seconds.
	Interval float64 `json:"interval"`
}
|
|
|
|
|
2018-06-27 07:15:17 +00:00
|
|
|
// rule is the common element type of RuleGroup.Rules; concrete values are
// alertingRule or recordingRule.
type rule interface{}
|
2018-03-25 16:50:34 +00:00
|
|
|
|
2018-06-27 07:15:17 +00:00
|
|
|
// alertingRule is the API representation of one alerting rule.
type alertingRule struct {
	// State can be "pending", "firing", "inactive".
	State string `json:"state"`
	Name  string `json:"name"`
	// Query is the rendered PromQL expression of the rule.
	Query string `json:"query"`
	// Duration is the rule's "for" clause in seconds.
	Duration    float64       `json:"duration"`
	Labels      labels.Labels `json:"labels"`
	Annotations labels.Labels `json:"annotations"`
	// Alerts are the currently active alerts produced by this rule.
	Alerts []*Alert         `json:"alerts"`
	Health rules.RuleHealth `json:"health"`
	// LastError holds the last evaluation error message, if any.
	LastError string `json:"lastError,omitempty"`
	// Type of an alertingRule is always "alerting".
	Type string `json:"type"`
}
|
|
|
|
|
|
|
|
// recordingRule is the API representation of one recording rule.
type recordingRule struct {
	Name string `json:"name"`
	// Query is the rendered PromQL expression of the rule.
	Query  string           `json:"query"`
	Labels labels.Labels    `json:"labels,omitempty"`
	Health rules.RuleHealth `json:"health"`
	// LastError holds the last evaluation error message, if any.
	LastError string `json:"lastError,omitempty"`
	// Type of a recordingRule is always "recording".
	Type string `json:"type"`
}
|
|
|
|
|
2018-11-30 14:27:12 +00:00
|
|
|
// rules handles GET /api/v1/rules: it returns all rule groups with their
// rules, optionally restricted to alerting or recording rules via the
// "type" query parameter ("alert" or "record").
func (api *API) rules(r *http.Request) apiFuncResult {
	ruleGroups := api.rulesRetriever.RuleGroups()
	res := &RuleDiscovery{RuleGroups: make([]*RuleGroup, len(ruleGroups))}
	typeParam := strings.ToLower(r.URL.Query().Get("type"))

	if typeParam != "" && typeParam != "alert" && typeParam != "record" {
		err := errors.Errorf("invalid query parameter type='%v'", typeParam)
		return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
	}

	// An empty type parameter returns both kinds.
	returnAlerts := typeParam == "" || typeParam == "alert"
	returnRecording := typeParam == "" || typeParam == "record"

	for i, grp := range ruleGroups {
		apiRuleGroup := &RuleGroup{
			Name:     grp.Name(),
			File:     grp.File(),
			Interval: grp.Interval().Seconds(),
			Rules:    []rule{},
		}
		for _, r := range grp.Rules() {
			var enrichedRule rule

			lastError := ""
			if r.LastError() != nil {
				lastError = r.LastError().Error()
			}
			switch rule := r.(type) {
			case *rules.AlertingRule:
				// "break" here exits the switch only, leaving enrichedRule
				// nil so the rule is filtered out below.
				if !returnAlerts {
					break
				}
				enrichedRule = alertingRule{
					State:       rule.State().String(),
					Name:        rule.Name(),
					Query:       rule.Query().String(),
					Duration:    rule.Duration().Seconds(),
					Labels:      rule.Labels(),
					Annotations: rule.Annotations(),
					Alerts:      rulesAlertsToAPIAlerts(rule.ActiveAlerts()),
					Health:      rule.Health(),
					LastError:   lastError,
					Type:        "alerting",
				}
			case *rules.RecordingRule:
				// Same switch-scoped break as above: filtered, not an error.
				if !returnRecording {
					break
				}
				enrichedRule = recordingRule{
					Name:      rule.Name(),
					Query:     rule.Query().String(),
					Labels:    rule.Labels(),
					Health:    rule.Health(),
					LastError: lastError,
					Type:      "recording",
				}
			default:
				err := errors.Errorf("failed to assert type of rule '%v'", rule.Name())
				return apiFuncResult{nil, &apiError{errorInternal, err}, nil, nil}
			}
			// Only rules that passed the type filter are appended.
			if enrichedRule != nil {
				apiRuleGroup.Rules = append(apiRuleGroup.Rules, enrichedRule)
			}
		}
		res.RuleGroups[i] = apiRuleGroup
	}
	return apiFuncResult{res, nil, nil, nil}
}
|
|
|
|
|
2017-05-11 15:09:24 +00:00
|
|
|
// prometheusConfig is the /status/config payload: the currently loaded
// configuration rendered back to YAML.
type prometheusConfig struct {
	YAML string `json:"yaml"`
}
|
|
|
|
|
2019-11-02 15:53:32 +00:00
|
|
|
func (api *API) serveRuntimeInfo(r *http.Request) apiFuncResult {
|
|
|
|
status, err := api.runtimeInfo()
|
|
|
|
if err != nil {
|
|
|
|
return apiFuncResult{status, &apiError{errorInternal, err}, nil, nil}
|
|
|
|
}
|
|
|
|
return apiFuncResult{status, nil, nil, nil}
|
|
|
|
}
|
|
|
|
|
|
|
|
// serveBuildInfo handles GET /api/v1/status/buildinfo, returning the static
// build information captured at API construction.
func (api *API) serveBuildInfo(r *http.Request) apiFuncResult {
	return apiFuncResult{api.buildInfo, nil, nil, nil}
}
|
|
|
|
|
2018-11-30 14:27:12 +00:00
|
|
|
func (api *API) serveConfig(r *http.Request) apiFuncResult {
|
2017-05-11 15:09:24 +00:00
|
|
|
cfg := &prometheusConfig{
|
|
|
|
YAML: api.config().String(),
|
|
|
|
}
|
2018-11-30 14:27:12 +00:00
|
|
|
return apiFuncResult{cfg, nil, nil, nil}
|
2017-05-11 15:09:24 +00:00
|
|
|
}
|
|
|
|
|
2018-11-30 14:27:12 +00:00
|
|
|
// serveFlags handles GET /api/v1/status/flags, returning the map of
// command-line flag names to their effective string values.
func (api *API) serveFlags(r *http.Request) apiFuncResult {
	return apiFuncResult{api.flagsMap, nil, nil, nil}
}
|
|
|
|
|
2019-11-12 10:15:20 +00:00
|
|
|
// stat holds the information about individual cardinality.
type stat struct {
	Name  string `json:"name"`
	Value uint64 `json:"value"`
}
|
|
|
|
|
|
|
|
// tsdbStatus has information of cardinality statistics from postings.
type tsdbStatus struct {
	SeriesCountByMetricName     []stat `json:"seriesCountByMetricName"`
	LabelValueCountByLabelName  []stat `json:"labelValueCountByLabelName"`
	MemoryInBytesByLabelName    []stat `json:"memoryInBytesByLabelName"`
	SeriesCountByLabelValuePair []stat `json:"seriesCountByLabelValuePair"`
}
|
|
|
|
|
|
|
|
func (api *API) serveTSDBStatus(r *http.Request) apiFuncResult {
|
|
|
|
db := api.db()
|
|
|
|
if db == nil {
|
|
|
|
return apiFuncResult{nil, &apiError{errorUnavailable, errors.New("TSDB not ready")}, nil, nil}
|
|
|
|
}
|
|
|
|
convert := func(stats []index.Stat) []stat {
|
|
|
|
result := make([]stat, 0, len(stats))
|
|
|
|
for _, item := range stats {
|
|
|
|
item := stat{Name: item.Name, Value: item.Count}
|
|
|
|
result = append(result, item)
|
|
|
|
}
|
|
|
|
return result
|
|
|
|
}
|
|
|
|
|
|
|
|
posting := db.Head().PostingsCardinalityStats(model.MetricNameLabel)
|
|
|
|
response := tsdbStatus{
|
|
|
|
SeriesCountByMetricName: convert(posting.CardinalityMetricsStats),
|
|
|
|
LabelValueCountByLabelName: convert(posting.CardinalityLabelStats),
|
|
|
|
MemoryInBytesByLabelName: convert(posting.LabelValueStats),
|
|
|
|
SeriesCountByLabelValuePair: convert(posting.LabelValuePairsStats),
|
|
|
|
}
|
|
|
|
|
|
|
|
return apiFuncResult{response, nil, nil, nil}
|
|
|
|
}
|
|
|
|
|
2017-10-23 20:28:17 +00:00
|
|
|
// remoteRead handles POST /api/v1/read. It gates concurrent remote-read
// requests, decodes the protobuf request, and answers either as a stream of
// chunked read responses or as a single snappy-compressed sampled response,
// depending on content-type negotiation with the client.
func (api *API) remoteRead(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	// The gate bounds the number of concurrent remote-read requests; Start
	// blocks until a slot is free or the request context is done.
	if err := api.remoteReadGate.Start(ctx); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	remoteReadQueries.Inc()

	defer api.remoteReadGate.Done()
	defer remoteReadQueries.Dec()

	req, err := remote.DecodeReadRequest(r)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	externalLabels := api.config().GlobalConfig.ExternalLabels.Map()

	// External labels are merged into every returned series; the merge
	// helper expects them sorted by name.
	sortedExternalLabels := make([]prompb.Label, 0, len(externalLabels))
	for name, value := range externalLabels {
		sortedExternalLabels = append(sortedExternalLabels, prompb.Label{
			Name:  string(name),
			Value: string(value),
		})
	}
	sort.Slice(sortedExternalLabels, func(i, j int) bool {
		return sortedExternalLabels[i].Name < sortedExternalLabels[j].Name
	})

	responseType, err := remote.NegotiateResponseType(req.AcceptedResponseTypes)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	switch responseType {
	case prompb.ReadRequest_STREAMED_XOR_CHUNKS:
		w.Header().Set("Content-Type", "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse")

		// Streaming requires flushing frames as they are produced.
		f, ok := w.(http.Flusher)
		if !ok {
			http.Error(w, "internal http.ResponseWriter does not implement http.Flusher interface", http.StatusInternalServerError)
			return
		}
		for i, query := range req.Queries {
			// Each query's series set is streamed out as chunked frames,
			// capped per frame by remoteReadMaxBytesInFrame.
			err := api.remoteReadQuery(ctx, query, externalLabels, func(set storage.SeriesSet) error {

				return remote.StreamChunkedReadResponses(
					remote.NewChunkedWriter(w, f),
					int64(i),
					set,
					sortedExternalLabels,
					api.remoteReadMaxBytesInFrame,
				)
			})
			if err != nil {
				// remote.HTTPError carries its own status code.
				if httpErr, ok := err.(remote.HTTPError); ok {
					http.Error(w, httpErr.Error(), httpErr.Status())
					return
				}
				http.Error(w, err.Error(), http.StatusInternalServerError)
				return
			}
		}
	default:
		w.Header().Set("Content-Type", "application/x-protobuf")
		w.Header().Set("Content-Encoding", "snappy")

		// On empty or unknown types in req.AcceptedResponseTypes we default to non streamed, raw samples response.
		resp := prompb.ReadResponse{
			Results: make([]*prompb.QueryResult, len(req.Queries)),
		}
		for i, query := range req.Queries {
			err := api.remoteReadQuery(ctx, query, externalLabels, func(set storage.SeriesSet) error {

				// Materialize the whole result, bounded by the configured
				// sample limit.
				resp.Results[i], err = remote.ToQueryResult(set, api.remoteReadSampleLimit)
				if err != nil {
					return err
				}

				// Stamp external labels onto every returned series.
				for _, ts := range resp.Results[i].Timeseries {
					ts.Labels = remote.MergeLabels(ts.Labels, sortedExternalLabels)
				}
				return nil
			})
			if err != nil {
				if httpErr, ok := err.(remote.HTTPError); ok {
					http.Error(w, httpErr.Error(), httpErr.Status())
					return
				}
				http.Error(w, err.Error(), http.StatusInternalServerError)
				return
			}
		}

		if err := remote.EncodeReadResponse(&resp, w); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
	}
}
|
|
|
|
|
|
|
|
// filterExtLabelsFromMatchers change equality matchers which match external labels
|
|
|
|
// to a matcher that looks for an empty label,
|
|
|
|
// as that label should not be present in the storage.
|
|
|
|
func filterExtLabelsFromMatchers(pbMatchers []*prompb.LabelMatcher, externalLabels map[string]string) ([]*labels.Matcher, error) {
|
|
|
|
matchers, err := remote.FromLabelMatchers(pbMatchers)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
filteredMatchers := make([]*labels.Matcher, 0, len(matchers))
|
|
|
|
for _, m := range matchers {
|
|
|
|
value := externalLabels[m.Name]
|
|
|
|
if m.Type == labels.MatchEqual && value == m.Value {
|
|
|
|
matcher, err := labels.NewMatcher(labels.MatchEqual, m.Name, "")
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
2018-09-05 13:50:50 +00:00
|
|
|
}
|
2019-08-19 20:16:10 +00:00
|
|
|
filteredMatchers = append(filteredMatchers, matcher)
|
|
|
|
} else {
|
|
|
|
filteredMatchers = append(filteredMatchers, m)
|
2017-10-23 20:28:17 +00:00
|
|
|
}
|
2019-08-19 20:16:10 +00:00
|
|
|
}
|
2017-10-23 20:28:17 +00:00
|
|
|
|
2019-08-19 20:16:10 +00:00
|
|
|
return filteredMatchers, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (api *API) remoteReadQuery(ctx context.Context, query *prompb.Query, externalLabels map[string]string, seriesHandleFn func(set storage.SeriesSet) error) error {
|
|
|
|
filteredMatchers, err := filterExtLabelsFromMatchers(query.Matchers, externalLabels)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
querier, err := api.Queryable.Querier(ctx, query.StartTimestampMs, query.EndTimestampMs)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2017-10-26 10:44:49 +00:00
|
|
|
|
2019-08-19 20:16:10 +00:00
|
|
|
var selectParams *storage.SelectParams
|
|
|
|
if query.Hints != nil {
|
|
|
|
selectParams = &storage.SelectParams{
|
|
|
|
Start: query.Hints.StartMs,
|
|
|
|
End: query.Hints.EndMs,
|
|
|
|
Step: query.Hints.StepMs,
|
|
|
|
Func: query.Hints.Func,
|
2017-10-23 20:28:17 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-08-19 20:16:10 +00:00
|
|
|
defer func() {
|
|
|
|
if err := querier.Close(); err != nil {
|
|
|
|
level.Warn(api.logger).Log("msg", "error on querier close", "err", err.Error())
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
|
|
|
set, _, err := querier.Select(selectParams, filteredMatchers...)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
2017-10-23 20:28:17 +00:00
|
|
|
}
|
2019-08-19 20:16:10 +00:00
|
|
|
return seriesHandleFn(set)
|
2017-10-23 20:28:17 +00:00
|
|
|
}
|
|
|
|
|
2018-11-30 14:27:12 +00:00
|
|
|
func (api *API) deleteSeries(r *http.Request) apiFuncResult {
|
2017-12-03 05:07:05 +00:00
|
|
|
if !api.enableAdmin {
|
2019-01-04 13:47:38 +00:00
|
|
|
return apiFuncResult{nil, &apiError{errorUnavailable, errors.New("admin APIs disabled")}, nil, nil}
|
2017-12-03 05:07:05 +00:00
|
|
|
}
|
|
|
|
db := api.db()
|
|
|
|
if db == nil {
|
2018-11-30 14:27:12 +00:00
|
|
|
return apiFuncResult{nil, &apiError{errorUnavailable, errors.New("TSDB not ready")}, nil, nil}
|
2017-12-03 05:07:05 +00:00
|
|
|
}
|
|
|
|
|
2018-08-17 15:24:35 +00:00
|
|
|
if err := r.ParseForm(); err != nil {
|
2019-03-25 23:01:12 +00:00
|
|
|
return apiFuncResult{nil, &apiError{errorBadData, errors.Wrap(err, "error parsing form values")}, nil, nil}
|
2018-08-17 15:24:35 +00:00
|
|
|
}
|
2017-12-03 05:07:05 +00:00
|
|
|
if len(r.Form["match[]"]) == 0 {
|
2019-03-25 23:01:12 +00:00
|
|
|
return apiFuncResult{nil, &apiError{errorBadData, errors.New("no match[] parameter provided")}, nil, nil}
|
2017-12-03 05:07:05 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
var start time.Time
|
|
|
|
if t := r.FormValue("start"); t != "" {
|
|
|
|
var err error
|
|
|
|
start, err = parseTime(t)
|
|
|
|
if err != nil {
|
2018-11-30 14:27:12 +00:00
|
|
|
return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
|
2017-12-03 05:07:05 +00:00
|
|
|
}
|
|
|
|
} else {
|
|
|
|
start = minTime
|
|
|
|
}
|
|
|
|
|
|
|
|
var end time.Time
|
|
|
|
if t := r.FormValue("end"); t != "" {
|
|
|
|
var err error
|
|
|
|
end, err = parseTime(t)
|
|
|
|
if err != nil {
|
2018-11-30 14:27:12 +00:00
|
|
|
return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
|
2017-12-03 05:07:05 +00:00
|
|
|
}
|
|
|
|
} else {
|
|
|
|
end = maxTime
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, s := range r.Form["match[]"] {
|
|
|
|
matchers, err := promql.ParseMetricSelector(s)
|
|
|
|
if err != nil {
|
2018-11-30 14:27:12 +00:00
|
|
|
return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
|
2017-12-03 05:07:05 +00:00
|
|
|
}
|
|
|
|
|
2019-11-18 19:53:33 +00:00
|
|
|
if err := db.Delete(timestamp.FromTime(start), timestamp.FromTime(end), matchers...); err != nil {
|
2018-11-30 14:27:12 +00:00
|
|
|
return apiFuncResult{nil, &apiError{errorInternal, err}, nil, nil}
|
2017-12-03 05:07:05 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-11-30 14:27:12 +00:00
|
|
|
return apiFuncResult{nil, nil, nil, nil}
|
2017-12-03 05:07:05 +00:00
|
|
|
}
|
|
|
|
|
2018-11-30 14:27:12 +00:00
|
|
|
func (api *API) snapshot(r *http.Request) apiFuncResult {
|
2017-12-03 05:07:05 +00:00
|
|
|
if !api.enableAdmin {
|
2019-01-04 13:47:38 +00:00
|
|
|
return apiFuncResult{nil, &apiError{errorUnavailable, errors.New("admin APIs disabled")}, nil, nil}
|
2017-12-03 05:07:05 +00:00
|
|
|
}
|
2018-10-02 10:48:07 +00:00
|
|
|
var (
|
|
|
|
skipHead bool
|
|
|
|
err error
|
|
|
|
)
|
|
|
|
if r.FormValue("skip_head") != "" {
|
|
|
|
skipHead, err = strconv.ParseBool(r.FormValue("skip_head"))
|
|
|
|
if err != nil {
|
2019-03-25 23:01:12 +00:00
|
|
|
return apiFuncResult{nil, &apiError{errorBadData, errors.Wrapf(err, "unable to parse boolean 'skip_head' argument")}, nil, nil}
|
2018-10-02 10:48:07 +00:00
|
|
|
}
|
2018-08-17 15:24:35 +00:00
|
|
|
}
|
2018-03-08 10:43:41 +00:00
|
|
|
|
2017-12-03 05:07:05 +00:00
|
|
|
db := api.db()
|
|
|
|
if db == nil {
|
2018-11-30 14:27:12 +00:00
|
|
|
return apiFuncResult{nil, &apiError{errorUnavailable, errors.New("TSDB not ready")}, nil, nil}
|
2017-12-03 05:07:05 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
var (
|
|
|
|
snapdir = filepath.Join(db.Dir(), "snapshots")
|
2017-12-10 19:19:34 +00:00
|
|
|
name = fmt.Sprintf("%s-%x",
|
|
|
|
time.Now().UTC().Format("20060102T150405Z0700"),
|
|
|
|
rand.Int())
|
|
|
|
dir = filepath.Join(snapdir, name)
|
2017-12-03 05:07:05 +00:00
|
|
|
)
|
|
|
|
if err := os.MkdirAll(dir, 0777); err != nil {
|
2019-03-25 23:01:12 +00:00
|
|
|
return apiFuncResult{nil, &apiError{errorInternal, errors.Wrap(err, "create snapshot directory")}, nil, nil}
|
2017-12-03 05:07:05 +00:00
|
|
|
}
|
2018-03-08 10:43:41 +00:00
|
|
|
if err := db.Snapshot(dir, !skipHead); err != nil {
|
2019-03-25 23:01:12 +00:00
|
|
|
return apiFuncResult{nil, &apiError{errorInternal, errors.Wrap(err, "create snapshot")}, nil, nil}
|
2017-12-03 05:07:05 +00:00
|
|
|
}
|
|
|
|
|
2018-11-30 14:27:12 +00:00
|
|
|
return apiFuncResult{struct {
|
2017-12-03 05:07:05 +00:00
|
|
|
Name string `json:"name"`
|
2018-11-30 14:27:12 +00:00
|
|
|
}{name}, nil, nil, nil}
|
2017-12-03 05:07:05 +00:00
|
|
|
}
|
|
|
|
|
2018-11-30 14:27:12 +00:00
|
|
|
func (api *API) cleanTombstones(r *http.Request) apiFuncResult {
|
2017-12-03 05:07:05 +00:00
|
|
|
if !api.enableAdmin {
|
2019-01-04 13:47:38 +00:00
|
|
|
return apiFuncResult{nil, &apiError{errorUnavailable, errors.New("admin APIs disabled")}, nil, nil}
|
2017-12-03 05:07:05 +00:00
|
|
|
}
|
|
|
|
db := api.db()
|
|
|
|
if db == nil {
|
2018-11-30 14:27:12 +00:00
|
|
|
return apiFuncResult{nil, &apiError{errorUnavailable, errors.New("TSDB not ready")}, nil, nil}
|
2017-12-03 05:07:05 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if err := db.CleanTombstones(); err != nil {
|
2018-11-30 14:27:12 +00:00
|
|
|
return apiFuncResult{nil, &apiError{errorInternal, err}, nil, nil}
|
2017-12-03 05:07:05 +00:00
|
|
|
}
|
|
|
|
|
2018-11-30 14:27:12 +00:00
|
|
|
return apiFuncResult{nil, nil, nil, nil}
|
2017-12-03 05:07:05 +00:00
|
|
|
}
|
|
|
|
|
2018-11-30 14:27:12 +00:00
|
|
|
func (api *API) respond(w http.ResponseWriter, data interface{}, warnings storage.Warnings) {
|
|
|
|
statusMessage := statusSuccess
|
|
|
|
var warningStrings []string
|
|
|
|
for _, warning := range warnings {
|
|
|
|
warningStrings = append(warningStrings, warning.Error())
|
|
|
|
}
|
2017-12-02 13:52:59 +00:00
|
|
|
json := jsoniter.ConfigCompatibleWithStandardLibrary
|
2015-06-04 16:07:57 +00:00
|
|
|
b, err := json.Marshal(&response{
|
2018-11-30 14:27:12 +00:00
|
|
|
Status: statusMessage,
|
|
|
|
Data: data,
|
|
|
|
Warnings: warningStrings,
|
2015-06-04 16:07:57 +00:00
|
|
|
})
|
|
|
|
if err != nil {
|
2018-11-27 16:44:29 +00:00
|
|
|
level.Error(api.logger).Log("msg", "error marshaling json response", "err", err)
|
2018-07-13 18:31:23 +00:00
|
|
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
2015-06-04 16:07:57 +00:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
w.Header().Set("Content-Type", "application/json")
|
2018-07-13 18:31:23 +00:00
|
|
|
w.WriteHeader(http.StatusOK)
|
2018-07-06 17:44:45 +00:00
|
|
|
if n, err := w.Write(b); err != nil {
|
2018-07-25 12:35:47 +00:00
|
|
|
level.Error(api.logger).Log("msg", "error writing response", "bytesWritten", n, "err", err)
|
2018-07-06 17:44:45 +00:00
|
|
|
}
|
2015-06-04 16:07:57 +00:00
|
|
|
}
|
|
|
|
|
2018-07-06 17:44:45 +00:00
|
|
|
func (api *API) respondError(w http.ResponseWriter, apiErr *apiError, data interface{}) {
|
2018-07-25 12:17:10 +00:00
|
|
|
json := jsoniter.ConfigCompatibleWithStandardLibrary
|
|
|
|
b, err := json.Marshal(&response{
|
|
|
|
Status: statusError,
|
|
|
|
ErrorType: apiErr.typ,
|
|
|
|
Error: apiErr.err.Error(),
|
|
|
|
Data: data,
|
|
|
|
})
|
2019-11-02 15:53:32 +00:00
|
|
|
|
2018-07-25 12:17:10 +00:00
|
|
|
if err != nil {
|
2018-11-27 16:44:29 +00:00
|
|
|
level.Error(api.logger).Log("msg", "error marshaling json response", "err", err)
|
2018-07-25 12:17:10 +00:00
|
|
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
|
|
|
return
|
|
|
|
}
|
2015-11-11 22:00:54 +00:00
|
|
|
|
|
|
|
var code int
|
|
|
|
switch apiErr.typ {
|
|
|
|
case errorBadData:
|
|
|
|
code = http.StatusBadRequest
|
|
|
|
case errorExec:
|
|
|
|
code = 422
|
|
|
|
case errorCanceled, errorTimeout:
|
|
|
|
code = http.StatusServiceUnavailable
|
2017-04-04 16:22:51 +00:00
|
|
|
case errorInternal:
|
|
|
|
code = http.StatusInternalServerError
|
2018-05-18 07:32:11 +00:00
|
|
|
case errorNotFound:
|
|
|
|
code = http.StatusNotFound
|
2015-11-11 22:00:54 +00:00
|
|
|
default:
|
|
|
|
code = http.StatusInternalServerError
|
|
|
|
}
|
2015-06-04 16:07:57 +00:00
|
|
|
|
2018-07-25 12:17:10 +00:00
|
|
|
w.Header().Set("Content-Type", "application/json")
|
|
|
|
w.WriteHeader(code)
|
2018-07-06 17:44:45 +00:00
|
|
|
if n, err := w.Write(b); err != nil {
|
2018-07-25 12:35:47 +00:00
|
|
|
level.Error(api.logger).Log("msg", "error writing response", "bytesWritten", n, "err", err)
|
2015-06-04 16:07:57 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-12-24 23:37:46 +00:00
|
|
|
func parseTime(s string) (time.Time, error) {
|
2015-06-04 16:07:57 +00:00
|
|
|
if t, err := strconv.ParseFloat(s, 64); err == nil {
|
2016-12-24 23:37:46 +00:00
|
|
|
s, ns := math.Modf(t)
|
2018-12-03 12:25:54 +00:00
|
|
|
ns = math.Round(ns*1000) / 1000
|
2016-12-24 23:37:46 +00:00
|
|
|
return time.Unix(int64(s), int64(ns*float64(time.Second))), nil
|
2015-06-04 16:07:57 +00:00
|
|
|
}
|
|
|
|
if t, err := time.Parse(time.RFC3339Nano, s); err == nil {
|
2016-12-24 23:37:46 +00:00
|
|
|
return t, nil
|
2015-06-04 16:07:57 +00:00
|
|
|
}
|
2019-07-08 09:43:59 +00:00
|
|
|
|
|
|
|
// Stdlib's time parser can only handle 4 digit years. As a workaround until
|
|
|
|
// that is fixed we want to at least support our own boundary times.
|
|
|
|
// Context: https://github.com/prometheus/client_golang/issues/614
|
|
|
|
// Upstream issue: https://github.com/golang/go/issues/20555
|
|
|
|
switch s {
|
|
|
|
case minTimeFormatted:
|
|
|
|
return minTime, nil
|
|
|
|
case maxTimeFormatted:
|
|
|
|
return maxTime, nil
|
|
|
|
}
|
2019-03-25 23:01:12 +00:00
|
|
|
return time.Time{}, errors.Errorf("cannot parse %q to a valid timestamp", s)
|
2015-06-04 16:07:57 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func parseDuration(s string) (time.Duration, error) {
|
|
|
|
if d, err := strconv.ParseFloat(s, 64); err == nil {
|
2017-03-16 14:16:20 +00:00
|
|
|
ts := d * float64(time.Second)
|
|
|
|
if ts > float64(math.MaxInt64) || ts < float64(math.MinInt64) {
|
2019-03-25 23:01:12 +00:00
|
|
|
return 0, errors.Errorf("cannot parse %q to a valid duration. It overflows int64", s)
|
2017-03-16 14:16:20 +00:00
|
|
|
}
|
|
|
|
return time.Duration(ts), nil
|
2015-06-04 16:07:57 +00:00
|
|
|
}
|
2016-01-29 14:23:11 +00:00
|
|
|
if d, err := model.ParseDuration(s); err == nil {
|
|
|
|
return time.Duration(d), nil
|
2015-06-04 16:07:57 +00:00
|
|
|
}
|
2019-03-25 23:01:12 +00:00
|
|
|
return 0, errors.Errorf("cannot parse %q to a valid duration", s)
|
2015-06-04 16:07:57 +00:00
|
|
|
}
|
2018-02-07 12:27:57 +00:00
|
|
|
|
|
|
|
// marshalPointJSON is a jsoniter value encoder for promql.Point. It writes
// the point as `[<seconds>.<millis>, "<value>"]`: the millisecond timestamp
// is emitted via integer arithmetic instead of float formatting, and the
// sample value is written as a quoted string.
func marshalPointJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) {
	p := *((*promql.Point)(ptr))
	stream.WriteArrayStart()
	// Write out the timestamp as a float divided by 1000.
	// This is ~3x faster than converting to a float.
	t := p.T
	if t < 0 {
		// Emit the sign manually so the division/modulo below operate on a
		// non-negative value.
		stream.WriteRaw(`-`)
		t = -t
	}
	stream.WriteInt64(t / 1000)
	fraction := t % 1000
	if fraction != 0 {
		stream.WriteRaw(`.`)
		// Zero-pad the millisecond fraction to exactly three digits.
		if fraction < 100 {
			stream.WriteRaw(`0`)
		}
		if fraction < 10 {
			stream.WriteRaw(`0`)
		}
		stream.WriteInt64(fraction)
	}
	stream.WriteMore()
	stream.WriteRaw(`"`)

	// Taken from https://github.com/json-iterator/go/blob/master/stream_float.go#L71 as a workaround
	// to https://github.com/json-iterator/go/issues/365 (jsoniter, to follow json standard, doesn't allow inf/nan).
	buf := stream.Buffer()
	abs := math.Abs(p.V)
	fmt := byte('f')
	// Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right.
	if abs != 0 {
		if abs < 1e-6 || abs >= 1e21 {
			fmt = 'e'
		}
	}
	buf = strconv.AppendFloat(buf, p.V, fmt, -1, 64)
	stream.SetBuffer(buf)

	stream.WriteRaw(`"`)
	stream.WriteArrayEnd()
}
|
|
|
|
|
|
|
|
// marshalPointJSONIsEmpty reports whether a promql.Point should be omitted
// from jsoniter output; points are always encoded, so it returns false.
func marshalPointJSONIsEmpty(ptr unsafe.Pointer) bool {
	return false
}
|