Merge pull request #26 from grafana/update-upstream

Upgrade upstream
owilliams/utf8-02-mimir
Marco Pracucci 3 years ago committed by GitHub
commit a882d28d36

@@ -106,40 +106,30 @@ func init() {
}
}
// agentOnlySetting can be provided to a kingpin flag's PreAction to mark a
// flag as agent-only.
func agentOnlySetting() func(*kingpin.ParseContext) error {
return func(pc *kingpin.ParseContext) error {
agentOnlyFlags = append(agentOnlyFlags, extractFlagName(pc))
return nil
}
}
// serverOnlySetting can be provided to a kingpin flag's PreAction to mark a
// flag as server-only.
func serverOnlySetting() func(*kingpin.ParseContext) error {
return func(pc *kingpin.ParseContext) error {
serverOnlyFlags = append(serverOnlyFlags, extractFlagName(pc))
return nil
}
}
// serverOnlyFlag creates server-only kingpin flag.
func serverOnlyFlag(app *kingpin.Application, name, help string) *kingpin.FlagClause {
return app.Flag(name, fmt.Sprintf("%s Use with server mode only.", help)).
PreAction(func(parseContext *kingpin.ParseContext) error {
// This will be invoked only if flag is actually provided by user.
serverOnlyFlags = append(serverOnlyFlags, "--"+name)
return nil
})
}
// extractFlagName gets the flag name from the ParseContext. Only call
// from agentOnlySetting or serverOnlySetting.
func extractFlagName(pc *kingpin.ParseContext) string {
for _, pe := range pc.Elements {
fc, ok := pe.Clause.(*kingpin.FlagClause)
if !ok {
continue
}
return "--" + fc.Model().Name
}
panic("extractFlagName not called from a kingpin PreAction. This is a bug, please report to Prometheus.")
}
// agentOnlyFlag creates agent-only kingpin flag.
func agentOnlyFlag(app *kingpin.Application, name, help string) *kingpin.FlagClause {
return app.Flag(name, fmt.Sprintf("%s Use with agent mode only.", help)).
PreAction(func(parseContext *kingpin.ParseContext) error {
// This will be invoked only if flag is actually provided by user.
agentOnlyFlags = append(agentOnlyFlags, "--"+name)
return nil
})
}
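Taken together, serverOnlyFlag and agentOnlyFlag replace the earlier PreAction-based markers: each wraps app.Flag, appends a mode hint to the help text, and records "--"+name in a package-level slice only when the flag is actually supplied, so main() can later refuse flags that do not match the selected mode. Below is a minimal, self-contained sketch of that pattern. The kingpin calls are real; the example application, its --agent flag, and the validation at the end are illustrative assumptions (the real check in main.go is not shown in this diff).

package main

import (
	"fmt"
	"os"

	"gopkg.in/alecthomas/kingpin.v2"
)

var serverOnlyFlags []string

// serverOnlyFlag mirrors the helper above: it registers the flag and records
// its name only when the user actually provides it.
func serverOnlyFlag(app *kingpin.Application, name, help string) *kingpin.FlagClause {
	return app.Flag(name, fmt.Sprintf("%s Use with server mode only.", help)).
		PreAction(func(_ *kingpin.ParseContext) error {
			serverOnlyFlags = append(serverOnlyFlags, "--"+name)
			return nil
		})
}

func main() {
	app := kingpin.New("example", "Illustration of mode-specific flags.")
	agentMode := app.Flag("agent", "Run in agent mode.").Bool()

	var storagePath string
	serverOnlyFlag(app, "storage.tsdb.path", "Base path for metrics storage.").
		Default("data/").StringVar(&storagePath)

	kingpin.MustParse(app.Parse(os.Args[1:]))

	// Illustrative check: reject server-only flags in agent mode, matching the
	// message and exit code asserted by TestAgentFailedStartupWithServerFlag below.
	if *agentMode && len(serverOnlyFlags) > 0 {
		fmt.Fprintf(os.Stderr, "The following flag(s) can not be used in agent mode: %q\n", serverOnlyFlags)
		os.Exit(3)
	}
}

Run with --agent --storage.tsdb.path=/tmp, this sketch exits with status 3 and the same message the new agent-mode test asserts on.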
type flagConfig struct {
configFile string
agentStoragePath string
localStoragePath string
notifier notifier.Options
forGracePeriod model.Duration
@@ -285,106 +275,83 @@ func main() {
a.Flag("web.cors.origin", `Regex for CORS origin. It is fully anchored. Example: 'https?://(domain1|domain2)\.com'`).
Default(".*").StringVar(&cfg.corsRegexString)
a.Flag("storage.tsdb.path", "Base path for metrics storage.").
PreAction(serverOnlySetting()).
serverOnlyFlag(a, "storage.tsdb.path", "Base path for metrics storage.").
Default("data/").StringVar(&cfg.localStoragePath)
a.Flag("storage.tsdb.min-block-duration", "Minimum duration of a data block before being persisted. For use in testing.").
PreAction(serverOnlySetting()).
serverOnlyFlag(a, "storage.tsdb.min-block-duration", "Minimum duration of a data block before being persisted. For use in testing.").
Hidden().Default("2h").SetValue(&cfg.tsdb.MinBlockDuration)
a.Flag("storage.tsdb.max-block-duration",
serverOnlyFlag(a, "storage.tsdb.max-block-duration",
"Maximum duration compacted blocks may span. For use in testing. (Defaults to 10% of the retention period.)").
PreAction(serverOnlySetting()).
Hidden().PlaceHolder("<duration>").SetValue(&cfg.tsdb.MaxBlockDuration)
a.Flag("storage.tsdb.max-block-chunk-segment-size",
serverOnlyFlag(a, "storage.tsdb.max-block-chunk-segment-size",
"The maximum size for a single chunk segment in a block. Example: 512MB").
PreAction(serverOnlySetting()).
Hidden().PlaceHolder("<bytes>").BytesVar(&cfg.tsdb.MaxBlockChunkSegmentSize)
a.Flag("storage.tsdb.wal-segment-size",
serverOnlyFlag(a, "storage.tsdb.wal-segment-size",
"Size at which to split the tsdb WAL segment files. Example: 100MB").
PreAction(serverOnlySetting()).
Hidden().PlaceHolder("<bytes>").BytesVar(&cfg.tsdb.WALSegmentSize)
a.Flag("storage.tsdb.retention", "[DEPRECATED] How long to retain samples in storage. This flag has been deprecated, use \"storage.tsdb.retention.time\" instead.").
PreAction(serverOnlySetting()).
serverOnlyFlag(a, "storage.tsdb.retention", "[DEPRECATED] How long to retain samples in storage. This flag has been deprecated, use \"storage.tsdb.retention.time\" instead.").
SetValue(&oldFlagRetentionDuration)
a.Flag("storage.tsdb.retention.time", "How long to retain samples in storage. When this flag is set it overrides \"storage.tsdb.retention\". If neither this flag nor \"storage.tsdb.retention\" nor \"storage.tsdb.retention.size\" is set, the retention time defaults to "+defaultRetentionString+". Units Supported: y, w, d, h, m, s, ms.").
PreAction(serverOnlySetting()).
serverOnlyFlag(a, "storage.tsdb.retention.time", "How long to retain samples in storage. When this flag is set it overrides \"storage.tsdb.retention\". If neither this flag nor \"storage.tsdb.retention\" nor \"storage.tsdb.retention.size\" is set, the retention time defaults to "+defaultRetentionString+". Units Supported: y, w, d, h, m, s, ms.").
SetValue(&newFlagRetentionDuration)
a.Flag("storage.tsdb.retention.size", "Maximum number of bytes that can be stored for blocks. A unit is required, supported units: B, KB, MB, GB, TB, PB, EB. Ex: \"512MB\".").
PreAction(serverOnlySetting()).
serverOnlyFlag(a, "storage.tsdb.retention.size", "Maximum number of bytes that can be stored for blocks. A unit is required, supported units: B, KB, MB, GB, TB, PB, EB. Ex: \"512MB\".").
BytesVar(&cfg.tsdb.MaxBytes)
a.Flag("storage.tsdb.no-lockfile", "Do not create lockfile in data directory.").
PreAction(serverOnlySetting()).
serverOnlyFlag(a, "storage.tsdb.no-lockfile", "Do not create lockfile in data directory.").
Default("false").BoolVar(&cfg.tsdb.NoLockfile)
a.Flag("storage.tsdb.allow-overlapping-blocks", "Allow overlapping blocks, which in turn enables vertical compaction and vertical query merge.").
PreAction(serverOnlySetting()).
serverOnlyFlag(a, "storage.tsdb.allow-overlapping-blocks", "Allow overlapping blocks, which in turn enables vertical compaction and vertical query merge.").
Default("false").BoolVar(&cfg.tsdb.AllowOverlappingBlocks)
a.Flag("storage.tsdb.wal-compression", "Compress the tsdb WAL.").
PreAction(serverOnlySetting()).
serverOnlyFlag(a, "storage.tsdb.wal-compression", "Compress the tsdb WAL.").
Hidden().Default("true").BoolVar(&cfg.tsdb.WALCompression)
a.Flag("storage.agent.path", "Base path for metrics storage.").
PreAction(agentOnlySetting()).
Default("data-agent/").StringVar(&cfg.localStoragePath)
agentOnlyFlag(a, "storage.agent.path", "Base path for metrics storage.").
Default("data-agent/").StringVar(&cfg.agentStoragePath)
a.Flag("storage.agent.wal-segment-size",
agentOnlyFlag(a, "storage.agent.wal-segment-size",
"Size at which to split WAL segment files. Example: 100MB").
PreAction(agentOnlySetting()).
Hidden().PlaceHolder("<bytes>").BytesVar(&cfg.agent.WALSegmentSize)
a.Flag("storage.agent.wal-compression", "Compress the agent WAL.").
PreAction(agentOnlySetting()).
agentOnlyFlag(a, "storage.agent.wal-compression", "Compress the agent WAL.").
Default("true").BoolVar(&cfg.agent.WALCompression)
a.Flag("storage.agent.wal-truncate-frequency",
agentOnlyFlag(a, "storage.agent.wal-truncate-frequency",
"The frequency at which to truncate the WAL and remove old data.").
PreAction(agentOnlySetting()).
Hidden().PlaceHolder("<duration>").SetValue(&cfg.agent.TruncateFrequency)
a.Flag("storage.agent.retention.min-time",
agentOnlyFlag(a, "storage.agent.retention.min-time",
"Minimum age samples may be before being considered for deletion when the WAL is truncated").
PreAction(agentOnlySetting()).
SetValue(&cfg.agent.MinWALTime)
a.Flag("storage.agent.retention.max-time",
agentOnlyFlag(a, "storage.agent.retention.max-time",
"Maximum age samples may be before being forcibly deleted when the WAL is truncated").
PreAction(agentOnlySetting()).
SetValue(&cfg.agent.MaxWALTime)
a.Flag("storage.remote.flush-deadline", "How long to wait flushing sample on shutdown or config reload.").
Default("1m").PlaceHolder("<duration>").SetValue(&cfg.RemoteFlushDeadline)
a.Flag("storage.remote.read-sample-limit", "Maximum overall number of samples to return via the remote read interface, in a single query. 0 means no limit. This limit is ignored for streamed response types.").
PreAction(serverOnlySetting()).
serverOnlyFlag(a, "storage.remote.read-sample-limit", "Maximum overall number of samples to return via the remote read interface, in a single query. 0 means no limit. This limit is ignored for streamed response types.").
Default("5e7").IntVar(&cfg.web.RemoteReadSampleLimit)
a.Flag("storage.remote.read-concurrent-limit", "Maximum number of concurrent remote read calls. 0 means no limit.").
PreAction(serverOnlySetting()).
serverOnlyFlag(a, "storage.remote.read-concurrent-limit", "Maximum number of concurrent remote read calls. 0 means no limit.").
Default("10").IntVar(&cfg.web.RemoteReadConcurrencyLimit)
a.Flag("storage.remote.read-max-bytes-in-frame", "Maximum number of bytes in a single frame for streaming remote read response types before marshalling. Note that client might have limit on frame size as well. 1MB as recommended by protobuf by default.").
PreAction(serverOnlySetting()).
serverOnlyFlag(a, "storage.remote.read-max-bytes-in-frame", "Maximum number of bytes in a single frame for streaming remote read response types before marshalling. Note that client might have limit on frame size as well. 1MB as recommended by protobuf by default.").
Default("1048576").IntVar(&cfg.web.RemoteReadBytesInFrame)
a.Flag("rules.alert.for-outage-tolerance", "Max time to tolerate prometheus outage for restoring \"for\" state of alert.").
PreAction(serverOnlySetting()).
serverOnlyFlag(a, "rules.alert.for-outage-tolerance", "Max time to tolerate prometheus outage for restoring \"for\" state of alert.").
Default("1h").SetValue(&cfg.outageTolerance)
a.Flag("rules.alert.for-grace-period", "Minimum duration between alert and restored \"for\" state. This is maintained only for alerts with configured \"for\" time greater than grace period.").
PreAction(serverOnlySetting()).
serverOnlyFlag(a, "rules.alert.for-grace-period", "Minimum duration between alert and restored \"for\" state. This is maintained only for alerts with configured \"for\" time greater than grace period.").
Default("10m").SetValue(&cfg.forGracePeriod)
a.Flag("rules.alert.resend-delay", "Minimum amount of time to wait before resending an alert to Alertmanager.").
PreAction(serverOnlySetting()).
serverOnlyFlag(a, "rules.alert.resend-delay", "Minimum amount of time to wait before resending an alert to Alertmanager.").
Default("1m").SetValue(&cfg.resendDelay)
a.Flag("scrape.adjust-timestamps", "Adjust scrape timestamps by up to `scrape.timestamp-tolerance` to align them to the intended schedule. See https://github.com/prometheus/prometheus/issues/7846 for more context. Experimental. This flag will be removed in a future release.").
@@ -393,27 +360,22 @@ func main() {
a.Flag("scrape.timestamp-tolerance", "Timestamp tolerance. See https://github.com/prometheus/prometheus/issues/7846 for more context. Experimental. This flag will be removed in a future release.").
Hidden().Default("2ms").DurationVar(&scrape.ScrapeTimestampTolerance)
a.Flag("alertmanager.notification-queue-capacity", "The capacity of the queue for pending Alertmanager notifications.").
PreAction(serverOnlySetting()).
serverOnlyFlag(a, "alertmanager.notification-queue-capacity", "The capacity of the queue for pending Alertmanager notifications.").
Default("10000").IntVar(&cfg.notifier.QueueCapacity)
// TODO: Remove in Prometheus 3.0.
alertmanagerTimeout := a.Flag("alertmanager.timeout", "[DEPRECATED] This flag has no effect.").Hidden().String()
a.Flag("query.lookback-delta", "The maximum lookback duration for retrieving metrics during expression evaluations and federation.").
PreAction(serverOnlySetting()).
serverOnlyFlag(a, "query.lookback-delta", "The maximum lookback duration for retrieving metrics during expression evaluations and federation.").
Default("5m").SetValue(&cfg.lookbackDelta)
a.Flag("query.timeout", "Maximum time a query may take before being aborted.").
PreAction(serverOnlySetting()).
serverOnlyFlag(a, "query.timeout", "Maximum time a query may take before being aborted.").
Default("2m").SetValue(&cfg.queryTimeout)
a.Flag("query.max-concurrency", "Maximum number of queries executed concurrently.").
PreAction(serverOnlySetting()).
serverOnlyFlag(a, "query.max-concurrency", "Maximum number of queries executed concurrently.").
Default("20").IntVar(&cfg.queryConcurrency)
a.Flag("query.max-samples", "Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return.").
PreAction(serverOnlySetting()).
serverOnlyFlag(a, "query.max-samples", "Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return.").
Default("50000000").IntVar(&cfg.queryMaxSamples)
a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, remote-write-receiver, extra-scrape-metrics, new-service-discovery-manager. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
@@ -1023,14 +985,14 @@ func main() {
logger,
prometheus.DefaultRegisterer,
remoteStorage,
cfg.localStoragePath,
cfg.agentStoragePath,
&opts,
)
if err != nil {
return errors.Wrap(err, "opening storage failed")
}
switch fsType := prom_runtime.Statfs(cfg.localStoragePath); fsType {
switch fsType := prom_runtime.Statfs(cfg.agentStoragePath); fsType {
case "NFS_SUPER_MAGIC":
level.Warn(logger).Log("fs_type", fsType, "msg", "This filesystem is not supported and may lead to data corruption and data loss. Please carefully read https://prometheus.io/docs/prometheus/latest/storage/ to learn more about supported filesystems.")
default:

@@ -14,6 +14,7 @@
package main
import (
"bytes"
"context"
"fmt"
"io/ioutil"
@@ -21,6 +22,7 @@ import (
"os"
"os/exec"
"path/filepath"
"strings"
"syscall"
"testing"
"time"
@@ -355,13 +357,11 @@ func getCurrentGaugeValuesFor(t *testing.T, reg prometheus.Gatherer, metricNames
func TestAgentSuccessfulStartup(t *testing.T) {
prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--config.file="+agentConfig)
err := prom.Start()
require.NoError(t, err)
require.NoError(t, prom.Start())
expectedExitStatus := 0
actualExitStatus := 0
done := make(chan error, 1)
go func() { done <- prom.Wait() }()
select {
case err := <-done:
@@ -370,18 +370,43 @@ func TestAgentSuccessfulStartup(t *testing.T) {
case <-time.After(5 * time.Second):
prom.Process.Kill()
}
require.Equal(t, expectedExitStatus, actualExitStatus)
require.Equal(t, 0, actualExitStatus)
}
func TestAgentStartupWithInvalidConfig(t *testing.T) {
prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--config.file="+promConfig)
err := prom.Start()
require.NoError(t, err)
func TestAgentFailedStartupWithServerFlag(t *testing.T) {
prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--storage.tsdb.path=.", "--config.file="+promConfig)
output := bytes.Buffer{}
prom.Stderr = &output
require.NoError(t, prom.Start())
expectedExitStatus := 2
actualExitStatus := 0
done := make(chan error, 1)
go func() { done <- prom.Wait() }()
select {
case err := <-done:
t.Logf("prometheus agent should not be running: %v", err)
actualExitStatus = prom.ProcessState.ExitCode()
case <-time.After(5 * time.Second):
prom.Process.Kill()
}
require.Equal(t, 3, actualExitStatus)
// Assert on last line.
lines := strings.Split(output.String(), "\n")
last := lines[len(lines)-1]
require.Equal(t, "The following flag(s) can not be used in agent mode: [\"--storage.tsdb.path\"]", last)
}
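These startup tests all follow the same skeleton: start the built binary, wait for it with a timeout, then assert on the exit code (and, here, on the last stderr line). The sketch below is a hypothetical helper, not part of this change, that bundles the pattern; it assumes the test file's existing promPath variable and imports (bytes, os/exec, strings, time, testify's require).

// runAndCaptureExit is a hypothetical helper (not part of this change) that
// starts the binary, bounds the wait with a timeout, and reports the exit code
// plus the last line written to stderr (trailing newlines ignored).
func runAndCaptureExit(t *testing.T, timeout time.Duration, args ...string) (int, string) {
	t.Helper()

	prom := exec.Command(promPath, args...)
	var stderr bytes.Buffer
	prom.Stderr = &stderr
	require.NoError(t, prom.Start())

	done := make(chan error, 1)
	go func() { done <- prom.Wait() }()

	select {
	case <-done:
		// Process finished; ProcessState is populated by Wait.
	case <-time.After(timeout):
		prom.Process.Kill()
		t.Fatalf("prometheus did not exit within %v", timeout)
	}

	lines := strings.Split(strings.TrimRight(stderr.String(), "\n"), "\n")
	return prom.ProcessState.ExitCode(), lines[len(lines)-1]
}

// Example usage mirroring the assertions above:
//
//	code, last := runAndCaptureExit(t, 5*time.Second,
//		"-test.main", "--enable-feature=agent", "--storage.tsdb.path=.", "--config.file="+promConfig)
//	require.Equal(t, 3, code)
//	require.Equal(t, `The following flag(s) can not be used in agent mode: ["--storage.tsdb.path"]`, last)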
func TestAgentFailedStartupWithInvalidConfig(t *testing.T) {
prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--config.file="+promConfig)
require.NoError(t, prom.Start())
actualExitStatus := 0
done := make(chan error, 1)
go func() { done <- prom.Wait() }()
select {
case err := <-done:
@@ -390,7 +415,7 @@ func TestAgentStartupWithInvalidConfig(t *testing.T) {
case <-time.After(5 * time.Second):
prom.Process.Kill()
}
require.Equal(t, expectedExitStatus, actualExitStatus)
require.Equal(t, 2, actualExitStatus)
}
func TestModeSpecificFlags(t *testing.T) {

@@ -11,7 +11,7 @@ branch="repo_sync_codemirror"
commit_msg="Update codemirror"
pr_title="Synchronize codemirror from prometheus/prometheus"
pr_msg="Propagating changes from prometheus/prometheus default branch."
target_repo="prometheus-community/codemirror-promql"
target_repo="prometheus/codemirror-promql"
source_path="web/ui/module/codemirror-promql"
color_red='\e[31m'

@@ -30,9 +30,19 @@ func AllPostingsKey() (name, value string) {
return allPostingsKey.Name, allPostingsKey.Value
}
// ensureOrderBatchSize is the max number of postings passed to a worker in a single batch in MemPostings.EnsureOrder().
const ensureOrderBatchSize = 1024
// ensureOrderBatchPool is a pool used to recycle batches passed to workers in MemPostings.EnsureOrder().
var ensureOrderBatchPool = sync.Pool{
New: func() interface{} {
return make([][]uint64, 0, ensureOrderBatchSize)
},
}
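The pool lets EnsureOrder hand workers fixed-size [][]uint64 batches without allocating a fresh slice for every batch: a batch is taken with Get, filled, processed, truncated to length zero, and returned with Put so its capacity can be reused. A minimal, self-contained sketch of that recycle pattern using plain ints follows; only sync.Pool's Get/Put API is assumed, the rest is illustrative.

package main

import (
	"fmt"
	"sync"
)

const batchSize = 1024

var batchPool = sync.Pool{
	New: func() interface{} {
		return make([]int, 0, batchSize)
	},
}

func main() {
	work := make(chan []int)
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		for batch := range work {
			fmt.Println("processing", len(batch), "items")
			// Truncate before returning so the next user starts empty but
			// keeps the allocated capacity.
			batchPool.Put(batch[:0]) //nolint:staticcheck // SA6002: storing a slice header allocates; accepted here for simplicity.
		}
	}()

	batch := batchPool.Get().([]int)
	for i := 0; i < 3000; i++ {
		batch = append(batch, i)
		if len(batch) >= batchSize {
			work <- batch
			batch = batchPool.Get().([]int)
		}
	}
	if len(batch) > 0 { // flush the final, partially filled batch
		work <- batch
	}
	close(work)
	wg.Wait()
}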
// MemPostings holds postings list for series ID per label pair. They may be written
// to out of order.
// ensureOrder() must be called once before any reads are done. This allows for quick
// EnsureOrder() must be called once before any reads are done. This allows for quick
// unordered batch fills on startup.
type MemPostings struct {
mtx sync.RWMutex
@@ -49,7 +59,7 @@ func NewMemPostings() *MemPostings {
}
// NewUnorderedMemPostings returns a memPostings that is not safe to be read from
// until ensureOrder was called once.
// until EnsureOrder() was called once.
func NewUnorderedMemPostings() *MemPostings {
return &MemPostings{
m: make(map[string]map[string][]uint64, 512),
@@ -218,25 +228,42 @@ func (p *MemPostings) EnsureOrder() {
}
n := runtime.GOMAXPROCS(0)
workc := make(chan []uint64)
workc := make(chan [][]uint64)
var wg sync.WaitGroup
wg.Add(n)
for i := 0; i < n; i++ {
go func() {
for l := range workc {
sort.Slice(l, func(a, b int) bool { return l[a] < l[b] })
for job := range workc {
for _, l := range job {
sort.Sort(uint64Slice(l))
}
job = job[:0]
ensureOrderBatchPool.Put(job) //nolint:staticcheck // Ignore SA6002 safe to ignore and actually fixing it has some performance penalty.
}
wg.Done()
}()
}
nextJob := ensureOrderBatchPool.Get().([][]uint64)
for _, e := range p.m {
for _, l := range e {
workc <- l
nextJob = append(nextJob, l)
if len(nextJob) >= ensureOrderBatchSize {
workc <- nextJob
nextJob = ensureOrderBatchPool.Get().([][]uint64)
}
}
}
// If the last job was partially filled, we need to push it to workers too.
if len(nextJob) > 0 {
workc <- nextJob
}
close(workc)
wg.Wait()
@@ -818,3 +845,10 @@ func (c *PostingsCloner) Clone() Postings {
}
return newListPostings(c.ids...)
}
// uint64Slice attaches the methods of sort.Interface to []uint64, sorting in increasing order.
type uint64Slice []uint64
func (x uint64Slice) Len() int { return len(x) }
func (x uint64Slice) Less(i, j int) bool { return x[i] < x[j] }
func (x uint64Slice) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
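Switching the worker loop from sort.Slice with a closure to sort.Sort on this concrete type is presumably an allocation and CPU optimization: sort.Slice takes an interface{} plus a comparison closure and swaps elements through a reflection-built swapper, whereas a concrete sort.Interface implementation needs neither. A small illustrative comparison, with refs as a stand-in name:

package main

import (
	"fmt"
	"sort"
)

// uint64Slice mirrors the type added above.
type uint64Slice []uint64

func (x uint64Slice) Len() int           { return len(x) }
func (x uint64Slice) Less(i, j int) bool { return x[i] < x[j] }
func (x uint64Slice) Swap(i, j int)      { x[i], x[j] = x[j], x[i] }

func main() {
	refs := []uint64{42, 7, 19}

	// Old form: comparison closure, elements swapped via a reflection-built swapper.
	sort.Slice(refs, func(a, b int) bool { return refs[a] < refs[b] })

	// New form: concrete sort.Interface, no closure and no reflection on the hot path.
	sort.Sort(uint64Slice(refs))

	fmt.Println(refs) // [7 19 42]
}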

@@ -18,6 +18,7 @@ import (
"fmt"
"math/rand"
"sort"
"strconv"
"testing"
"github.com/stretchr/testify/require"
@@ -63,6 +64,59 @@ func TestMemPostings_ensureOrder(t *testing.T) {
}
}
func BenchmarkMemPostings_ensureOrder(b *testing.B) {
tests := map[string]struct {
numLabels int
numValuesPerLabel int
numRefsPerValue int
}{
"many values per label": {
numLabels: 100,
numValuesPerLabel: 10000,
numRefsPerValue: 100,
},
"few values per label": {
numLabels: 1000000,
numValuesPerLabel: 1,
numRefsPerValue: 100,
},
"few refs per label value": {
numLabels: 1000,
numValuesPerLabel: 1000,
numRefsPerValue: 10,
},
}
for testName, testData := range tests {
b.Run(testName, func(b *testing.B) {
p := NewUnorderedMemPostings()
// Generate postings.
for l := 0; l < testData.numLabels; l++ {
labelName := strconv.Itoa(l)
p.m[labelName] = map[string][]uint64{}
for v := 0; v < testData.numValuesPerLabel; v++ {
refs := make([]uint64, testData.numRefsPerValue)
for j := range refs {
refs[j] = rand.Uint64()
}
labelValue := strconv.Itoa(v)
p.m[labelName][labelValue] = refs
}
}
b.ResetTimer()
for n := 0; n < b.N; n++ {
p.EnsureOrder()
p.ordered = false
}
})
}
}
func TestIntersect(t *testing.T) {
a := newListPostings(1, 2, 3)
b := newListPostings(2, 3, 4)

@@ -1,7 +1,7 @@
CodeMirror-promql
=================
[![CircleCI](https://circleci.com/gh/prometheus-community/codemirror-promql.svg?style=shield)](https://circleci.com/gh/prometheus-community/codemirror-promql) [![GitHub license](https://img.shields.io/badge/license-Apache-blue.svg)](./LICENSE)
[![NPM version](https://img.shields.io/npm/v/codemirror-promql.svg)](https://www.npmjs.org/package/codemirror-promql) [![codecov](https://codecov.io/gh/prometheus-community/codemirror-promql/branch/master/graph/badge.svg?token=1OSVPBDKZC)](https://codecov.io/gh/prometheus-community/codemirror-promql)
[![CircleCI](https://circleci.com/gh/prometheus/codemirror-promql.svg?style=shield)](https://circleci.com/gh/prometheus/codemirror-promql) [![GitHub license](https://img.shields.io/badge/license-Apache-blue.svg)](./LICENSE)
[![NPM version](https://img.shields.io/npm/v/codemirror-promql.svg)](https://www.npmjs.org/package/codemirror-promql) [![codecov](https://codecov.io/gh/prometheus/codemirror-promql/branch/master/graph/badge.svg?token=1OSVPBDKZC)](https://codecov.io/gh/prometheus/codemirror-promql)
## Overview
@@ -13,7 +13,7 @@ and autocompletion for PromQL ([Prometheus Query Language](https://prometheus.io
## Where does it come from?
The authoritative copy of this code lives in `prometheus/prometheus` and is synced to
`prometheus-community/codemirror-promql` on a regular basis by a bot. Please contribute any code changes to the code
`prometheus/codemirror-promql` on a regular basis by a bot. Please contribute any code changes to the code
in https://github.com/prometheus/prometheus/tree/main/web/ui/module/codemirror-promql.
### Installation
@@ -222,7 +222,7 @@ const promQL = new PromQLExtension().setComplete({
##### Override the default Prometheus client
In case you are not satisfied by our default Prometheus client, you can still provide your own. It has to implement the
interface [PrometheusClient](https://github.com/prometheus-community/codemirror-promql/blob/master/src/lang-promql/client/prometheus.ts#L111-L117)
interface [PrometheusClient](https://github.com/prometheus/codemirror-promql/blob/master/src/lang-promql/client/prometheus.ts#L111-L117)
.
```typescript
@@ -246,4 +246,4 @@ Note: In case this parameter is provided, then the rest of the configuration is
## License
Apache License 2.0, see [LICENSE](https://github.com/prometheus-community/codemirror-promql/blob/master/LICENSE).
Apache License 2.0, see [LICENSE](https://github.com/prometheus/codemirror-promql/blob/master/LICENSE).

@@ -16,7 +16,7 @@
},
"repository": {
"type": "git",
"url": "git+https://github.com/prometheus-community/codemirror-promql.git"
"url": "git+https://github.com/prometheus/codemirror-promql.git"
},
"keywords": [
"promql",
@@ -27,9 +27,9 @@
"author": "Prometheus Authors <prometheus-developers@googlegroups.com>",
"license": "Apache-2.0",
"bugs": {
"url": "https://github.com/prometheus-community/codemirror-promql/issues"
"url": "https://github.com/prometheus/codemirror-promql/issues"
},
"homepage": "https://github.com/prometheus-community/codemirror-promql/blob/master/README.md",
"homepage": "https://github.com/prometheus/codemirror-promql/blob/master/README.md",
"dependencies": {
"lru-cache": "^6.0.0"
},

@@ -583,7 +583,7 @@ func TestAgentAPIEndPoints(t *testing.T) {
req, err := http.NewRequest("GET", u, nil)
require.NoError(t, err)
webHandler.router.ServeHTTP(w, req)
fmt.Println(u)
fmt.Println(w.Body.String())
require.Equal(t, http.StatusOK, w.Code)
}
}
