
chore: Fix typos (#14868)

* Fix typos

---------

Signed-off-by: Nathan Baulch <nathan.baulch@gmail.com>
Branch: pull/14898/head
Author: Nathan Baulch, authored 2 months ago; committed by GitHub
Commit: 50cd453c8f
GPG Key ID: B5690EEEBB952194 (no known key found for this signature)
41 changed files, with the number of changed lines for each:

1. CHANGELOG.md (8)
2. README.md (2)
3. cmd/promtool/tsdb.go (10)
4. discovery/moby/mock_test.go (2)
5. discovery/uyuni/uyuni.go (16)
6. docs/configuration/configuration.md (6)
7. docs/storage.md (2)
8. documentation/examples/prometheus-ovhcloud.yml (2)
9. model/histogram/float_histogram.go (6)
10. model/textparse/promparse_test.go (2)
11. model/textparse/protobufparse.go (2)
12. notifier/notifier_test.go (2)
13. promql/engine_test.go (2)
14. promql/query_logger.go (22)
15. promql/query_logger_test.go (8)
16. promql/value.go (2)
17. rules/group.go (2)
18. scrape/manager_test.go (2)
19. storage/buffer.go (6)
20. storage/remote/azuread/azuread_test.go (2)
21. storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go (4)
22. storage/remote/queue_manager.go (2)
23. storage/remote/queue_manager_test.go (2)
24. storage/remote/write.go (2)
25. tsdb/agent/db.go (4)
26. tsdb/chunkenc/histogram_meta_test.go (2)
27. tsdb/chunkenc/xor.go (2)
28. tsdb/chunks/chunk_write_queue.go (2)
29. tsdb/compact_test.go (4)
30. tsdb/db_test.go (32)
31. tsdb/exemplar.go (2)
32. tsdb/head.go (2)
33. tsdb/head_test.go (2)
34. tsdb/querier_test.go (2)
35. tsdb/record/record_test.go (2)
36. util/fmtutil/format.go (12)
37. util/runtime/limits_default.go (2)
38. util/zeropool/pool_test.go (2)
39. web/api/v1/api_test.go (4)
40. web/ui/module/codemirror-promql/src/parser/parser.test.ts (2)
41. web/ui/react-app/src/pages/graph/Panel.tsx (2)

8
CHANGELOG.md

@ -140,7 +140,7 @@ This release changes the default for GOGC, the Go runtime control for the trade-
* [ENHANCEMENT] TSDB: Pause regular block compactions if the head needs to be compacted (prioritize head as it increases memory consumption). #13754
* [ENHANCEMENT] Observability: Improved logging during signal handling termination. #13772
* [ENHANCEMENT] Observability: All log lines for drop series use "num_dropped" key consistently. #13823
* [ENHANCEMENT] Observability: Log chunk snapshot and mmaped chunk replay duration during WAL replay. #13838
* [ENHANCEMENT] Observability: Log chunk snapshot and mmapped chunk replay duration during WAL replay. #13838
* [ENHANCEMENT] Observability: Log if the block is being created from WBL during compaction. #13846
* [BUGFIX] PromQL: Fix inaccurate sample number statistic when querying histograms. #13667
* [BUGFIX] PromQL: Fix `histogram_stddev` and `histogram_stdvar` for cases where the histogram has negative buckets. #13852
@ -677,7 +677,7 @@ The binaries published with this release are built with Go1.17.8 to avoid [CVE-2
## 2.33.0 / 2022-01-29
* [CHANGE] PromQL: Promote negative offset and `@` modifer to stable features. #10121
* [CHANGE] PromQL: Promote negative offset and `@` modifier to stable features. #10121
* [CHANGE] Web: Promote remote-write-receiver to stable. #10119
* [FEATURE] Config: Add `stripPort` template function. #10002
* [FEATURE] Promtool: Add cardinality analysis to `check metrics`, enabled by flag `--extended`. #10045
@ -914,7 +914,7 @@ This vulnerability has been reported by Aaron Devaney from MDSec.
* [ENHANCEMENT] Templating: Enable parsing strings in `humanize` functions. #8682
* [BUGFIX] UI: Provide errors instead of blank page on TSDB Status Page. #8654 #8659
* [BUGFIX] TSDB: Do not panic when writing very large records to the WAL. #8790
* [BUGFIX] TSDB: Avoid panic when mmaped memory is referenced after the file is closed. #8723
* [BUGFIX] TSDB: Avoid panic when mmapped memory is referenced after the file is closed. #8723
* [BUGFIX] Scaleway Discovery: Fix nil pointer dereference. #8737
* [BUGFIX] Consul Discovery: Restart no longer required after config update with no targets. #8766
@ -1840,7 +1840,7 @@ information, read the announcement blog post and migration guide.
## 1.7.0 / 2017-06-06
* [CHANGE] Compress remote storage requests and responses with unframed/raw snappy.
* [CHANGE] Properly ellide secrets in config.
* [CHANGE] Properly elide secrets in config.
* [FEATURE] Add OpenStack service discovery.
* [FEATURE] Add ability to limit Kubernetes service discovery to certain namespaces.
* [FEATURE] Add metric for discovered number of Alertmanagers.

2
README.md

@ -115,7 +115,7 @@ The Makefile provides several targets:
Prometheus is bundled with many service discovery plugins.
When building Prometheus from source, you can edit the [plugins.yml](./plugins.yml)
file to disable some service discoveries. The file is a yaml-formated list of go
file to disable some service discoveries. The file is a yaml-formatted list of go
import path that will be built into the Prometheus binary.
After you have changed the file, you

10
cmd/promtool/tsdb.go

@ -367,25 +367,25 @@ func printBlocks(blocks []tsdb.BlockReader, writeHeader, humanReadable bool) {
fmt.Fprintf(tw,
"%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\n",
meta.ULID,
getFormatedTime(meta.MinTime, humanReadable),
getFormatedTime(meta.MaxTime, humanReadable),
getFormattedTime(meta.MinTime, humanReadable),
getFormattedTime(meta.MaxTime, humanReadable),
time.Duration(meta.MaxTime-meta.MinTime)*time.Millisecond,
meta.Stats.NumSamples,
meta.Stats.NumChunks,
meta.Stats.NumSeries,
getFormatedBytes(b.Size(), humanReadable),
getFormattedBytes(b.Size(), humanReadable),
)
}
}
func getFormatedTime(timestamp int64, humanReadable bool) string {
func getFormattedTime(timestamp int64, humanReadable bool) string {
if humanReadable {
return time.Unix(timestamp/1000, 0).UTC().String()
}
return strconv.FormatInt(timestamp, 10)
}
func getFormatedBytes(bytes int64, humanReadable bool) string {
func getFormattedBytes(bytes int64, humanReadable bool) string {
if humanReadable {
return units.Base2Bytes(bytes).String()
}

2
discovery/moby/mock_test.go

@ -98,7 +98,7 @@ func (m *SDMock) SetupHandlers() {
if len(query) == 2 {
h := sha1.New()
h.Write([]byte(query[1]))
// Avoing long filenames for Windows.
// Avoiding long filenames for Windows.
f += "__" + base64.URLEncoding.EncodeToString(h.Sum(nil))[:10]
}
}

16
discovery/uyuni/uyuni.go

@ -41,10 +41,10 @@ const (
uyuniMetaLabelPrefix = model.MetaLabelPrefix + "uyuni_"
uyuniLabelMinionHostname = uyuniMetaLabelPrefix + "minion_hostname"
uyuniLabelPrimaryFQDN = uyuniMetaLabelPrefix + "primary_fqdn"
uyuniLablelSystemID = uyuniMetaLabelPrefix + "system_id"
uyuniLablelGroups = uyuniMetaLabelPrefix + "groups"
uyuniLablelEndpointName = uyuniMetaLabelPrefix + "endpoint_name"
uyuniLablelExporter = uyuniMetaLabelPrefix + "exporter"
uyuniLabelSystemID = uyuniMetaLabelPrefix + "system_id"
uyuniLabelGroups = uyuniMetaLabelPrefix + "groups"
uyuniLabelEndpointName = uyuniMetaLabelPrefix + "endpoint_name"
uyuniLabelExporter = uyuniMetaLabelPrefix + "exporter"
uyuniLabelProxyModule = uyuniMetaLabelPrefix + "proxy_module"
uyuniLabelMetricsPath = uyuniMetaLabelPrefix + "metrics_path"
uyuniLabelScheme = uyuniMetaLabelPrefix + "scheme"
@ -270,10 +270,10 @@ func (d *Discovery) getEndpointLabels(
model.AddressLabel: model.LabelValue(addr),
uyuniLabelMinionHostname: model.LabelValue(networkInfo.Hostname),
uyuniLabelPrimaryFQDN: model.LabelValue(networkInfo.PrimaryFQDN),
uyuniLablelSystemID: model.LabelValue(strconv.Itoa(endpoint.SystemID)),
uyuniLablelGroups: model.LabelValue(strings.Join(managedGroupNames, d.separator)),
uyuniLablelEndpointName: model.LabelValue(endpoint.EndpointName),
uyuniLablelExporter: model.LabelValue(endpoint.ExporterName),
uyuniLabelSystemID: model.LabelValue(strconv.Itoa(endpoint.SystemID)),
uyuniLabelGroups: model.LabelValue(strings.Join(managedGroupNames, d.separator)),
uyuniLabelEndpointName: model.LabelValue(endpoint.EndpointName),
uyuniLabelExporter: model.LabelValue(endpoint.ExporterName),
uyuniLabelProxyModule: model.LabelValue(endpoint.Module),
uyuniLabelMetricsPath: model.LabelValue(endpoint.Path),
uyuniLabelScheme: model.LabelValue(scheme),

6
docs/configuration/configuration.md

@ -1407,7 +1407,7 @@ authorization:
# `credentials_file`.
[ credentials: <secret> ]
# Sets the credentials to the credentials read from the configured file.
# It is mutuall exclusive with `credentials`.
# It is mutually exclusive with `credentials`.
[ credentials_file: <filename> ]
# Optional OAuth 2.0 configuration, currently not supported by AWS.
@ -2627,7 +2627,7 @@ authorization:
# `credentials_file`.
[ credentials: <secret> ]
# Sets the credentials to the credentials read from the configured file.
# It is mutuall exclusive with `credentials`.
# It is mutually exclusive with `credentials`.
[ credentials_file: <filename> ]
# Optional OAuth 2.0 configuration, currently not supported by AWS.
@ -3988,7 +3988,7 @@ azuread:
# Cannot be used at the same time as basic_auth, authorization, oauth2, sigv4 or azuread.
# To use the default credentials from the Google Cloud SDK, use `google_iam: {}`.
google_iam:
# Service account key with monitoring write permessions.
# Service account key with monitoring write permissions.
credentials_file: <file_name>
# Configures the remote write request's TLS settings.

2
docs/storage.md

@ -144,7 +144,7 @@ a buffer, ensuring that older entries will be removed before the allocated stora
for Prometheus becomes full.
At present, we recommend setting the retention size to, at most, 80-85% of your
allocated Prometheus disk space. This increases the likelihood that older entires
allocated Prometheus disk space. This increases the likelihood that older entries
will be removed prior to hitting any disk limitations.
## Remote storage integrations

2
documentation/examples/prometheus-ovhcloud.yml

@ -1,4 +1,4 @@
# An example scrape configuration for running Prometheus with Ovhcloud.
# An example scrape configuration for running Prometheus with OVHcloud.
scrape_configs:
- job_name: 'ovhcloud'
ovhcloud_sd_configs:

6
model/histogram/float_histogram.go

@ -342,7 +342,7 @@ func (h *FloatHistogram) Add(other *FloatHistogram) (*FloatHistogram, error) {
default:
// All other cases shouldn't actually happen.
// They are a direct collision of CounterReset and NotCounterReset.
// Conservatively set the CounterResetHint to "unknown" and isse a warning.
// Conservatively set the CounterResetHint to "unknown" and issue a warning.
h.CounterResetHint = UnknownCounterReset
// TODO(trevorwhitney): Actually issue the warning as soon as the plumbing for it is in place
}
@ -658,7 +658,7 @@ func detectReset(currIt, prevIt *floatBucketIterator) bool {
if !currIt.Next() {
// Reached end of currIt early, therefore
// previous histogram has a bucket that the
// current one does not have. Unlass all
// current one does not have. Unless all
// remaining buckets in the previous histogram
// are unpopulated, this is a reset.
for {
@ -891,7 +891,7 @@ func (h *FloatHistogram) trimBucketsInZeroBucket() {
// reconcileZeroBuckets finds a zero bucket large enough to include the zero
// buckets of both histograms (the receiving histogram and the other histogram)
// with a zero threshold that is not within a populated bucket in either
// histogram. This method modifies the receiving histogram accourdingly, but
// histogram. This method modifies the receiving histogram accordingly, but
// leaves the other histogram as is. Instead, it returns the zero count the
// other histogram would have if it were modified.
func (h *FloatHistogram) reconcileZeroBuckets(other *FloatHistogram) float64 {

2
model/textparse/promparse_test.go

@ -140,7 +140,7 @@ testmetric{label="\"bar\""} 1`
v: 8.3835e-05,
lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "1.0", "a", "b"),
}, {
// NOTE: Unlike OpenMetrics, Promparse allows spaces between label terms. This appears to be unintended and should probably be fixed.
// NOTE: Unlike OpenMetrics, PromParser allows spaces between label terms. This appears to be unintended and should probably be fixed.
m: `go_gc_duration_seconds { quantile = "2.0" a = "b" }`,
v: 8.3835e-05,
lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "2.0", "a", "b"),

2
model/textparse/protobufparse.go

@ -604,7 +604,7 @@ func readDelimited(b []byte, mf *dto.MetricFamily) (n int, err error) {
return totalLength, mf.Unmarshal(b[varIntLength:totalLength])
}
// formatOpenMetricsFloat works like the usual Go string formatting of a fleat
// formatOpenMetricsFloat works like the usual Go string formatting of a float
// but appends ".0" if the resulting number would otherwise contain neither a
// "." nor an "e".
func formatOpenMetricsFloat(f float64) string {
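The corrected comment describes the intended behavior without showing the function body. A minimal, self-contained sketch of that described behavior (not the actual Prometheus implementation, and ignoring special values such as NaN and Inf) could look like this:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// formatFloatWithTrailingZero formats a float the usual Go way and appends
// ".0" when the result contains neither "." nor "e", so integral values still
// read as floats in the text exposition format.
func formatFloatWithTrailingZero(f float64) string {
	s := strconv.FormatFloat(f, 'g', -1, 64)
	if strings.ContainsAny(s, ".e") {
		return s
	}
	return s + ".0"
}

func main() {
	fmt.Println(formatFloatWithTrailingZero(42))   // "42.0"
	fmt.Println(formatFloatWithTrailingZero(4.2))  // "4.2"
	fmt.Println(formatFloatWithTrailingZero(1e21)) // "1e+21"
}
```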

2
notifier/notifier_test.go

@ -743,7 +743,7 @@ func TestHangingNotifier(t *testing.T) {
// Initialize the discovery manager
// This is relevant as the updates aren't sent continually in real life, but only each updatert.
// The old implementation of TestHangingNotifier didn't take that into acount.
// The old implementation of TestHangingNotifier didn't take that into account.
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
reg := prometheus.NewRegistry()

2
promql/engine_test.go

@ -1467,7 +1467,7 @@ load 10s
},
{
// Nested subquery.
// Now the outmost subquery produces more samples than inner most rate.
// Now the outermost subquery produces more samples than inner most rate.
Query: `rate(rate(bigmetric[10s:1s] @ 10)[100s:25s] @ 1000)[17s:1s] @ 2000`,
MaxSamples: 36,
Start: time.Unix(10, 0),

22
promql/query_logger.go

@ -31,7 +31,7 @@ import (
)
type ActiveQueryTracker struct {
mmapedFile []byte
mmappedFile []byte
getNextIndex chan int
logger log.Logger
closer io.Closer
@ -87,24 +87,24 @@ func logUnfinishedQueries(filename string, filesize int, logger log.Logger) {
}
}
type mmapedFile struct {
type mmappedFile struct {
f io.Closer
m mmap.MMap
}
func (f *mmapedFile) Close() error {
func (f *mmappedFile) Close() error {
err := f.m.Unmap()
if err != nil {
err = fmt.Errorf("mmapedFile: unmapping: %w", err)
err = fmt.Errorf("mmappedFile: unmapping: %w", err)
}
if fErr := f.f.Close(); fErr != nil {
return errors.Join(fmt.Errorf("close mmapedFile.f: %w", fErr), err)
return errors.Join(fmt.Errorf("close mmappedFile.f: %w", fErr), err)
}
return err
}
func getMMapedFile(filename string, filesize int, logger log.Logger) ([]byte, io.Closer, error) {
func getMMappedFile(filename string, filesize int, logger log.Logger) ([]byte, io.Closer, error) {
file, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0o666)
if err != nil {
absPath, pathErr := filepath.Abs(filename)
@ -129,7 +129,7 @@ func getMMapedFile(filename string, filesize int, logger log.Logger) ([]byte, io
return nil, nil, err
}
return fileAsBytes, &mmapedFile{f: file, m: fileAsBytes}, err
return fileAsBytes, &mmappedFile{f: file, m: fileAsBytes}, err
}
func NewActiveQueryTracker(localStoragePath string, maxConcurrent int, logger log.Logger) *ActiveQueryTracker {
@ -141,14 +141,14 @@ func NewActiveQueryTracker(localStoragePath string, maxConcurrent int, logger lo
filename, filesize := filepath.Join(localStoragePath, "queries.active"), 1+maxConcurrent*entrySize
logUnfinishedQueries(filename, filesize, logger)
fileAsBytes, closer, err := getMMapedFile(filename, filesize, logger)
fileAsBytes, closer, err := getMMappedFile(filename, filesize, logger)
if err != nil {
panic("Unable to create mmap-ed active query log")
}
copy(fileAsBytes, "[")
activeQueryTracker := ActiveQueryTracker{
mmapedFile: fileAsBytes,
mmappedFile: fileAsBytes,
closer: closer,
getNextIndex: make(chan int, maxConcurrent),
logger: logger,
@ -206,14 +206,14 @@ func (tracker ActiveQueryTracker) GetMaxConcurrent() int {
}
func (tracker ActiveQueryTracker) Delete(insertIndex int) {
copy(tracker.mmapedFile[insertIndex:], strings.Repeat("\x00", entrySize))
copy(tracker.mmappedFile[insertIndex:], strings.Repeat("\x00", entrySize))
tracker.getNextIndex <- insertIndex
}
func (tracker ActiveQueryTracker) Insert(ctx context.Context, query string) (int, error) {
select {
case i := <-tracker.getNextIndex:
fileBytes := tracker.mmapedFile
fileBytes := tracker.mmappedFile
entry := newJSONEntry(query, tracker.logger)
start, end := i, i+entrySize
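For context on the code these renames touch: the tracker keeps active queries in a memory-mapped file (`queries.active`) so that queries still running at crash time can be logged on restart. A rough sketch of that mmap setup, assuming the github.com/edsrzf/mmap-go package that provides the `mmap.MMap` type seen above; the helper name `openMapped` is hypothetical and this is a simplification, not the Prometheus implementation:

```go
package main

import (
	"os"

	"github.com/edsrzf/mmap-go"
)

// openMapped creates a fixed-size file and maps it read-write, so writes to
// the returned byte slice land in the file even if the process dies.
func openMapped(filename string, size int) ([]byte, func() error, error) {
	f, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0o666)
	if err != nil {
		return nil, nil, err
	}
	// The file must have the target size before mapping.
	if err := f.Truncate(int64(size)); err != nil {
		f.Close()
		return nil, nil, err
	}
	m, err := mmap.Map(f, mmap.RDWR, 0)
	if err != nil {
		f.Close()
		return nil, nil, err
	}
	closer := func() error {
		if err := m.Unmap(); err != nil {
			f.Close()
			return err
		}
		return f.Close()
	}
	return m, closer, nil
}

func main() {
	buf, closeFn, err := openMapped("queries.active", 1024)
	if err != nil {
		panic(err)
	}
	copy(buf, "[") // the tracker seeds the file with a JSON array opener
	_ = closeFn()
}
```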

8
promql/query_logger_test.go

@ -26,7 +26,7 @@ import (
func TestQueryLogging(t *testing.T) {
fileAsBytes := make([]byte, 4096)
queryLogger := ActiveQueryTracker{
mmapedFile: fileAsBytes,
mmappedFile: fileAsBytes,
logger: nil,
getNextIndex: make(chan int, 4),
}
@ -70,7 +70,7 @@ func TestQueryLogging(t *testing.T) {
func TestIndexReuse(t *testing.T) {
queryBytes := make([]byte, 1+3*entrySize)
queryLogger := ActiveQueryTracker{
mmapedFile: queryBytes,
mmappedFile: queryBytes,
logger: nil,
getNextIndex: make(chan int, 3),
}
@ -106,10 +106,10 @@ func TestIndexReuse(t *testing.T) {
func TestMMapFile(t *testing.T) {
dir := t.TempDir()
fpath := filepath.Join(dir, "mmapedFile")
fpath := filepath.Join(dir, "mmappedFile")
const data = "ab"
fileAsBytes, closer, err := getMMapedFile(fpath, 2, nil)
fileAsBytes, closer, err := getMMappedFile(fpath, 2, nil)
require.NoError(t, err)
copy(fileAsBytes, data)
require.NoError(t, closer.Close())

2
promql/value.go

@ -526,7 +526,7 @@ func (ssi *storageSeriesIterator) Next() chunkenc.ValueType {
ssi.currH = p.H
return chunkenc.ValFloatHistogram
default:
panic("storageSeriesIterater.Next failed to pick value type")
panic("storageSeriesIterator.Next failed to pick value type")
}
}

2
rules/group.go

@ -188,7 +188,7 @@ func matchesMatcherSets(matcherSets [][]*labels.Matcher, lbls labels.Labels) boo
return ok
}
// Queryable returns the group's querable.
// Queryable returns the group's queryable.
func (g *Group) Queryable() storage.Queryable { return g.opts.Queryable }
// Context returns the group's context.

2
scrape/manager_test.go

@ -1186,7 +1186,7 @@ scrape_configs:
}
// TestOnlyStaleTargetsAreDropped makes sure that when a job has multiple providers, when one of them should no
// longer discover targets, only the stale targets of that provier are dropped.
// longer discover targets, only the stale targets of that provider are dropped.
func TestOnlyStaleTargetsAreDropped(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

6
storage/buffer.go

@ -241,9 +241,9 @@ type sampleRing struct {
delta int64
// Lookback buffers. We use iBuf for mixed samples, but one of the three
// concrete ones for homogenous samples. (Only one of the four bufs is
// concrete ones for homogeneous samples. (Only one of the four bufs is
// allowed to be populated!) This avoids the overhead of the interface
// wrapper for the happy (and by far most common) case of homogenous
// wrapper for the happy (and by far most common) case of homogeneous
// samples.
iBuf []chunks.Sample
fBuf []fSample
@ -268,7 +268,7 @@ const (
fhBuf
)
// newSampleRing creates a new sampleRing. If you do not know the prefereed
// newSampleRing creates a new sampleRing. If you do not know the preferred
// value type yet, use a size of 0 (in which case the provided typ doesn't
// matter). On the first add, a buffer of size 16 will be allocated with the
// preferred type being the type of the first added sample.
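The corrected comments explain the design choice: keep type-specific buffers for the common homogeneous case and fall back to an interface-typed buffer only for mixed samples. A toy sketch of that pattern, an assumed simplification rather than the storage package itself:

```go
package main

import "fmt"

type sample interface{ ts() int64 }

type fSample struct {
	t int64
	v float64
}

func (s fSample) ts() int64 { return s.t }

// ring keeps a concrete slice for the homogeneous float case and a slice of
// the interface type only once the stream becomes mixed, avoiding boxing
// every sample behind an interface on the happy path.
type ring struct {
	fBuf []fSample
	iBuf []sample
}

func (r *ring) addFloat(s fSample) {
	if r.iBuf == nil {
		r.fBuf = append(r.fBuf, s) // happy path: no interface allocation
		return
	}
	r.iBuf = append(r.iBuf, s)
}

func main() {
	var r ring
	r.addFloat(fSample{t: 1, v: 0.5})
	fmt.Println(len(r.fBuf), len(r.iBuf)) // 1 0
}
```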

2
storage/remote/azuread/azuread_test.go

@ -68,7 +68,7 @@ func (ad *AzureAdTestSuite) TestAzureAdRoundTripper() {
cases := []struct {
cfg *AzureADConfig
}{
// AzureAd roundtripper with Managedidentity.
// AzureAd roundtripper with ManagedIdentity.
{
cfg: &AzureADConfig{
Cloud: "AzurePublic",

4
storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go

@ -171,7 +171,7 @@ func TestConvertBucketsLayout(t *testing.T) {
},
// Downscale:
// 4+2+0+2, 0+0+0+0, 0+0+0+0, 0+0+0+0, 1+0+0+0 = 8, 0, 0, 0, 1
// Check from sclaing from previous: 6+2, 0+0, 0+0, 0+0, 1+0 = 8, 0, 0, 0, 1
// Check from scaling from previous: 6+2, 0+0, 0+0, 0+0, 1+0 = 8, 0, 0, 0, 1
wantDeltas: []int64{8, -7},
},
},
@ -222,7 +222,7 @@ func TestConvertBucketsLayout(t *testing.T) {
},
// Downscale:
// 4+2+0+2, 0+0+0+0, 0+0+0+0, 1+0+0+0 = 8, 0, 0, 1
// Check from sclaing from previous: 6+2, 0+0, 0+0, 1+0 = 8, 0, 0, 1
// Check from scaling from previous: 6+2, 0+0, 0+0, 1+0 = 8, 0, 0, 1
wantDeltas: []int64{8, -8, 0, 1},
},
},

2
storage/remote/queue_manager.go

@ -2027,7 +2027,7 @@ func (t *QueueManager) sendWriteRequestWithBackoff(ctx context.Context, attempt
// make the problem worse, particularly if we're getting rate limited.
//
// reshardDisableTimestamp holds the unix timestamp until which resharding
// is diableld. We'll update that timestamp if the period we were just told
// is disabled. We'll update that timestamp if the period we were just told
// to sleep for is newer than the existing disabled timestamp.
reshardWaitPeriod := time.Now().Add(time.Duration(sleepDuration) * 2)
if oldTS, updated := setAtomicToNewer(&t.reshardDisableEndTimestamp, reshardWaitPeriod.Unix()); updated {

2
storage/remote/queue_manager_test.go

@ -351,7 +351,7 @@ func TestMetadataDelivery(t *testing.T) {
require.Equal(t, 0.0, client_testutil.ToFloat64(m.metrics.failedMetadataTotal))
require.Len(t, c.receivedMetadata, numMetadata)
// One more write than the rounded qoutient should be performed in order to get samples that didn't
// One more write than the rounded quotient should be performed in order to get samples that didn't
// fit into MaxSamplesPerSend.
require.Equal(t, numMetadata/config.DefaultMetadataConfig.MaxSamplesPerSend+1, c.writesReceived)
// Make sure the last samples were sent.

2
storage/remote/write.go

@ -308,7 +308,7 @@ func (t *timestampTracker) AppendHistogram(_ storage.SeriesRef, _ labels.Labels,
func (t *timestampTracker) UpdateMetadata(_ storage.SeriesRef, _ labels.Labels, _ metadata.Metadata) (storage.SeriesRef, error) {
// TODO: Add and increment a `metadata` field when we get around to wiring metadata in remote_write.
// UpadteMetadata is no-op for remote write (where timestampTracker is being used) for now.
// UpdateMetadata is no-op for remote write (where timestampTracker is being used) for now.
return 0, nil
}

4
tsdb/agent/db.go

@ -335,7 +335,7 @@ func validateOptions(opts *Options) *Options {
opts.WALCompression = wlog.CompressionNone
}
// Revert Stripesize to DefaultStripsize if Stripsize is either 0 or not a power of 2.
// Revert StripeSize to DefaultStripeSize if StripeSize is either 0 or not a power of 2.
if opts.StripeSize <= 0 || ((opts.StripeSize & (opts.StripeSize - 1)) != 0) {
opts.StripeSize = tsdb.DefaultStripeSize
}
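As an aside, the condition above uses the standard bitwise power-of-two test: for n > 0, n & (n-1) clears the lowest set bit, so the result is zero exactly when n has a single set bit. A tiny illustrative snippet (not part of the diff):

```go
package main

import "fmt"

// isPowerOfTwo reports whether n is a positive power of two.
func isPowerOfTwo(n int) bool {
	return n > 0 && n&(n-1) == 0
}

func main() {
	for _, n := range []int{0, 1, 3, 4, 4096, 6000} {
		fmt.Println(n, isPowerOfTwo(n)) // 0,3,6000 -> false; 1,4,4096 -> true
	}
}
```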
@ -395,7 +395,7 @@ func (db *DB) replayWAL() error {
return fmt.Errorf("finding WAL segments: %w", err)
}
// Backfil segments from the most recent checkpoint onwards.
// Backfill segments from the most recent checkpoint onwards.
for i := startFrom; i <= last; i++ {
seg, err := wlog.OpenReadSegment(wlog.SegmentName(db.wal.Dir(), i))
if err != nil {

2
tsdb/chunkenc/histogram_meta_test.go

@ -14,7 +14,7 @@
// The code in this file was largely written by Damian Gryski as part of
// https://github.com/dgryski/go-tsz and published under the license below.
// It was modified to accommodate reading from byte slices without modifying
// the underlying bytes, which would panic when reading from mmap'd
// the underlying bytes, which would panic when reading from mmapped
// read-only byte slices.
package chunkenc

2
tsdb/chunkenc/xor.go

@ -14,7 +14,7 @@
// The code in this file was largely written by Damian Gryski as part of
// https://github.com/dgryski/go-tsz and published under the license below.
// It was modified to accommodate reading from byte slices without modifying
// the underlying bytes, which would panic when reading from mmap'd
// the underlying bytes, which would panic when reading from mmapped
// read-only byte slices.
// Copyright (c) 2015,2016 Damian Gryski <damian@gryski.com>

2
tsdb/chunks/chunk_write_queue.go

@ -24,7 +24,7 @@ import (
)
const (
// Minimum recorded peak since the last shrinking of chunkWriteQueue.chunkrefMap to shrink it again.
// Minimum recorded peak since the last shrinking of chunkWriteQueue.chunkRefMap to shrink it again.
chunkRefMapShrinkThreshold = 1000
// Minimum interval between shrinking of chunkWriteQueue.chunkRefMap.

4
tsdb/compact_test.go

@ -1371,7 +1371,7 @@ func TestCancelCompactions(t *testing.T) {
}
// TestDeleteCompactionBlockAfterFailedReload ensures that a failed reloadBlocks immediately after a compaction
// deletes the resulting block to avoid creatings blocks with the same time range.
// deletes the resulting block to avoid creating blocks with the same time range.
func TestDeleteCompactionBlockAfterFailedReload(t *testing.T) {
tests := map[string]func(*DB) int{
"Test Head Compaction": func(db *DB) int {
@ -2114,7 +2114,7 @@ func TestDelayedCompactionDoesNotBlockUnrelatedOps(t *testing.T) {
t.Parallel()
tmpdir := t.TempDir()
// Some blocks that need compation are present.
// Some blocks that need compaction are present.
createBlock(t, tmpdir, genSeries(1, 1, 0, 100))
createBlock(t, tmpdir, genSeries(1, 1, 100, 200))
createBlock(t, tmpdir, genSeries(1, 1, 200, 300))

32
tsdb/db_test.go

@ -245,8 +245,8 @@ func TestDataAvailableOnlyAfterCommit(t *testing.T) {
func TestNoPanicAfterWALCorruption(t *testing.T) {
db := openTestDB(t, &Options{WALSegmentSize: 32 * 1024}, nil)
// Append until the first mmaped head chunk.
// This is to ensure that all samples can be read from the mmaped chunks when the WAL is corrupted.
// Append until the first mmapped head chunk.
// This is to ensure that all samples can be read from the mmapped chunks when the WAL is corrupted.
var expSamples []chunks.Sample
var maxt int64
ctx := context.Background()
@ -265,7 +265,7 @@ func TestNoPanicAfterWALCorruption(t *testing.T) {
// Corrupt the WAL after the first sample of the series so that it has at least one sample and
// it is not garbage collected.
// The repair deletes all WAL records after the corrupted record and these are read from the mmaped chunk.
// The repair deletes all WAL records after the corrupted record and these are read from the mmapped chunk.
{
walFiles, err := os.ReadDir(path.Join(db.Dir(), "wal"))
require.NoError(t, err)
@ -2650,7 +2650,7 @@ func TestDBReadOnly_Querier_NoAlteration(t *testing.T) {
spinUpQuerierAndCheck := func(dir, sandboxDir string, chunksCount int) {
dBDirHash := dirHash(dir)
// Bootsrap a RO db from the same dir and set up a querier.
// Bootstrap a RO db from the same dir and set up a querier.
dbReadOnly, err := OpenDBReadOnly(dir, sandboxDir, nil)
require.NoError(t, err)
require.Equal(t, chunksCount, countChunks(dir))
@ -2669,7 +2669,7 @@ func TestDBReadOnly_Querier_NoAlteration(t *testing.T) {
require.NoError(t, db.Close())
}()
// Append until the first mmaped head chunk.
// Append until the first mmapped head chunk.
for i := 0; i < 121; i++ {
app := db.Appender(context.Background())
_, err := app.Append(0, labels.FromStrings("foo", "bar"), int64(i), 0)
@ -5156,7 +5156,7 @@ func Test_Querier_OOOQuery(t *testing.T) {
},
},
{
name: "query inorder contain ooo mmaped samples returns all ingested samples at the beginning of the interval",
name: "query inorder contain ooo mmapped samples returns all ingested samples at the beginning of the interval",
oooCap: 5,
queryMinT: minutes(0),
queryMaxT: minutes(200),
@ -5169,7 +5169,7 @@ func Test_Querier_OOOQuery(t *testing.T) {
},
{
minT: minutes(101),
maxT: minutes(101 + (5-1)*2), // Append samples to fit in a single mmmaped OOO chunk and fit inside the first in-order mmaped chunk.
maxT: minutes(101 + (5-1)*2), // Append samples to fit in a single mmapped OOO chunk and fit inside the first in-order mmapped chunk.
filter: func(t int64) bool { return t%2 == 1 },
isOOO: true,
},
@ -5182,7 +5182,7 @@ func Test_Querier_OOOQuery(t *testing.T) {
},
},
{
name: "query overlapping inorder and ooo mmaped samples returns all ingested samples at the beginning of the interval",
name: "query overlapping inorder and ooo mmapped samples returns all ingested samples at the beginning of the interval",
oooCap: 30,
queryMinT: minutes(0),
queryMaxT: minutes(200),
@ -5195,7 +5195,7 @@ func Test_Querier_OOOQuery(t *testing.T) {
},
{
minT: minutes(101),
maxT: minutes(101 + (30-1)*2), // Append samples to fit in a single mmmaped OOO chunk and overlap the first in-order mmaped chunk.
maxT: minutes(101 + (30-1)*2), // Append samples to fit in a single mmapped OOO chunk and overlap the first in-order mmapped chunk.
filter: func(t int64) bool { return t%2 == 1 },
isOOO: true,
},
@ -5367,7 +5367,7 @@ func Test_ChunkQuerier_OOOQuery(t *testing.T) {
},
},
{
name: "query inorder contain ooo mmaped samples returns all ingested samples at the beginning of the interval",
name: "query inorder contain ooo mmapped samples returns all ingested samples at the beginning of the interval",
oooCap: 5,
queryMinT: minutes(0),
queryMaxT: minutes(200),
@ -5380,7 +5380,7 @@ func Test_ChunkQuerier_OOOQuery(t *testing.T) {
},
{
minT: minutes(101),
maxT: minutes(101 + (5-1)*2), // Append samples to fit in a single mmmaped OOO chunk and fit inside the first in-order mmaped chunk.
maxT: minutes(101 + (5-1)*2), // Append samples to fit in a single mmapped OOO chunk and fit inside the first in-order mmapped chunk.
filter: func(t int64) bool { return t%2 == 1 },
isOOO: true,
},
@ -5393,7 +5393,7 @@ func Test_ChunkQuerier_OOOQuery(t *testing.T) {
},
},
{
name: "query overlapping inorder and ooo mmaped samples returns all ingested samples at the beginning of the interval",
name: "query overlapping inorder and ooo mmapped samples returns all ingested samples at the beginning of the interval",
oooCap: 30,
queryMinT: minutes(0),
queryMaxT: minutes(200),
@ -5406,7 +5406,7 @@ func Test_ChunkQuerier_OOOQuery(t *testing.T) {
},
{
minT: minutes(101),
maxT: minutes(101 + (30-1)*2), // Append samples to fit in a single mmmaped OOO chunk and overlap the first in-order mmaped chunk.
maxT: minutes(101 + (30-1)*2), // Append samples to fit in a single mmapped OOO chunk and overlap the first in-order mmapped chunk.
filter: func(t int64) bool { return t%2 == 1 },
isOOO: true,
},
@ -5555,7 +5555,7 @@ func testOOOAppendAndQuery(t *testing.T, scenario sampleTypeScenario) {
addSample(s2, 255, 265, false)
verifyOOOMinMaxTimes(250, 265)
testQuery(math.MinInt64, math.MaxInt64)
testQuery(minutes(250), minutes(265)) // Test querying ono data time range
testQuery(minutes(250), minutes(265)) // Test querying ooo data time range
testQuery(minutes(290), minutes(300)) // Test querying in-order data time range
testQuery(minutes(250), minutes(300)) // Test querying the entire range
@ -7468,7 +7468,7 @@ func TestAbortBlockCompactions(t *testing.T) {
defer func() {
require.NoError(t, db.Close())
}()
// It should NOT be compactible at the beginning of the test
// It should NOT be compactable at the beginning of the test
require.False(t, db.head.compactable(), "head should NOT be compactable")
// Track the number of compactions run inside db.compactBlocks()
@ -7478,7 +7478,7 @@ func TestAbortBlockCompactions(t *testing.T) {
db.compactor = &mockCompactorFn{
planFn: func() ([]string, error) {
// On every Plan() run increment compactions. After 4 compactions
// update HEAD to make it compactible to force an exit from db.compactBlocks() loop.
// update HEAD to make it compactable to force an exit from db.compactBlocks() loop.
compactions++
if compactions > 3 {
chunkRange := db.head.chunkRange.Load()

2
tsdb/exemplar.go

@ -29,7 +29,7 @@ import (
)
const (
// Indicates that there is no index entry for an exmplar.
// Indicates that there is no index entry for an exemplar.
noExemplar = -1
// Estimated number of exemplars per series, for sizing the index.
estimatedExemplarsPerSeries = 16

2
tsdb/head.go

@ -2090,7 +2090,7 @@ type memSeries struct {
// before compaction: mmappedChunks=[p5,p6,p7,p8,p9] firstChunkID=5
// after compaction: mmappedChunks=[p7,p8,p9] firstChunkID=7
//
// pN is the pointer to the mmappedChunk referered to by HeadChunkID=N
// pN is the pointer to the mmappedChunk referred to by HeadChunkID=N
mmappedChunks []*mmappedChunk
// Most recent chunks in memory that are still being built or waiting to be mmapped.
// This is a linked list, headChunks points to the most recent chunk, headChunks.next points

2
tsdb/head_test.go

@ -1060,7 +1060,7 @@ func TestMemSeries_truncateChunks_scenarios(t *testing.T) {
tests := []struct {
name string
headChunks int // the number of head chubks to create on memSeries by appending enough samples
headChunks int // the number of head chunks to create on memSeries by appending enough samples
mmappedChunks int // the number of mmapped chunks to create on memSeries by appending enough samples
truncateBefore int64 // the mint to pass to truncateChunksBefore()
expectedTruncated int // the number of chunks that we're expecting be truncated and returned by truncateChunksBefore()

2
tsdb/querier_test.go

@ -3235,7 +3235,7 @@ func (m mockMatcherIndex) LabelValueFor(context.Context, storage.SeriesRef, stri
}
func (m mockMatcherIndex) LabelNamesFor(ctx context.Context, postings index.Postings) ([]string, error) {
return nil, errors.New("label names for for called")
return nil, errors.New("label names for called")
}
func (m mockMatcherIndex) Postings(context.Context, string, ...string) (index.Postings, error) {

2
tsdb/record/record_test.go

@ -166,7 +166,7 @@ func TestRecord_EncodeDecode(t *testing.T) {
require.NoError(t, err)
require.Equal(t, floatHistograms, decFloatHistograms)
// Gauge ingeger histograms.
// Gauge integer histograms.
for i := range histograms {
histograms[i].H.CounterResetHint = histogram.GaugeType
}

12
util/fmtutil/format.go

@ -113,7 +113,7 @@ func makeTimeseries(wr *prompb.WriteRequest, labels map[string]string, m *dto.Me
toTimeseries(wr, labels, timestamp, m.GetCounter().GetValue())
case m.Summary != nil:
metricName := labels[model.MetricNameLabel]
// Preserve metric name order with first quantile labels timeseries then sum suffix timeserie and finally count suffix timeserie
// Preserve metric name order with first quantile labels timeseries then sum suffix timeseries and finally count suffix timeseries
// Add Summary quantile timeseries
quantileLabels := make(map[string]string, len(labels)+1)
for key, value := range labels {
@ -125,16 +125,16 @@ func makeTimeseries(wr *prompb.WriteRequest, labels map[string]string, m *dto.Me
toTimeseries(wr, quantileLabels, timestamp, q.GetValue())
}
// Overwrite label model.MetricNameLabel for count and sum metrics
// Add Summary sum timeserie
// Add Summary sum timeseries
labels[model.MetricNameLabel] = metricName + sumStr
toTimeseries(wr, labels, timestamp, m.GetSummary().GetSampleSum())
// Add Summary count timeserie
// Add Summary count timeseries
labels[model.MetricNameLabel] = metricName + countStr
toTimeseries(wr, labels, timestamp, float64(m.GetSummary().GetSampleCount()))
case m.Histogram != nil:
metricName := labels[model.MetricNameLabel]
// Preserve metric name order with first bucket suffix timeseries then sum suffix timeserie and finally count suffix timeserie
// Preserve metric name order with first bucket suffix timeseries then sum suffix timeseries and finally count suffix timeseries
// Add Histogram bucket timeseries
bucketLabels := make(map[string]string, len(labels)+1)
for key, value := range labels {
@ -146,10 +146,10 @@ func makeTimeseries(wr *prompb.WriteRequest, labels map[string]string, m *dto.Me
toTimeseries(wr, bucketLabels, timestamp, float64(b.GetCumulativeCount()))
}
// Overwrite label model.MetricNameLabel for count and sum metrics
// Add Histogram sum timeserie
// Add Histogram sum timeseries
labels[model.MetricNameLabel] = metricName + sumStr
toTimeseries(wr, labels, timestamp, m.GetHistogram().GetSampleSum())
// Add Histogram count timeserie
// Add Histogram count timeseries
labels[model.MetricNameLabel] = metricName + countStr
toTimeseries(wr, labels, timestamp, float64(m.GetHistogram().GetSampleCount()))
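The corrected comments describe the ordering of the generated series: the quantile (or bucket) series first, then the sum series, then the count series, all derived from the same base labels. A simplified sketch of that ordering for a summary, using plain structs instead of the prompb/dto types and assuming the conventional "_sum"/"_count" metric-name suffixes; the metric name below is just an example:

```go
package main

import "fmt"

type series struct {
	name   string
	labels map[string]string
	value  float64
}

// summaryToSeries emits one series per quantile first, then the "_sum"
// series, then the "_count" series, mirroring the order described above.
func summaryToSeries(name string, quantiles map[string]float64, sum float64, count uint64) []series {
	out := make([]series, 0, len(quantiles)+2)
	for q, v := range quantiles {
		out = append(out, series{name: name, labels: map[string]string{"quantile": q}, value: v})
	}
	out = append(out, series{name: name + "_sum", value: sum})
	out = append(out, series{name: name + "_count", value: float64(count)})
	return out
}

func main() {
	for _, s := range summaryToSeries("http_request_duration_seconds",
		map[string]float64{"0.5": 0.12, "0.9": 0.35}, 42.7, 120) {
		fmt.Printf("%s %v %g\n", s.name, s.labels, s.value)
	}
}
```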

2
util/runtime/limits_default.go

@ -23,7 +23,7 @@ import (
// syscall.RLIM_INFINITY is a constant.
// Its type is int on most architectures but there are exceptions such as loong64.
// Uniform it to uint accorind to the standard.
// Uniform it to uint according to the standard.
// https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/sys_resource.h.html
var unlimited uint64 = syscall.RLIM_INFINITY & math.MaxUint64

2
util/zeropool/pool_test.go

@ -81,7 +81,7 @@ func TestPool(t *testing.T) {
t.Run("does not allocate", func(t *testing.T) {
pool := zeropool.New(func() []byte { return make([]byte, 1024) })
// Warm up, this will alloate one slice.
// Warm up, this will allocate one slice.
slice := pool.Get()
pool.Put(slice)

4
web/api/v1/api_test.go

@ -4034,13 +4034,13 @@ func TestGetGlobalURL(t *testing.T) {
false,
},
{
mustParseURL(t, "http://exemple.com"),
mustParseURL(t, "http://example.com"),
GlobalURLOptions{
ListenAddress: "127.0.0.1:9090",
Host: "prometheus.io",
Scheme: "https",
},
mustParseURL(t, "http://exemple.com"),
mustParseURL(t, "http://example.com"),
false,
},
{

2
web/ui/module/codemirror-promql/src/parser/parser.test.ts

@ -528,7 +528,7 @@ describe('promql operations', () => {
},
],
},
// test aggregration
// test aggregation
{
expr: 'sum by (foo)(some_metric)',
expectedValueType: ValueType.vector,

2
web/ui/react-app/src/pages/graph/Panel.tsx

@ -136,7 +136,7 @@ class Panel extends Component<PanelProps, PanelState> {
this.abortInFlightFetch = () => abortController.abort();
this.setState({ loading: true });
const endTime = this.getEndTime().valueOf() / 1000; // TODO: shouldn't valueof only work when it's a moment?
const endTime = this.getEndTime().valueOf() / 1000; // TODO: shouldn't valueOf only work when it's a moment?
const startTime = endTime - this.props.options.range / 1000;
const resolution = this.props.options.resolution || Math.max(Math.floor(this.props.options.range / 250000), 1);
const params: URLSearchParams = new URLSearchParams({
