
fix the wrong word (#6069)

Signed-off-by: chentanjun <2799194073@qq.com>
Authored 5 years ago by 陈谭军, committed by Chris Marchbanks
commit 103f26d188
  1. .circleci/config.yml (2 changes)
  2. storage/remote/client.go (2 changes)
  3. storage/remote/write.go (2 changes)
  4. tsdb/block.go (2 changes)
  5. tsdb/compact_test.go (2 changes)
  6. tsdb/db.go (6 changes)
  7. tsdb/goversion/goversion.go (2 changes)
  8. tsdb/head_test.go (2 changes)
  9. tsdb/querier.go (4 changes)
  10. tsdb/wal.go (2 changes)
  11. tsdb/wal/wal_test.go (2 changes)
  12. tsdb/wal_test.go (2 changes)

.circleci/config.yml (2 changes)

@@ -24,7 +24,7 @@ jobs:
- run:
command: make
environment:
-# Run garbage collection more aggresively to avoid getting OOMed during the lint phase.
+# Run garbage collection more aggressively to avoid getting OOMed during the lint phase.
GOGC: "20"
# By default Go uses GOMAXPROCS but a Circle CI executor has many
# cores (> 30) while the CPU and RAM resources are throttled. If we
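
For context, the comment being fixed refers to Go's GOGC knob: lowering it from the default 100 to 20 makes the runtime start a collection after the live heap has grown by only 20%, trading CPU time for a lower peak RSS during linting. As a rough, hypothetical sketch (not part of this change), the same setting can also be applied in-process via runtime/debug:

    package main

    import (
        "fmt"
        "runtime/debug"
    )

    func main() {
        // Equivalent of exporting GOGC=20: trigger GC once the heap has grown
        // 20% past the amount live after the previous collection.
        old := debug.SetGCPercent(20)
        fmt.Printf("GC target lowered from %d%% to 20%%\n", old)
    }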

storage/remote/client.go (2 changes)

@@ -77,7 +77,7 @@ type recoverableError struct {
func (c *Client) Store(ctx context.Context, req []byte) error {
httpReq, err := http.NewRequest("POST", c.url.String(), bytes.NewReader(req))
if err != nil {
-// Errors from NewRequest are from unparseable URLs, so are not
+// Errors from NewRequest are from unparsable URLs, so are not
// recoverable.
return err
}
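
The comment here draws a line between permanent failures (the request could not even be built, e.g. an unparsable URL) and transient ones that remote write may retry. A small, hypothetical standalone sketch of that split; the recoverable type and send helper below are stand-ins for the package's recoverableError and Store, not the actual implementation:

    package main

    import (
        "context"
        "fmt"
        "net/http"
    )

    // recoverable marks failures that a caller may retry.
    type recoverable struct{ error }

    // send mirrors the split made in Store: an unparsable URL fails while
    // building the request and is permanent; a transport error is retryable.
    func send(ctx context.Context, url string, c *http.Client) error {
        req, err := http.NewRequest("POST", url, nil)
        if err != nil {
            return err // could not even build the request: not recoverable
        }
        resp, err := c.Do(req.WithContext(ctx))
        if err != nil {
            return recoverable{err}
        }
        resp.Body.Close()
        return nil
    }

    func main() {
        // ":" cannot be parsed into a URL, so send fails permanently.
        err := send(context.Background(), ":", http.DefaultClient)
        _, retryable := err.(recoverable)
        fmt.Println("retryable:", retryable, "error:", err)
    }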

storage/remote/write.go (2 changes)

@@ -122,7 +122,7 @@ func (rws *WriteStorage) ApplyConfig(conf *config.Config) error {
}
// Use RemoteWriteConfigs and its index to get hash. So if its index changed,
-// the correspoinding queue should also be restarted.
+// the corresponding queue should also be restarted.
hash := md5.Sum(b)
if i < len(rws.queues) && rws.hashes[i] == hash && externalLabelUnchanged {
// The RemoteWriteConfig and index both not changed, keep the queue.
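
The surrounding code decides whether a remote-write queue must be restarted by hashing each RemoteWriteConfig together with its index, so both a changed config and a reordered list produce a new hash. A small, hypothetical illustration of that change-detection idea (the helper name and JSON serialisation are assumptions, not the actual implementation):

    package main

    import (
        "crypto/md5"
        "encoding/json"
        "fmt"
    )

    // configHash mixes an entry's position into its serialised form, so moving a
    // config within the list changes its hash even if the config itself did not.
    func configHash(index int, cfg interface{}) ([16]byte, error) {
        b, err := json.Marshal(struct {
            Index  int
            Config interface{}
        }{index, cfg})
        if err != nil {
            return [16]byte{}, err
        }
        return md5.Sum(b), nil
    }

    func main() {
        cfg := map[string]string{"url": "http://localhost:9201/write"}
        h0, _ := configHash(0, cfg)
        h1, _ := configHash(1, cfg)
        // The same config at a different index hashes differently, so its
        // queue would be restarted rather than kept.
        fmt.Println(h0 != h1)
    }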

tsdb/block.go (2 changes)

@@ -275,7 +275,7 @@ type Block struct {
meta BlockMeta
// Symbol Table Size in bytes.
-// We maintain this variable to avoid recalculation everytime.
+// We maintain this variable to avoid recalculation every time.
symbolTableSize uint64
chunkr ChunkReader
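
The field in this hunk exists so the symbol table size is computed once and then reused instead of being recomputed on every call. A generic, lazily evaluated variant of that caching idea (names are hypothetical, not the Block implementation):

    package main

    import (
        "fmt"
        "sync"
    )

    // lazySize computes an expensive size at most once and caches the result.
    type lazySize struct {
        once sync.Once
        size uint64
    }

    func (l *lazySize) get(compute func() uint64) uint64 {
        l.once.Do(func() { l.size = compute() })
        return l.size
    }

    func main() {
        calls := 0
        s := &lazySize{}
        expensive := func() uint64 { calls++; return 42 }
        fmt.Println(s.get(expensive), s.get(expensive), "computed", calls, "time(s)")
    }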

tsdb/compact_test.go (2 changes)

@@ -936,7 +936,7 @@ func TestCancelCompactions(t *testing.T) {
testutil.Ok(t, os.RemoveAll(tmpdirCopy))
}()
-// Measure the compaction time without interupting it.
+// Measure the compaction time without interrupting it.
var timeCompactionUninterrupted time.Duration
{
db, err := Open(tmpdir, log.NewNopLogger(), nil, &Options{BlockRanges: []int64{1, 2000}})

6
tsdb/db.go

@ -260,7 +260,7 @@ func newDBMetrics(db *DB, r prometheus.Registerer) *dbMetrics {
var ErrClosed = errors.New("db already closed")
// DBReadOnly provides APIs for read only operations on a database.
-// Current implementation doesn't support concurency so
+// Current implementation doesn't support concurrency so
// all API calls should happen in the same go routine.
type DBReadOnly struct {
logger log.Logger
@@ -272,7 +272,7 @@ type DBReadOnly struct {
// OpenDBReadOnly opens DB in the given directory for read only operations.
func OpenDBReadOnly(dir string, l log.Logger) (*DBReadOnly, error) {
if _, err := os.Stat(dir); err != nil {
-return nil, errors.Wrap(err, "openning the db dir")
+return nil, errors.Wrap(err, "opening the db dir")
}
if l == nil {
@@ -359,7 +359,7 @@ func (db *DBReadOnly) Querier(mint, maxt int64) (Querier, error) {
maxBlockTime = blocks[len(blocks)-1].Meta().MaxTime
}
-// Also add the WAL if the current blocks don't cover the requestes time range.
+// Also add the WAL if the current blocks don't cover the requests time range.
if maxBlockTime <= maxt {
w, err := wal.Open(db.logger, nil, filepath.Join(db.dir, "wal"))
if err != nil {
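
Two of the db.go hunks touch DBReadOnly: the open path wraps the os.Stat failure with a short context message, and the querier only falls back to the WAL when the newest block ends before the requested max time. A minimal, hypothetical sketch of the error-wrapping style used in the second hunk, assuming github.com/pkg/errors:

    package main

    import (
        "fmt"
        "os"

        "github.com/pkg/errors"
    )

    // checkDir mirrors the OpenDBReadOnly check: the original os.Stat error is
    // kept as the cause, with a short "opening the db dir" context prefix.
    func checkDir(dir string) error {
        if _, err := os.Stat(dir); err != nil {
            return errors.Wrap(err, "opening the db dir")
        }
        return nil
    }

    func main() {
        if err := checkDir("/does/not/exist"); err != nil {
            // Prints the context followed by the underlying error, e.g.
            // "opening the db dir: stat /does/not/exist: no such file or directory".
            fmt.Println(err)
        }
    }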

tsdb/goversion/goversion.go (2 changes)

@@ -13,7 +13,7 @@
// +build go1.12
-// Package goversion enforces the go version suported by the tsdb module.
+// Package goversion enforces the go version supported by the tsdb module.
package goversion
const _SoftwareRequiresGOVERSION1_12 = uint8(0)
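
This file only builds under the go1.12 build tag. The usual way such a guard is wired up (sketched below; the companion file name and exact wiring are assumptions) is a second, untagged file that references the constant: on an older toolchain the tagged file is dropped from the build, the reference no longer resolves, and compilation fails with an error naming _SoftwareRequiresGOVERSION1_12.

    // init.go (hypothetical companion file with no build tag)
    package goversion

    // If the toolchain is older than Go 1.12, goversion.go above is excluded by
    // its build constraint, this identifier is undefined, and the build fails
    // with: undefined: _SoftwareRequiresGOVERSION1_12
    var _ = _SoftwareRequiresGOVERSION1_12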

tsdb/head_test.go (2 changes)

@@ -149,7 +149,7 @@ func TestHead_ReadWAL(t *testing.T) {
s100 := head.series.getByID(100)
testutil.Equals(t, labels.FromStrings("a", "1"), s10.lset)
-testutil.Equals(t, (*memSeries)(nil), s11) // Series without samples should be garbage colected at head.Init().
+testutil.Equals(t, (*memSeries)(nil), s11) // Series without samples should be garbage collected at head.Init().
testutil.Equals(t, labels.FromStrings("a", "4"), s50.lset)
testutil.Equals(t, labels.FromStrings("a", "3"), s100.lset)

tsdb/querier.go (4 changes)

@@ -906,7 +906,7 @@ func (s *chainedSeries) Iterator() SeriesIterator {
return newChainedSeriesIterator(s.series...)
}
-// chainedSeriesIterator implements a series iterater over a list
+// chainedSeriesIterator implements a series iterator over a list
// of time-sorted, non-overlapping iterators.
type chainedSeriesIterator struct {
series []Series // series in time order
@@ -977,7 +977,7 @@ func (s *verticalChainedSeries) Iterator() SeriesIterator {
return newVerticalMergeSeriesIterator(s.series...)
}
-// verticalMergeSeriesIterator implements a series iterater over a list
+// verticalMergeSeriesIterator implements a series iterator over a list
// of time-sorted, time-overlapping iterators.
type verticalMergeSeriesIterator struct {
a, b SeriesIterator
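
The two comments fixed here describe sibling iterators: chainedSeriesIterator walks time-sorted, non-overlapping series one after another, while verticalMergeSeriesIterator has to merge overlapping ones. A stripped-down sketch of the non-overlapping case, using a reduced iterator interface (the real SeriesIterator carries more methods such as Seek and Err):

    package main

    import "fmt"

    // iter is a reduced stand-in for SeriesIterator, for illustration only.
    type iter interface {
        Next() bool
        At() (t int64, v float64)
    }

    // sliceIter iterates over in-memory (t, v) pairs.
    type sliceIter struct {
        ts []int64
        vs []float64
        i  int
    }

    func (s *sliceIter) Next() bool           { s.i++; return s.i <= len(s.ts) }
    func (s *sliceIter) At() (int64, float64) { return s.ts[s.i-1], s.vs[s.i-1] }

    // chained walks a list of time-sorted, non-overlapping iterators in order,
    // moving to the next one as soon as the current one is exhausted.
    type chained struct {
        its []iter
        cur int
    }

    func (c *chained) Next() bool {
        for c.cur < len(c.its) {
            if c.its[c.cur].Next() {
                return true
            }
            c.cur++
        }
        return false
    }

    func (c *chained) At() (int64, float64) { return c.its[c.cur].At() }

    func main() {
        it := &chained{its: []iter{
            &sliceIter{ts: []int64{1, 2}, vs: []float64{1.0, 2.0}},
            &sliceIter{ts: []int64{5, 6}, vs: []float64{5.0, 6.0}},
        }}
        for it.Next() {
            t, v := it.At()
            fmt.Println(t, v)
        }
    }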

tsdb/wal.go (2 changes)

@@ -915,7 +915,7 @@ func (r *walReader) Read(
et, flag, b := r.at()
// In decoding below we never return a walCorruptionErr for now.
-// Those should generally be catched by entry decoding before.
+// Those should generally be caught by entry decoding before.
switch et {
case WALEntrySeries:
var series []record.RefSeries

tsdb/wal/wal_test.go (2 changes)

@@ -46,7 +46,7 @@ func TestWALRepair_ReadingError(t *testing.T) {
8,
},
// Ensures that the page buffer is big enough to fit
-// an entire page size without panicing.
+// an entire page size without panicking.
// https://github.com/prometheus/prometheus/tsdb/pull/414
"bad_header": {
1,

tsdb/wal_test.go (2 changes)

@@ -401,7 +401,7 @@ func TestWALRestoreCorrupted(t *testing.T) {
// cut() truncates and fsyncs the first segment async. If it happens after
// the corruption we apply below, the corruption will be overwritten again.
-// Fire and forget a sync to avoid flakyness.
+// Fire and forget a sync to avoid flakiness.
w.files[0].Sync()
// Corrupt the second entry in the first file.
// After re-opening we must be able to read the first entry
