mirror of https://github.com/prometheus/prometheus
Merge pull request #3125 from prometheus/bjk/staticcheck
Enable staticcheck at build time.
commit 1ab0bbb2c2
Makefile | 18
@@ -14,6 +14,7 @@
 GO := GO15VENDOREXPERIMENT=1 go
 FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH)))
 PROMU := $(FIRST_GOPATH)/bin/promu
+STATICCHECK := $(FIRST_GOPATH)/bin/staticcheck
 pkgs = $(shell $(GO) list ./... | grep -v /vendor/)

 PREFIX ?= $(shell pwd)
@@ -25,8 +26,15 @@ ifdef DEBUG
 bindata_flags = -debug
 endif

+STATICCHECK_IGNORE = \
+  github.com/prometheus/prometheus/discovery/kubernetes/node.go:SA1019 \
+  github.com/prometheus/prometheus/documentation/examples/remote_storage/remote_storage_adapter/main.go:SA1019 \
+  github.com/prometheus/prometheus/storage/local/codable/codable.go:SA6002 \
+  github.com/prometheus/prometheus/storage/local/persistence.go:SA6002 \
+  github.com/prometheus/prometheus/storage/remote/queue_manager.go:SA1015 \
+  github.com/prometheus/prometheus/web/web.go:SA1019
+
-all: format build test
+all: format staticcheck build test

 style:
	@echo ">> checking code style"
@@ -52,6 +60,10 @@ vet:
	@echo ">> vetting code"
	@$(GO) vet $(pkgs)

+staticcheck: $(STATICCHECK)
+	@echo ">> running staticcheck"
+	@$(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" $(pkgs)
+
 build: promu
	@echo ">> building binaries"
	@$(PROMU) build --prefix $(PREFIX)
@@ -76,5 +88,7 @@ promu:
	GOARCH=$(subst x86_64,amd64,$(patsubst i%86,386,$(shell uname -m))) \
	$(GO) get -u github.com/prometheus/promu

+$(FIRST_GOPATH)/bin/staticcheck:
+	@GOOS= GOARCH= $(GO) get -u honnef.co/go/tools/cmd/staticcheck
+
-.PHONY: all style check_license format build test vet assets tarball docker promu
+.PHONY: all style check_license format build test vet assets tarball docker promu staticcheck $(FIRST_GOPATH)/bin/staticcheck
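The STATICCHECK_IGNORE list whitelists pre-existing findings so the new staticcheck target can gate the default `all` flow without first fixing every call site: SA1019 flags deprecated identifiers, SA1015 flags leaky time.Tick usage, and SA6002 flags non-pointer values stored in a sync.Pool. As a rough, hypothetical sketch of SA6002 (the check suppressed for codable.go and persistence.go), not taken from this repository:

package main

import (
	"fmt"
	"sync"
)

// bufPool hands out reusable byte slices via pointers, so Put does not
// box a fat slice header into an interface{} on every call.
var bufPool = sync.Pool{
	New: func() interface{} { b := make([]byte, 0, 1024); return &b },
}

func main() {
	bufp := bufPool.Get().(*[]byte)
	*bufp = append((*bufp)[:0], "hello"...)
	fmt.Println(string(*bufp))
	bufPool.Put(bufp) // pointer, so no per-Put allocation (SA6002-clean)
}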
@@ -229,7 +229,7 @@ func Main() int {
	webHandler.Ready()
	log.Info("Server is Ready to receive requests.")

-	term := make(chan os.Signal)
+	term := make(chan os.Signal, 1)
	signal.Notify(term, os.Interrupt, syscall.SIGTERM)
	select {
	case <-term:
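This hunk (from the server's Main function, per the hunk header) buffers the shutdown signal channel: signal.Notify sends without blocking, so a signal that arrives before the receiver is ready is silently dropped on an unbuffered channel. staticcheck reports this as SA1017. A minimal standalone sketch of the corrected pattern, not from this commit:

package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	// A buffer of 1 guarantees the first signal is queued even if we
	// are not yet blocked on the receive; Notify never blocks sending.
	term := make(chan os.Signal, 1)
	signal.Notify(term, os.Interrupt, syscall.SIGTERM)

	fmt.Println("waiting for SIGINT or SIGTERM...")
	<-term
	fmt.Println("shutting down")
}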
@@ -130,7 +130,7 @@ func TestTargetURL(t *testing.T) {
 func newTestTarget(targetURL string, deadline time.Duration, labels model.LabelSet) *Target {
	labels = labels.Clone()
	labels[model.SchemeLabel] = "http"
-	labels[model.AddressLabel] = model.LabelValue(strings.TrimLeft(targetURL, "http://"))
+	labels[model.AddressLabel] = model.LabelValue(strings.TrimPrefix(targetURL, "http://"))
	labels[model.MetricsPathLabel] = "/metrics"

	return &Target{
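The test-helper change swaps strings.TrimLeft for strings.TrimPrefix: TrimLeft treats its second argument as a character set, not a prefix, so "http://" would also strip a leading 'h' from the host. staticcheck flags cutsets with repeated characters (SA1024 in current releases). An illustrative sketch, not from this commit:

package main

import (
	"fmt"
	"strings"
)

func main() {
	url := "http://host:9090"
	// TrimLeft removes any leading run of the characters h, t, p, :, /
	// so the leading 'h' of "host" is eaten as well.
	fmt.Println(strings.TrimLeft(url, "http://")) // ost:9090
	// TrimPrefix removes the string "http://" exactly once.
	fmt.Println(strings.TrimPrefix(url, "http://")) // host:9090
}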
@@ -63,6 +63,9 @@ func TestFPMapper(t *testing.T) {
	defer closer.Close()

	mapper, err := newFPMapper(sm, p)
+	if err != nil {
+		t.Fatal(err)
+	}

	// Everything is empty, resolving a FP should do nothing.
	gotFP := mapper.mapFP(fp1, cm11)
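Here, and in the test hunks further down, a previously ignored error return is now checked with t.Fatal: leaving err assigned but never read before it is overwritten is what staticcheck's SA4006 reports. A self-contained sketch of the pattern, with hypothetical setup/run helpers standing in for the real calls:

package main

import (
	"errors"
	"fmt"
)

func setup() (int, error) { return 0, errors.New("setup failed") }
func run() error          { return nil }

func main() {
	// Before the fix: err from setup() was assigned and then
	// overwritten by the run() call below without ever being read,
	// which staticcheck reports as an unused assignment (SA4006).
	v, err := setup()
	if err != nil { // the fix: fail fast on the first error
		fmt.Println("fatal:", err)
		return
	}
	fmt.Println("value:", v)
	err = run()
	fmt.Println("run error:", err)
}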
@@ -462,7 +462,7 @@ func (p *persistence) persistChunks(fp model.Fingerprint, chunks []chunk.Chunk)
	}

	// Determine index within the file.
-	offset, err := f.Seek(0, os.SEEK_CUR)
+	offset, err := f.Seek(0, io.SeekCurrent)
	if err != nil {
		return -1, err
	}
@@ -498,7 +498,7 @@ func (p *persistence) loadChunks(fp model.Fingerprint, indexes []int, indexOffse
	// This loads chunks in batches. A batch is a streak of
	// consecutive chunks, read from disk in one go.
	batchSize := 1
-	if _, err := f.Seek(offsetForChunkIndex(indexes[i]+indexOffset), os.SEEK_SET); err != nil {
+	if _, err := f.Seek(offsetForChunkIndex(indexes[i]+indexOffset), io.SeekStart); err != nil {
		return nil, err
	}

@@ -561,7 +561,7 @@ func (p *persistence) loadChunkDescs(fp model.Fingerprint, offsetFromEnd int) ([
	cds := make([]*chunk.Desc, numChunks)
	chunkTimesBuf := make([]byte, 16)
	for i := 0; i < numChunks; i++ {
-		_, err := f.Seek(offsetForChunkIndex(i)+chunkHeaderFirstTimeOffset, os.SEEK_SET)
+		_, err := f.Seek(offsetForChunkIndex(i)+chunkHeaderFirstTimeOffset, io.SeekStart)
		if err != nil {
			return nil, err
		}
@@ -814,7 +814,7 @@ func (p *persistence) checkpointSeriesMapAndHeads(
	if realNumberOfSeries != numberOfSeriesInHeader {
		// The number of series has changed in the meantime.
		// Rewrite it in the header.
-		if _, err = f.Seek(int64(numberOfSeriesOffset), os.SEEK_SET); err != nil {
+		if _, err = f.Seek(int64(numberOfSeriesOffset), io.SeekStart); err != nil {
			return err
		}
		if err = codable.EncodeUint64(f, realNumberOfSeries); err != nil {
@@ -971,7 +971,7 @@ func (p *persistence) dropAndPersistChunks(
	headerBuf := make([]byte, chunkHeaderLen)
	// Find the first chunk in the file that should be kept.
	for ; ; numDropped++ {
-		_, err = f.Seek(offsetForChunkIndex(numDropped), os.SEEK_SET)
+		_, err = f.Seek(offsetForChunkIndex(numDropped), io.SeekStart)
		if err != nil {
			return
		}
@@ -1007,7 +1007,7 @@ func (p *persistence) dropAndPersistChunks(
	if numDropped == chunkIndexToStartSeek {
		// Nothing to drop. Just adjust the return values and append the chunks (if any).
		numDropped = 0
-		_, err = f.Seek(offsetForChunkIndex(0), os.SEEK_SET)
+		_, err = f.Seek(offsetForChunkIndex(0), io.SeekStart)
		if err != nil {
			return
		}
@@ -1033,7 +1033,7 @@ func (p *persistence) dropAndPersistChunks(
			binary.LittleEndian.Uint64(headerBuf[chunkHeaderFirstTimeOffset:]),
		)
		chunk.Ops.WithLabelValues(chunk.Drop).Add(float64(numDropped))
-		_, err = f.Seek(-chunkHeaderLen, os.SEEK_CUR)
+		_, err = f.Seek(-chunkHeaderLen, io.SeekCurrent)
		if err != nil {
			return
		}
@@ -1354,7 +1354,7 @@ func (p *persistence) openChunkFileForWriting(fp model.Fingerprint) (*os.File, e
	}
	return os.OpenFile(p.fileNameForFingerprint(fp), os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0640)
	// NOTE: Although the file was opened for append,
-	//       f.Seek(0, os.SEEK_CUR)
+	//       f.Seek(0, io.SeekCurrent)
	// would now return '0, nil', so we cannot check for a consistent file length right now.
	// However, the chunkIndexForOffset function is doing that check, so a wrong file length
	// would still be detected.
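All of the persistence hunks above make the same mechanical substitution: the os.SEEK_SET and os.SEEK_CUR constants have been deprecated since Go 1.7 (staticcheck SA1019) in favor of io.SeekStart, io.SeekCurrent, and io.SeekEnd, which carry identical values (0, 1, 2). A minimal sketch of the replacement, not from this commit:

package main

import (
	"fmt"
	"io"
	"os"
)

func main() {
	f, err := os.CreateTemp("", "seekdemo")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer os.Remove(f.Name())
	defer f.Close()

	if _, err := f.WriteString("0123456789"); err != nil {
		fmt.Println(err)
		return
	}
	// Formerly f.Seek(4, os.SEEK_SET); the io constants are the
	// non-deprecated equivalents with the same values.
	if _, err := f.Seek(4, io.SeekStart); err != nil {
		fmt.Println(err)
		return
	}
	buf := make([]byte, 3)
	if _, err := f.Read(buf); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(string(buf)) // 456
}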
@@ -136,6 +136,9 @@ func testPersistLoadDropChunks(t *testing.T, encoding chunk.Encoding) {
	}
	// Load all chunk descs.
	actualChunkDescs, err := p.loadChunkDescs(fp, 0)
+	if err != nil {
+		t.Fatal(err)
+	}
	if len(actualChunkDescs) != 10 {
		t.Errorf("Got %d chunkDescs, want %d.", len(actualChunkDescs), 10)
	}
@@ -1420,7 +1420,7 @@ func testEvictAndPurgeSeries(t *testing.T, encoding chunk.Encoding) {
	// Unarchive metrics.
	s.getOrCreateSeries(fp, model.Metric{})

-	series, ok = s.fpToSeries.get(fp)
+	_, ok = s.fpToSeries.get(fp)
	if !ok {
		t.Fatal("could not find series")
	}
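When only the second return value is of interest, assigning the first to the blank identifier (as in the `_, ok =` change above) both documents the intent and silences the unused-assignment diagnostic (SA4006). A tiny hypothetical sketch:

package main

import "fmt"

func main() {
	m := map[string]int{"up": 1}
	// Only existence matters here; naming the first result would leave
	// a value that is never read, which staticcheck flags.
	_, ok := m["up"]
	fmt.Println("found:", ok)
}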
@@ -68,6 +68,9 @@ func TestStoreHTTPErrorHandling(t *testing.T) {
		url:     &config.URL{serverURL},
		timeout: model.Duration(time.Second),
	})
+	if err != nil {
+		t.Fatal(err)
+	}

	err = c.Store(nil)
	if !reflect.DeepEqual(err, test.err) {