Merge pull request #3125 from prometheus/bjk/staticcheck

Enable staticcheck at build time.
pull/3167/head
Ben Kochie 2017-09-13 14:42:29 -07:00 committed by GitHub
commit 1ab0bbb2c2
8 changed files with 36 additions and 13 deletions

View File

@ -14,6 +14,7 @@
GO := GO15VENDOREXPERIMENT=1 go
FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH)))
PROMU := $(FIRST_GOPATH)/bin/promu
STATICCHECK := $(FIRST_GOPATH)/bin/staticcheck
pkgs = $(shell $(GO) list ./... | grep -v /vendor/)
PREFIX ?= $(shell pwd)
@ -25,8 +26,15 @@ ifdef DEBUG
bindata_flags = -debug
endif
STATICCHECK_IGNORE = \
github.com/prometheus/prometheus/discovery/kubernetes/node.go:SA1019 \
github.com/prometheus/prometheus/documentation/examples/remote_storage/remote_storage_adapter/main.go:SA1019 \
github.com/prometheus/prometheus/storage/local/codable/codable.go:SA6002 \
github.com/prometheus/prometheus/storage/local/persistence.go:SA6002 \
github.com/prometheus/prometheus/storage/remote/queue_manager.go:SA1015 \
github.com/prometheus/prometheus/web/web.go:SA1019
all: format build test
all: format staticcheck build test
style:
@echo ">> checking code style"
@ -52,6 +60,10 @@ vet:
@echo ">> vetting code"
@$(GO) vet $(pkgs)
staticcheck: $(STATICCHECK)
@echo ">> running staticcheck"
@$(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" $(pkgs)
build: promu
@echo ">> building binaries"
@$(PROMU) build --prefix $(PREFIX)
@ -76,5 +88,7 @@ promu:
GOARCH=$(subst x86_64,amd64,$(patsubst i%86,386,$(shell uname -m))) \
$(GO) get -u github.com/prometheus/promu
$(FIRST_GOPATH)/bin/staticcheck:
@GOOS= GOARCH= $(GO) get -u honnef.co/go/tools/cmd/staticcheck
.PHONY: all style check_license format build test vet assets tarball docker promu
.PHONY: all style check_license format build test vet assets tarball docker promu staticcheck $(FIRST_GOPATH)/bin/staticcheck

View File

@ -229,7 +229,7 @@ func Main() int {
webHandler.Ready()
log.Info("Server is Ready to receive requests.")
term := make(chan os.Signal)
term := make(chan os.Signal, 1)
signal.Notify(term, os.Interrupt, syscall.SIGTERM)
select {
case <-term:

View File

@ -130,7 +130,7 @@ func TestTargetURL(t *testing.T) {
func newTestTarget(targetURL string, deadline time.Duration, labels model.LabelSet) *Target {
labels = labels.Clone()
labels[model.SchemeLabel] = "http"
labels[model.AddressLabel] = model.LabelValue(strings.TrimLeft(targetURL, "http://"))
labels[model.AddressLabel] = model.LabelValue(strings.TrimPrefix(targetURL, "http://"))
labels[model.MetricsPathLabel] = "/metrics"
return &Target{

View File

@ -63,6 +63,9 @@ func TestFPMapper(t *testing.T) {
defer closer.Close()
mapper, err := newFPMapper(sm, p)
if err != nil {
t.Fatal(err)
}
// Everything is empty, resolving a FP should do nothing.
gotFP := mapper.mapFP(fp1, cm11)

View File

@ -462,7 +462,7 @@ func (p *persistence) persistChunks(fp model.Fingerprint, chunks []chunk.Chunk)
}
// Determine index within the file.
offset, err := f.Seek(0, os.SEEK_CUR)
offset, err := f.Seek(0, io.SeekCurrent)
if err != nil {
return -1, err
}
@ -498,7 +498,7 @@ func (p *persistence) loadChunks(fp model.Fingerprint, indexes []int, indexOffse
// This loads chunks in batches. A batch is a streak of
// consecutive chunks, read from disk in one go.
batchSize := 1
if _, err := f.Seek(offsetForChunkIndex(indexes[i]+indexOffset), os.SEEK_SET); err != nil {
if _, err := f.Seek(offsetForChunkIndex(indexes[i]+indexOffset), io.SeekStart); err != nil {
return nil, err
}
@ -561,7 +561,7 @@ func (p *persistence) loadChunkDescs(fp model.Fingerprint, offsetFromEnd int) ([
cds := make([]*chunk.Desc, numChunks)
chunkTimesBuf := make([]byte, 16)
for i := 0; i < numChunks; i++ {
_, err := f.Seek(offsetForChunkIndex(i)+chunkHeaderFirstTimeOffset, os.SEEK_SET)
_, err := f.Seek(offsetForChunkIndex(i)+chunkHeaderFirstTimeOffset, io.SeekStart)
if err != nil {
return nil, err
}
@ -814,7 +814,7 @@ func (p *persistence) checkpointSeriesMapAndHeads(
if realNumberOfSeries != numberOfSeriesInHeader {
// The number of series has changed in the meantime.
// Rewrite it in the header.
if _, err = f.Seek(int64(numberOfSeriesOffset), os.SEEK_SET); err != nil {
if _, err = f.Seek(int64(numberOfSeriesOffset), io.SeekStart); err != nil {
return err
}
if err = codable.EncodeUint64(f, realNumberOfSeries); err != nil {
@ -971,7 +971,7 @@ func (p *persistence) dropAndPersistChunks(
headerBuf := make([]byte, chunkHeaderLen)
// Find the first chunk in the file that should be kept.
for ; ; numDropped++ {
_, err = f.Seek(offsetForChunkIndex(numDropped), os.SEEK_SET)
_, err = f.Seek(offsetForChunkIndex(numDropped), io.SeekStart)
if err != nil {
return
}
@ -1007,7 +1007,7 @@ func (p *persistence) dropAndPersistChunks(
if numDropped == chunkIndexToStartSeek {
// Nothing to drop. Just adjust the return values and append the chunks (if any).
numDropped = 0
_, err = f.Seek(offsetForChunkIndex(0), os.SEEK_SET)
_, err = f.Seek(offsetForChunkIndex(0), io.SeekStart)
if err != nil {
return
}
@ -1033,7 +1033,7 @@ func (p *persistence) dropAndPersistChunks(
binary.LittleEndian.Uint64(headerBuf[chunkHeaderFirstTimeOffset:]),
)
chunk.Ops.WithLabelValues(chunk.Drop).Add(float64(numDropped))
_, err = f.Seek(-chunkHeaderLen, os.SEEK_CUR)
_, err = f.Seek(-chunkHeaderLen, io.SeekCurrent)
if err != nil {
return
}
@ -1354,7 +1354,7 @@ func (p *persistence) openChunkFileForWriting(fp model.Fingerprint) (*os.File, e
}
return os.OpenFile(p.fileNameForFingerprint(fp), os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0640)
// NOTE: Although the file was opened for append,
// f.Seek(0, os.SEEK_CUR)
// f.Seek(0, io.SeekCurrent)
// would now return '0, nil', so we cannot check for a consistent file length right now.
// However, the chunkIndexForOffset function is doing that check, so a wrong file length
// would still be detected.

View File

@ -136,6 +136,9 @@ func testPersistLoadDropChunks(t *testing.T, encoding chunk.Encoding) {
}
// Load all chunk descs.
actualChunkDescs, err := p.loadChunkDescs(fp, 0)
if err != nil {
t.Fatal(err)
}
if len(actualChunkDescs) != 10 {
t.Errorf("Got %d chunkDescs, want %d.", len(actualChunkDescs), 10)
}

View File

@ -1420,7 +1420,7 @@ func testEvictAndPurgeSeries(t *testing.T, encoding chunk.Encoding) {
// Unarchive metrics.
s.getOrCreateSeries(fp, model.Metric{})
series, ok = s.fpToSeries.get(fp)
_, ok = s.fpToSeries.get(fp)
if !ok {
t.Fatal("could not find series")
}

View File

@ -68,6 +68,9 @@ func TestStoreHTTPErrorHandling(t *testing.T) {
url: &config.URL{serverURL},
timeout: model.Duration(time.Second),
})
if err != nil {
t.Fatal(err)
}
err = c.Store(nil)
if !reflect.DeepEqual(err, test.err) {