prometheus/tsdb/index/index_test.go

// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package index

import (
	"context"
"fmt"
"hash/crc32"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
"sort"
"testing"
"github.com/pkg/errors"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks"
"github.com/prometheus/prometheus/tsdb/encoding"
"github.com/prometheus/prometheus/util/testutil"
)

type series struct {
	l      labels.Labels
	chunks []chunks.Meta
}
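
// mockIndex is an in-memory stand-in for the index used by the tests below.
// It keeps series, postings lists and symbols in plain maps.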
type mockIndex struct {
	series   map[uint64]series
	postings map[labels.Label][]uint64
	symbols  map[string]struct{}
}

func newMockIndex() mockIndex {
	ix := mockIndex{
		series:   make(map[uint64]series),
		postings: make(map[labels.Label][]uint64),
		symbols:  make(map[string]struct{}),
	}
	ix.postings[allPostingsKey] = []uint64{}
	return ix
}

func (m mockIndex) Symbols() (map[string]struct{}, error) {
	return m.symbols, nil
}
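
// AddSeries registers the series under the given reference and records its
// label names and values as symbols and postings entries.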
func (m mockIndex) AddSeries(ref uint64, l labels.Labels, chunks ...chunks.Meta) error {
	if _, ok := m.series[ref]; ok {
		return errors.Errorf("series with reference %d already added", ref)
	}
	for _, lbl := range l {
		m.symbols[lbl.Name] = struct{}{}
		m.symbols[lbl.Value] = struct{}{}
		if _, ok := m.postings[lbl]; !ok {
			m.postings[lbl] = []uint64{}
		}
		m.postings[lbl] = append(m.postings[lbl], ref)
	}
	m.postings[allPostingsKey] = append(m.postings[allPostingsKey], ref)

	s := series{l: l}
	// Actual chunk data is not stored in the index.
	for _, c := range chunks {
		c.Chunk = nil
		s.chunks = append(s.chunks, c)
	}
	m.series[ref] = s

	return nil
}

func (m mockIndex) Close() error {
	return nil
}
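
// LabelValues returns all values recorded for the given label name, in no
// particular order.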
func (m mockIndex) LabelValues(name string) ([]string, error) {
	values := []string{}
	for l := range m.postings {
		if l.Name == name {
			values = append(values, l.Value)
		}
	}
	return values, nil
}
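
// Postings returns the merged postings lists for every requested value of the
// given label name.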
func (m mockIndex) Postings(name string, values ...string) (Postings, error) {
	p := []Postings{}
	for _, value := range values {
		l := labels.Label{Name: name, Value: value}
		p = append(p, m.SortedPostings(NewListPostings(m.postings[l])))
	}
	return Merge(p...), nil
}
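
// SortedPostings expands the postings and re-sorts them by the labels of the
// series they reference.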
func (m mockIndex) SortedPostings(p Postings) Postings {
	ep, err := ExpandPostings(p)
	if err != nil {
		return ErrPostings(errors.Wrap(err, "expand postings"))
	}
	sort.Slice(ep, func(i, j int) bool {
		return labels.Compare(m.series[ep[i]].l, m.series[ep[j]].l) < 0
	})
	return NewListPostings(ep)
}

func (m mockIndex) Series(ref uint64, lset *labels.Labels, chks *[]chunks.Meta) error {
	s, ok := m.series[ref]
	if !ok {
		return errors.New("not found")
	}
	*lset = append((*lset)[:0], s.l...)
	*chks = append((*chks)[:0], s.chunks...)

	return nil
}
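
// TestIndexRW_Create_Open checks that a freshly written empty index can be
// read back and that a corrupted magic header is rejected on open.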
func TestIndexRW_Create_Open(t *testing.T) {
	dir, err := ioutil.TempDir("", "test_index_create")
	testutil.Ok(t, err)
	defer func() {
		testutil.Ok(t, os.RemoveAll(dir))
	}()

	fn := filepath.Join(dir, indexFilename)

	// An empty index must still result in a readable file.
	iw, err := NewWriter(context.Background(), fn)
	testutil.Ok(t, err)
	testutil.Ok(t, iw.Close())

	ir, err := NewFileReader(fn)
	testutil.Ok(t, err)
	testutil.Ok(t, ir.Close())

	// Modifying the magic header must cause the open to fail.
	f, err := os.OpenFile(fn, os.O_WRONLY, 0666)
	testutil.Ok(t, err)

	_, err = f.WriteAt([]byte{0, 0}, 0)
	testutil.Ok(t, err)
	f.Close()

	_, err = NewFileReader(fn)
	testutil.NotOk(t, err)
}
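
// TestIndexRW_Postings writes a small index with four series, reads it back,
// and verifies the postings, series data and label indices it contains.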
func TestIndexRW_Postings(t *testing.T) {
	dir, err := ioutil.TempDir("", "test_index_postings")
	testutil.Ok(t, err)
	defer func() {
		testutil.Ok(t, os.RemoveAll(dir))
	}()

	fn := filepath.Join(dir, indexFilename)

	iw, err := NewWriter(context.Background(), fn)
	testutil.Ok(t, err)

	series := []labels.Labels{
		labels.FromStrings("a", "1", "b", "1"),
		labels.FromStrings("a", "1", "b", "2"),
		labels.FromStrings("a", "1", "b", "3"),
		labels.FromStrings("a", "1", "b", "4"),
	}
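
	// All symbols the series use must be registered with the writer, in
	// sorted order, before the series themselves are added.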
testutil.Ok(t, iw.AddSymbol("1"))
testutil.Ok(t, iw.AddSymbol("2"))
testutil.Ok(t, iw.AddSymbol("3"))
testutil.Ok(t, iw.AddSymbol("4"))
testutil.Ok(t, iw.AddSymbol("a"))
testutil.Ok(t, iw.AddSymbol("b"))
// Postings lists are only written if a series with the respective
// reference was added before.
testutil.Ok(t, iw.AddSeries(1, series[0]))
testutil.Ok(t, iw.AddSeries(2, series[1]))
testutil.Ok(t, iw.AddSeries(3, series[2]))
testutil.Ok(t, iw.AddSeries(4, series[3]))
testutil.Ok(t, iw.Close())
ir, err := NewFileReader(fn)
testutil.Ok(t, err)
p, err := ir.Postings("a", "1")
testutil.Ok(t, err)
var l labels.Labels
var c []chunks.Meta
for i := 0; p.Next(); i++ {
err := ir.Series(p.At(), &l, &c)
testutil.Ok(t, err)
testutil.Equals(t, 0, len(c))
testutil.Equals(t, series[i], l)
}
testutil.Ok(t, p.Err())

	// The label indices are no longer used, so test them by hand here.
	labelIndices := map[string][]string{}
	testutil.Ok(t, ReadOffsetTable(ir.b, ir.toc.LabelIndicesTable, func(key []string, off uint64, _ int) error {
		if len(key) != 1 {
			return errors.Errorf("unexpected key length for label indices table %d", len(key))
		}
		d := encoding.NewDecbufAt(ir.b, int(off), castagnoliTable)
		vals := []string{}
		nc := d.Be32int()
		if nc != 1 {
return errors.Errorf("unexpected number of label indices table names %d", nc)
		}
		for i := d.Be32(); i > 0; i-- {
			v, err := ir.lookupSymbol(d.Be32())
			if err != nil {
				return err
			}
			vals = append(vals, v)
		}
		labelIndices[key[0]] = vals
		return d.Err()
	}))
	testutil.Equals(t, map[string][]string{
		"a": {"1"},
		"b": {"1", "2", "3", "4"},
}, labelIndices)
testutil.Ok(t, ir.Close())
}
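// TestPostingsMany checks that Postings handles lookups for many values at
// once, including values that are not present in the index.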
func TestPostingsMany(t *testing.T) {
dir, err := ioutil.TempDir("", "test_postings_many")
testutil.Ok(t, err)
defer func() {
testutil.Ok(t, os.RemoveAll(dir))
}()
fn := filepath.Join(dir, indexFilename)
iw, err := NewWriter(context.Background(), fn)
testutil.Ok(t, err)
// Create a label in the index which has 999 values.
symbols := map[string]struct{}{}
series := []labels.Labels{}
for i := 1; i < 1000; i++ {
v := fmt.Sprintf("%03d", i)
series = append(series, labels.FromStrings("i", v, "foo", "bar"))
symbols[v] = struct{}{}
}
symbols["i"] = struct{}{}
symbols["foo"] = struct{}{}
symbols["bar"] = struct{}{}
syms := []string{}
for s := range symbols {
syms = append(syms, s)
}
sort.Strings(syms)
for _, s := range syms {
testutil.Ok(t, iw.AddSymbol(s))
}
for i, s := range series {
testutil.Ok(t, iw.AddSeries(uint64(i), s))
}
testutil.Ok(t, iw.Close())
ir, err := NewFileReader(fn)
testutil.Ok(t, err)
defer func() { testutil.Ok(t, ir.Close()) }()
cases := []struct {
in []string
}{
// Simple cases, everything is present.
{in: []string{"002"}},
{in: []string{"031", "032", "033"}},
{in: []string{"032", "033"}},
{in: []string{"127", "128"}},
{in: []string{"127", "128", "129"}},
{in: []string{"127", "129"}},
{in: []string{"128", "129"}},
{in: []string{"998", "999"}},
{in: []string{"999"}},
// Before actual values.
{in: []string{"000"}},
{in: []string{"000", "001"}},
{in: []string{"000", "002"}},
// After actual values.
{in: []string{"999a"}},
{in: []string{"999", "999a"}},
{in: []string{"998", "999", "999a"}},
// In the middle of actual values.
{in: []string{"126a", "127", "128"}},
{in: []string{"127", "127a", "128"}},
{in: []string{"127", "127a", "128", "128a", "129"}},
{in: []string{"127", "128a", "129"}},
{in: []string{"128", "128a", "129"}},
{in: []string{"128", "129", "129a"}},
{in: []string{"126a", "126b", "127", "127a", "127b", "128", "128a", "128b", "129", "129a", "129b"}},
}
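// For each case, query postings for label "i" and check that only the values
// actually present in the index are returned.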
for _, c := range cases {
it, err := ir.Postings("i", c.in...)
testutil.Ok(t, err)
got := []string{}
var lbls labels.Labels
var metas []chunks.Meta
for it.Next() {
testutil.Ok(t, ir.Series(it.At(), &lbls, &metas))
got = append(got, lbls.Get("i"))
}
testutil.Ok(t, it.Err())
exp := []string{}
for _, e := range c.in {
if _, ok := symbols[e]; ok && e != "l" {
exp = append(exp, e)
}
}
testutil.Equals(t, exp, got, fmt.Sprintf("input: %v", c.in))
}
}
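// TestPersistence_index_e2e writes a full index for 20k series and verifies
// that everything read back from the file matches an in-memory mock index.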
func TestPersistence_index_e2e(t *testing.T) {
dir, err := ioutil.TempDir("", "test_persistence_e2e")
testutil.Ok(t, err)
defer func() {
testutil.Ok(t, os.RemoveAll(dir))
}()
lbls, err := labels.ReadLabels(filepath.Join("..", "testdata", "20kseries.json"), 20000)
testutil.Ok(t, err)
// Sort labels as the index writer expects series in sorted order.
sort.Sort(labels.Slice(lbls))
symbols := map[string]struct{}{}
for _, lset := range lbls {
for _, l := range lset {
symbols[l.Name] = struct{}{}
symbols[l.Value] = struct{}{}
}
}
var input indexWriterSeriesSlice
// Generate ChunkMetas for every label set.
for i, lset := range lbls {
var metas []chunks.Meta
for j := 0; j <= (i % 20); j++ {
metas = append(metas, chunks.Meta{
MinTime: int64(j * 10000),
MaxTime: int64((j + 1) * 10000),
Ref: rand.Uint64(),
Chunk: chunkenc.NewXORChunk(),
})
}
input = append(input, &indexWriterSeries{
labels: lset,
chunks: metas,
})
}
iw, err := NewWriter(context.Background(), filepath.Join(dir, indexFilename))
testutil.Ok(t, err)
syms := []string{}
for s := range symbols {
syms = append(syms, s)
}
sort.Strings(syms)
for _, s := range syms {
testutil.Ok(t, iw.AddSymbol(s))
}
// Population procedure as done by compaction.
var (
postings = NewMemPostings()
values = map[string]map[string]struct{}{}
)
mi := newMockIndex()
for i, s := range input {
err = iw.AddSeries(uint64(i), s.labels, s.chunks...)
testutil.Ok(t, err)
testutil.Ok(t, mi.AddSeries(uint64(i), s.labels, s.chunks...))
for _, l := range s.labels {
valset, ok := values[l.Name]
if !ok {
valset = map[string]struct{}{}
values[l.Name] = valset
}
valset[l.Value] = struct{}{}
}
postings.Add(uint64(i), s.labels)
}
err = iw.Close()
testutil.Ok(t, err)
ir, err := NewFileReader(filepath.Join(dir, indexFilename))
testutil.Ok(t, err)
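// Compare every postings list and its series data against the mock index.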
for p := range mi.postings {
gotp, err := ir.Postings(p.Name, p.Value)
testutil.Ok(t, err)
expp, err := mi.Postings(p.Name, p.Value)
testutil.Ok(t, err)
var lset, explset labels.Labels
var chks, expchks []chunks.Meta
for gotp.Next() {
testutil.Assert(t, expp.Next() == true, "")
ref := gotp.At()
err := ir.Series(ref, &lset, &chks)
testutil.Ok(t, err)
err = mi.Series(expp.At(), &explset, &expchks)
testutil.Ok(t, err)
testutil.Equals(t, explset, lset)
testutil.Equals(t, expchks, chks)
}
testutil.Assert(t, expp.Next() == false, "Expected no more postings for %q=%q", p.Name, p.Value)
testutil.Ok(t, gotp.Err())
}
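// Verify that LabelValues returns the sorted values recorded for each label name.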
labelPairs := map[string][]string{}
for l := range mi.postings {
labelPairs[l.Name] = append(labelPairs[l.Name], l.Value)
}
for k, v := range labelPairs {
sort.Strings(v)
res, err := ir.LabelValues(k)
testutil.Ok(t, err)
testutil.Equals(t, len(v), len(res))
for i := 0; i < len(v); i++ {
testutil.Equals(t, v[i], res[i])
}
}
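// The symbols read back from the file must match those in the mock index.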
gotSymbols := []string{}
it := ir.Symbols()
for it.Next() {
gotSymbols = append(gotSymbols, it.At())
}
testutil.Ok(t, it.Err())
expSymbols := []string{}
for s := range mi.symbols {
		expSymbols = append(expSymbols, s)
	}
	sort.Strings(expSymbols)
	testutil.Equals(t, expSymbols, gotSymbols)

	testutil.Ok(t, ir.Close())
}

func TestDecbufUvarintWithInvalidBuffer(t *testing.T) {
	// Every 0x81 byte has the uvarint continuation bit set, so the length
	// prefix never terminates within the buffer and decoding must fail.
	b := realByteSlice([]byte{0x81, 0x81, 0x81, 0x81, 0x81, 0x81})

	db := encoding.NewDecbufUvarintAt(b, 0, castagnoliTable)
	testutil.NotOk(t, db.Err())
}

func TestReaderWithInvalidBuffer(t *testing.T) {
	// The buffer does not start with the index magic number, so constructing
	// a Reader from it must fail.
	b := realByteSlice([]byte{0x81, 0x81, 0x81, 0x81, 0x81, 0x81})

	_, err := NewReader(b)
	testutil.NotOk(t, err)
}

// TestNewFileReaderErrorNoOpenFiles ensures that no file remains open when an error occurs.
func TestNewFileReaderErrorNoOpenFiles(t *testing.T) {
	dir := testutil.NewTemporaryDirectory("block", t)

	idxName := filepath.Join(dir.Path(), "index")
	err := ioutil.WriteFile(idxName, []byte("corrupted contents"), 0644)
	testutil.Ok(t, err)

	_, err = NewFileReader(idxName)
	testutil.NotOk(t, err)

	// dir.Close will fail on Windows if the idxName fd is not closed on the error path.
	dir.Close()
}
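
// TestSymbols exercises the symbol table layout: a 4-byte big-endian length,
// a 4-byte big-endian symbol count, each symbol as a uvarint length prefix
// followed by its UTF-8 bytes, and a trailing CRC32 (Castagnoli) computed over
// everything after the length field.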
func TestSymbols(t *testing.T) {
	buf := encoding.Encbuf{}

	// Add a prefix to the buffer to simulate symbols being part of a larger buffer.
	buf.PutUvarintStr("something")

	symbolsStart := buf.Len()
	// 204 = 4 bytes for the symbol count + 100 symbols * (1-byte uvarint length + 1-byte UTF-8 character).
	buf.PutBE32int(204) // Length of symbols table.
	buf.PutBE32int(100) // Number of symbols.
	for i := 0; i < 100; i++ {
		// i represents an index into the Unicode character table.
		buf.PutUvarintStr(string(rune(i))) // Symbol.
	}
	checksum := crc32.Checksum(buf.Get()[symbolsStart+4:], castagnoliTable)
	buf.PutBE32(checksum) // Checksum at the end.

	s, err := NewSymbols(realByteSlice(buf.Get()), FormatV2, symbolsStart)
	testutil.Ok(t, err)

	// We store only 4 offsets to symbols, at 8 bytes each.
	testutil.Equals(t, 32, s.Size())

	for i := 99; i >= 0; i-- {
		s, err := s.Lookup(uint32(i))
		testutil.Ok(t, err)
		testutil.Equals(t, string(rune(i)), s)
	}
	_, err = s.Lookup(100)
	testutil.NotOk(t, err)

	for i := 99; i >= 0; i-- {
		r, err := s.ReverseLookup(string(rune(i)))
		testutil.Ok(t, err)
		testutil.Equals(t, uint32(i), r)
	}
	_, err = s.ReverseLookup(string(rune(100)))
	testutil.NotOk(t, err)

	iter := s.Iter()
	i := 0
	for iter.Next() {
		testutil.Equals(t, string(rune(i)), iter.At())
		i++
	}
	testutil.Ok(t, iter.Err())
}
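
// The helpers below are an illustrative sketch (not part of the original
// tests) of how the 204-byte length written in TestSymbols can be derived for
// an arbitrary symbol set: 4 bytes for the symbol count plus, per symbol, a
// uvarint length prefix and the raw UTF-8 bytes. The helper names are
// hypothetical and exist only to document the arithmetic.
func uvarintLen(x uint64) int {
	n := 1
	for x >= 0x80 {
		x >>= 7
		n++
	}
	return n
}

func expectedSymbolsTableLen(symbols []string) int {
	n := 4 // 4-byte big-endian symbol count.
	for _, sym := range symbols {
		n += uvarintLen(uint64(len(sym))) // uvarint length prefix.
		n += len(sym)                     // raw UTF-8 bytes of the symbol.
	}
	return n
}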