@@ -27,17 +27,12 @@ import (
"github.com/prometheus/prometheus/tsdb/chunks"
"github.com/prometheus/prometheus/tsdb/index"
"github.com/prometheus/prometheus/tsdb/tombstones"
"github.com/prometheus/prometheus/util/annotations"
)
var _ IndexReader = & OOOHeadIndexReader { }
var _ IndexReader = & HeadAndOOOIndexReader { }
// OOOHeadIndexReader implements IndexReader so ooo samples in the head can be
// accessed.
// It also has a reference to headIndexReader so we can leverage on its
// IndexReader implementation for all the methods that remain the same. We
// decided to do this to avoid code duplication.
// The only methods that change are the ones about getting Series and Postings.
type OOOHeadIndexReader struct {
type HeadAndOOOIndexReader struct {
* headIndexReader // A reference to the headIndexReader so we can reuse as many interface implementation as possible.
lastGarbageCollectedMmapRef chunks . ChunkDiskMapperRef
}
@@ -53,25 +48,16 @@ func (o mergedOOOChunks) Iterator(iterator chunkenc.Iterator) chunkenc.Iterator
return storage . ChainSampleIteratorFromIterables ( iterator , o . chunkIterables )
}
func NewOOOHeadIndexReader ( head * Head , mint , maxt int64 , lastGarbageCollectedMmapRef chunks . ChunkDiskMapperRef ) * OOOHeadIndexReader {
func NewHeadAndOOOIndexReader ( head * Head , mint , maxt int64 , lastGarbageCollectedMmapRef chunks . ChunkDiskMapperRef ) * HeadAndOOOIndexReader {
hr := & headIndexReader {
head : head ,
mint : mint ,
maxt : maxt ,
}
return & OOOHeadIndexReader { hr , lastGarbageCollectedMmapRef }
return & HeadAndOOOIndexReader { hr , lastGarbageCollectedMmapRef }
}
func ( oh * OOOHeadIndexReader ) Series ( ref storage . SeriesRef , builder * labels . ScratchBuilder , chks * [ ] chunks . Meta ) error {
return oh . series ( ref , builder , chks , oh . lastGarbageCollectedMmapRef , 0 )
}
// lastGarbageCollectedMmapRef gives the last mmap chunk that may be being garbage collected and so
// any chunk at or before this ref will not be considered. 0 disables this check.
//
// maxMmapRef tells upto what max m-map chunk that we can consider. If it is non-0, then
// the oooHeadChunk will not be considered.
func ( oh * OOOHeadIndexReader ) series ( ref storage . SeriesRef , builder * labels . ScratchBuilder , chks * [ ] chunks . Meta , lastGarbageCollectedMmapRef , maxMmapRef chunks . ChunkDiskMapperRef ) error {
func ( oh * HeadAndOOOIndexReader ) Series ( ref storage . SeriesRef , builder * labels . ScratchBuilder , chks * [ ] chunks . Meta ) error {
s := oh . head . series . getByID ( chunks . HeadSeriesRef ( ref ) )
if s == nil {
@@ -88,10 +74,19 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.Scra
defer s . Unlock ( )
* chks = ( * chks ) [ : 0 ]
if s . ooo == nil {
return nil
if s . ooo != nil {
return getOOOSeriesChunks ( s , oh . mint , oh . maxt , oh . lastGarbageCollectedMmapRef , 0 , true , chks )
}
* chks = appendSeriesChunks ( s , oh . mint , oh . maxt , * chks )
return nil
}
// lastGarbageCollectedMmapRef gives the last mmap chunk that may be being garbage collected and so
// any chunk at or before this ref will not be considered. 0 disables this check.
//
// maxMmapRef tells upto what max m-map chunk that we can consider. If it is non-0, then
// the oooHeadChunk will not be considered.
func getOOOSeriesChunks ( s * memSeries , mint , maxt int64 , lastGarbageCollectedMmapRef , maxMmapRef chunks . ChunkDiskMapperRef , includeInOrder bool , chks * [ ] chunks . Meta ) error {
tmpChks := make ( [ ] chunks . Meta , 0 , len ( s . ooo . oooMmappedChunks ) )
addChunk := func ( minT , maxT int64 , ref chunks . ChunkRef , chunk chunkenc . Chunk ) {
@@ -106,7 +101,7 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.Scra
// Collect all chunks that overlap the query range.
if s . ooo . oooHeadChunk != nil {
c := s . ooo . oooHeadChunk
if c . OverlapsClosedInterval ( oh . mint , oh . maxt ) && maxMmapRef == 0 {
if c . OverlapsClosedInterval ( mint , maxt ) && maxMmapRef == 0 {
ref := chunks . ChunkRef ( chunks . NewHeadChunkRef ( s . ref , s . oooHeadChunkID ( len ( s . ooo . oooMmappedChunks ) ) ) )
if len ( c . chunk . samples ) > 0 { // Empty samples happens in tests, at least.
chks , err := s . ooo . oooHeadChunk . chunk . ToEncodedChunks ( c . minTime , c . maxTime )
@@ -125,12 +120,16 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.Scra
}
for i := len ( s . ooo . oooMmappedChunks ) - 1 ; i >= 0 ; i -- {
c := s . ooo . oooMmappedChunks [ i ]
if c . OverlapsClosedInterval ( oh . mint , oh . maxt ) && ( maxMmapRef == 0 || maxMmapRef . GreaterThanOrEqualTo ( c . ref ) ) && ( lastGarbageCollectedMmapRef == 0 || c . ref . GreaterThan ( lastGarbageCollectedMmapRef ) ) {
if c . OverlapsClosedInterval ( mint , maxt ) && ( maxMmapRef == 0 || maxMmapRef . GreaterThanOrEqualTo ( c . ref ) ) && ( lastGarbageCollectedMmapRef == 0 || c . ref . GreaterThan ( lastGarbageCollectedMmapRef ) ) {
ref := chunks . ChunkRef ( chunks . NewHeadChunkRef ( s . ref , s . oooHeadChunkID ( i ) ) )
addChunk ( c . minTime , c . maxTime , ref , nil )
}
}
if includeInOrder {
tmpChks = appendSeriesChunks ( s , mint , maxt , tmpChks )
}
// There is nothing to do if we did not collect any chunk.
if len ( tmpChks ) == 0 {
return nil
@@ -167,11 +166,10 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.Scra
return nil
}
// LabelValues needs to be overridden from the headIndexReader implementation due
// to the check that happens at the beginning where we make sure that the query
// interval overlaps with the head minooot and maxooot.
func ( oh * OOOHeadIndexReader ) LabelValues ( ctx context . Context , name string , matchers ... * labels . Matcher ) ( [ ] string , error ) {
if oh . maxt < oh . head . MinOOOTime ( ) || oh . mint > oh . head . MaxOOOTime ( ) {
// LabelValues needs to be overridden from the headIndexReader implementation
// so we can return labels within either in-order range or ooo range.
func ( oh * HeadAndOOOIndexReader ) LabelValues ( ctx context . Context , name string , matchers ... * labels . Matcher ) ( [ ] string , error ) {
if oh . maxt < oh . head . MinTime ( ) && oh . maxt < oh . head . MinOOOTime ( ) || oh . mint > oh . head . MaxTime ( ) && oh . mint > oh . head . MaxOOOTime ( ) {
return [ ] string { } , nil
}
@@ -223,41 +221,30 @@ func lessByMinTimeAndMinRef(a, b chunks.Meta) int {
}
}
func ( oh * OOOHeadIndexReader ) Postings ( ctx context . Context , name string , values ... string ) ( index . Postings , error ) {
switch len ( values ) {
case 0 :
return index . EmptyPostings ( ) , nil
case 1 :
return oh . head . postings . Get ( name , values [ 0 ] ) , nil // TODO(ganesh) Also call GetOOOPostings
default :
// TODO(ganesh) We want to only return postings for out of order series.
res := make ( [ ] index . Postings , 0 , len ( values ) )
for _ , value := range values {
res = append ( res , oh . head . postings . Get ( name , value ) ) // TODO(ganesh) Also call GetOOOPostings
}
return index . Merge ( ctx , res ... ) , nil
}
type HeadAndOOOChunkReader struct {
head * Head
mint , maxt int64
cr * headChunkReader // If nil, only read OOO chunks.
maxMmapRef chunks . ChunkDiskMapperRef
oooIsoState * oooIsolationState
}
type OOOHeadChunkReader struct {
head * Head
mint , maxt int64
isoState * oooIsolationState
maxMmapRef chunks . ChunkDiskMapperRef
}
func NewOOOHeadChunkReader ( head * Head , mint , maxt int64 , isoState * oooIsolationState , maxMmapRef chunks . ChunkDiskMapperRef ) * OOOHeadChunkReader {
return & OOOHeadChunkReader {
head : head ,
mint : mint ,
maxt : maxt ,
isoState : isoState ,
maxMmapRef : maxMmapRef ,
func NewHeadAndOOOChunkReader ( head * Head , mint , maxt int64 , cr * headChunkReader , oooIsoState * oooIsolationState , maxMmapRef chunks . ChunkDiskMapperRef ) * HeadAndOOOChunkReader {
return & HeadAndOOOChunkReader {
head : head ,
mint : mint ,
maxt : maxt ,
cr : cr ,
maxMmapRef : maxMmapRef ,
oooIsoState : oooIsoState ,
}
}
func ( cr OOOHeadChunkReader ) ChunkOrIterable ( meta chunks . Meta ) ( chunkenc . Chunk , chunkenc . Iterable , error ) {
sid , _ := chunks . HeadChunkRef ( meta . Ref ) . Unpack ( )
func ( cr * HeadAndOOOChunkReader ) ChunkOrIterable ( meta chunks . Meta ) ( chunkenc . Chunk , chunkenc . Iterable , error ) {
sid , _ , isOOO := unpackHeadChunkRef ( meta . Ref )
if ! isOOO {
return cr . cr . ChunkOrIterable ( meta )
}
s := cr . head . series . getByID ( sid )
// This means that the series has been garbage collected.
@@ -266,34 +253,35 @@ func (cr OOOHeadChunkReader) ChunkOrIterable(meta chunks.Meta) (chunkenc.Chunk,
}
s . Lock ( )
if s . ooo == nil {
// There is no OOO data for this series.
s . Unlock ( )
return nil , nil , storage . ErrNotFound
}
mc , err := s . oooMergedChunks ( meta , cr . head . chunkDiskMapper , cr . mint , cr . maxt , cr . maxMmapRef )
mc , err := s . mergedChunks ( meta , cr . head . chunkDiskMapper , cr . cr , cr . mint , cr . maxt , cr . maxMmapRef )
s . Unlock ( )
if err != nil {
return nil , nil , err
}
// This means that the query range did not overlap with the requested chunk.
if len ( mc . chunkIterables ) == 0 {
return nil , nil , storage . ErrNotFound
}
return nil , mc , err
}
return nil , mc , nil
// ChunkOrIterableWithCopy: implements ChunkReaderWithCopy. The special Copy behaviour
// is only implemented for the in-order head chunk.
func ( cr * HeadAndOOOChunkReader ) ChunkOrIterableWithCopy ( meta chunks . Meta ) ( chunkenc . Chunk , chunkenc . Iterable , int64 , error ) {
_ , _ , isOOO := unpackHeadChunkRef ( meta . Ref )
if ! isOOO {
return cr . cr . ChunkOrIterableWithCopy ( meta )
}
chk , iter , err := cr . ChunkOrIterable ( meta )
return chk , iter , 0 , err
}
func ( cr OOOHeadChunkReader ) Close ( ) error {
if cr . isoState != nil {
cr . isoState . Close ( )
func ( cr * HeadAndOOOChunkReader ) Close ( ) error {
if cr . cr != nil && cr . cr . isoState != nil {
cr . cr . isoState . Close ( )
}
if cr . oooIsoState != nil {
cr . oooIsoState . Close ( )
}
return nil
}
type OOOCompactionHead struct {
oooIR * OOOHeadIndexReader
head * Head
lastMmapRef chunks . ChunkDiskMapperRef
lastWBLFile int
postings [ ] storage . SeriesRef
@@ -310,6 +298,7 @@ type OOOCompactionHead struct {
// on the sample append latency. So call NewOOOCompactionHead only right before compaction.
func NewOOOCompactionHead ( ctx context . Context , head * Head ) ( * OOOCompactionHead , error ) {
ch := & OOOCompactionHead {
head : head ,
chunkRange : head . chunkRange . Load ( ) ,
mint : math . MaxInt64 ,
maxt : math . MinInt64 ,
@@ -323,15 +312,14 @@ func NewOOOCompactionHead(ctx context.Context, head *Head) (*OOOCompactionHead,
ch . lastWBLFile = lastWBLFile
}
ch . oooIR = NewOOOHeadIndexReader ( head , math . MinInt64 , math . MaxInt64 , 0 )
hr := headIndexReader { head : head , mint : ch . mint , maxt : ch . maxt }
n , v := index . AllPostingsKey ( )
// TODO: verify this gets only ooo samples.
p , err := ch . oooIR . Postings ( ctx , n , v )
// TODO: filter to series with OOO samples, before sorting.
p , err := hr . Postings ( ctx , n , v )
if err != nil {
return nil , err
}
p = ch . oooIR . SortedPostings ( p )
p = hr . SortedPostings ( p )
var lastSeq , lastOff int
for p . Next ( ) {
@@ -352,7 +340,7 @@ func NewOOOCompactionHead(ctx context.Context, head *Head) (*OOOCompactionHead,
}
var lastMmapRef chunks . ChunkDiskMapperRef
mmapRefs := ms . mmapCurrentOOOHeadChunk ( head . chunkDiskMapper )
mmapRefs := ms . mmapCurrentOOOHeadChunk ( head . chunkDiskMapper , head . logger )
if len ( mmapRefs ) == 0 && len ( ms . ooo . oooMmappedChunks ) > 0 {
// Nothing was m-mapped. So take the mmapRef from the existing slice if it exists.
mmapRefs = [ ] chunks . ChunkDiskMapperRef { ms . ooo . oooMmappedChunks [ len ( ms . ooo . oooMmappedChunks ) - 1 ] . ref }
@@ -388,7 +376,7 @@ func (ch *OOOCompactionHead) Index() (IndexReader, error) {
}
func ( ch * OOOCompactionHead ) Chunks ( ) ( ChunkReader , error ) {
return NewOOOHeadChunkReader ( ch . oooIR . head , ch . oooIR . mint , ch . oooIR . maxt , nil , ch . lastMmapRef ) , nil
return NewHeadAndOOOChunkReader ( ch . head , ch . mint , ch . maxt , nil , nil , ch . lastMmapRef ) , nil
}
func ( ch * OOOCompactionHead ) Tombstones ( ) ( tombstones . Reader , error ) {
@@ -414,12 +402,12 @@ func (ch *OOOCompactionHead) Meta() BlockMeta {
// Only the method of BlockReader interface are valid for the cloned OOOCompactionHead.
func ( ch * OOOCompactionHead ) CloneForTimeRange ( mint , maxt int64 ) * OOOCompactionHead {
return & OOOCompactionHead {
oooIR : NewOOOHeadIndexReader ( ch . oooIR . head , mint , maxt , 0 ) ,
head : ch . head ,
lastMmapRef : ch . lastMmapRef ,
postings : ch . postings ,
chunkRange : ch . chunkRange ,
mint : ch . mint ,
maxt : ch . maxt ,
mint : mint ,
maxt : maxt ,
}
}
@@ -439,7 +427,8 @@ func NewOOOCompactionHeadIndexReader(ch *OOOCompactionHead) IndexReader {
}
func ( ir * OOOCompactionHeadIndexReader ) Symbols ( ) index . StringIter {
return ir . ch . oooIR . Symbols ( )
hr := headIndexReader { head : ir . ch . head , mint : ir . ch . mint , maxt : ir . ch . maxt }
return hr . Symbols ( )
}
func ( ir * OOOCompactionHeadIndexReader ) Postings ( _ context . Context , name string , values ... string ) ( index . Postings , error ) {
@@ -460,11 +449,28 @@ func (ir *OOOCompactionHeadIndexReader) SortedPostings(p index.Postings) index.P
}
func ( ir * OOOCompactionHeadIndexReader ) ShardedPostings ( p index . Postings , shardIndex , shardCount uint64 ) index . Postings {
return ir . ch . oooIR . ShardedPostings ( p , shardIndex , shardCount )
hr := headIndexReader { head : ir . ch . head , mint : ir . ch . mint , maxt : ir . ch . maxt }
return hr . ShardedPostings ( p , shardIndex , shardCount )
}
func ( ir * OOOCompactionHeadIndexReader ) Series ( ref storage . SeriesRef , builder * labels . ScratchBuilder , chks * [ ] chunks . Meta ) error {
return ir . ch . oooIR . series ( ref , builder , chks , 0 , ir . ch . lastMmapRef )
s := ir . ch . head . series . getByID ( chunks . HeadSeriesRef ( ref ) )
if s == nil {
ir . ch . head . metrics . seriesNotFound . Inc ( )
return storage . ErrNotFound
}
builder . Assign ( s . labels ( ) )
s . Lock ( )
defer s . Unlock ( )
* chks = ( * chks ) [ : 0 ]
if s . ooo == nil {
return nil
}
return getOOOSeriesChunks ( s , ir . ch . mint , ir . ch . maxt , 0 , ir . ch . lastMmapRef , false , chks )
}
func ( ir * OOOCompactionHeadIndexReader ) SortedLabelValues ( _ context . Context , name string , matchers ... * labels . Matcher ) ( [ ] string , error ) {
@@ -492,5 +498,91 @@ func (ir *OOOCompactionHeadIndexReader) LabelNamesFor(ctx context.Context, posti
}
func ( ir * OOOCompactionHeadIndexReader ) Close ( ) error {
return ir . ch . oooIR . Close ( )
return nil
}
// HeadAndOOOQuerier queries both the head and the out-of-order head.
type HeadAndOOOQuerier struct {
mint , maxt int64
head * Head
index IndexReader
chunkr ChunkReader
querier storage . Querier
}
func NewHeadAndOOOQuerier ( mint , maxt int64 , head * Head , oooIsoState * oooIsolationState , querier storage . Querier ) storage . Querier {
cr := & headChunkReader {
head : head ,
mint : mint ,
maxt : maxt ,
isoState : head . iso . State ( mint , maxt ) ,
}
return & HeadAndOOOQuerier {
mint : mint ,
maxt : maxt ,
head : head ,
index : NewHeadAndOOOIndexReader ( head , mint , maxt , oooIsoState . minRef ) ,
chunkr : NewHeadAndOOOChunkReader ( head , mint , maxt , cr , oooIsoState , 0 ) ,
querier : querier ,
}
}
func ( q * HeadAndOOOQuerier ) LabelValues ( ctx context . Context , name string , hints * storage . LabelHints , matchers ... * labels . Matcher ) ( [ ] string , annotations . Annotations , error ) {
return q . querier . LabelValues ( ctx , name , hints , matchers ... )
}
func ( q * HeadAndOOOQuerier ) LabelNames ( ctx context . Context , hints * storage . LabelHints , matchers ... * labels . Matcher ) ( [ ] string , annotations . Annotations , error ) {
return q . querier . LabelNames ( ctx , hints , matchers ... )
}
func ( q * HeadAndOOOQuerier ) Close ( ) error {
q . chunkr . Close ( )
return q . querier . Close ( )
}
func ( q * HeadAndOOOQuerier ) Select ( ctx context . Context , sortSeries bool , hints * storage . SelectHints , matchers ... * labels . Matcher ) storage . SeriesSet {
return selectSeriesSet ( ctx , sortSeries , hints , matchers , q . index , q . chunkr , q . head . tombstones , q . mint , q . maxt )
}
// HeadAndOOOChunkQuerier queries both the head and the out-of-order head.
type HeadAndOOOChunkQuerier struct {
mint , maxt int64
head * Head
index IndexReader
chunkr ChunkReader
querier storage . ChunkQuerier
}
func NewHeadAndOOOChunkQuerier ( mint , maxt int64 , head * Head , oooIsoState * oooIsolationState , querier storage . ChunkQuerier ) storage . ChunkQuerier {
cr := & headChunkReader {
head : head ,
mint : mint ,
maxt : maxt ,
isoState : head . iso . State ( mint , maxt ) ,
}
return & HeadAndOOOChunkQuerier {
mint : mint ,
maxt : maxt ,
head : head ,
index : NewHeadAndOOOIndexReader ( head , mint , maxt , oooIsoState . minRef ) ,
chunkr : NewHeadAndOOOChunkReader ( head , mint , maxt , cr , oooIsoState , 0 ) ,
querier : querier ,
}
}
func ( q * HeadAndOOOChunkQuerier ) LabelValues ( ctx context . Context , name string , hints * storage . LabelHints , matchers ... * labels . Matcher ) ( [ ] string , annotations . Annotations , error ) {
return q . querier . LabelValues ( ctx , name , hints , matchers ... )
}
func ( q * HeadAndOOOChunkQuerier ) LabelNames ( ctx context . Context , hints * storage . LabelHints , matchers ... * labels . Matcher ) ( [ ] string , annotations . Annotations , error ) {
return q . querier . LabelNames ( ctx , hints , matchers ... )
}
func ( q * HeadAndOOOChunkQuerier ) Close ( ) error {
q . chunkr . Close ( )
return q . querier . Close ( )
}
func ( q * HeadAndOOOChunkQuerier ) Select ( ctx context . Context , sortSeries bool , hints * storage . SelectHints , matchers ... * labels . Matcher ) storage . ChunkSeriesSet {
return selectChunkSeriesSet ( ctx , sortSeries , hints , matchers , rangeHeadULID , q . index , q . chunkr , q . head . tombstones , q . mint , q . maxt )
}