Cleanups and adds performance regression.

pull/84/head
Matt T. Proud 2013-03-04 11:43:07 -08:00
parent f39b9c3c8e
commit d5380897c3
4 changed files with 73 additions and 74 deletions

View File

@@ -99,7 +99,7 @@ func main() {
     go func() {
         ticker := time.Tick(time.Second)
-        for i := 0; i < 5; i++ {
+        for i := 0; i < 120; i++ {
             <-ticker
             if i%10 == 0 {
                 fmt.Printf(".")
@@ -109,7 +109,7 @@ func main() {
     //f := model.NewFingerprintFromRowKey("9776005627788788740-g-131-0")
     f := model.NewFingerprintFromRowKey("09923616460706181007-g-131-0")
     v := metric.NewViewRequestBuilder()
-    v.GetMetricAtTime(f, time.Now().Add(-30*time.Second))
+    v.GetMetricAtTime(f, time.Now().Add(-120*time.Second))
     view, err := ts.MakeView(v, time.Minute)
     fmt.Println(view, err)

View File

@@ -28,6 +28,7 @@ import (
     "io"
     "log"
     "sort"
+    "sync"
     "time"
 )
@@ -223,25 +224,25 @@ func (l *LevelDBMetricPersistence) AppendSamples(samples model.Samples) (err err
     }

     // Begin the sorting of grouped samples.
-    sortingSemaphore := make(chan bool, sortConcurrency)
-    doneSorting := make(chan bool, len(fingerprintToSamples))
+    var (
+        sortingSemaphore = make(chan bool, sortConcurrency)
+        doneSorting      = sync.WaitGroup{}
+    )

     for i := 0; i < sortConcurrency; i++ {
         sortingSemaphore <- true
     }

     for _, samples := range fingerprintToSamples {
+        doneSorting.Add(1)
         go func(samples model.Samples) {
             <-sortingSemaphore
             sort.Sort(samples)
             sortingSemaphore <- true
-            doneSorting <- true
+            doneSorting.Done()
         }(samples)
     }

-    for i := 0; i < len(fingerprintToSamples); i++ {
-        <-doneSorting
-    }
+    doneSorting.Wait()

     var (
         absentFingerprints = map[model.Fingerprint]model.Samples{}
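
Note: the AppendSamples change above bounds sort concurrency with a buffered channel used as a semaphore and replaces the hand-counted done channel with a sync.WaitGroup. A minimal, self-contained sketch of the same pattern follows; the group data and variable names here are illustrative stand-ins, not taken from the repository.

package main

import (
	"fmt"
	"sort"
	"sync"
)

// Illustrative concurrency limit; the real code takes this from sortConcurrency.
const sortConcurrency = 2

func main() {
	// Hypothetical stand-in for fingerprintToSamples: independent groups to sort.
	groups := [][]int{{3, 1, 2}, {9, 7, 8}, {6, 4, 5}}

	// A buffered channel acts as a counting semaphore bounding how many
	// sorts run at once.
	sortingSemaphore := make(chan bool, sortConcurrency)
	for i := 0; i < sortConcurrency; i++ {
		sortingSemaphore <- true
	}

	// The WaitGroup replaces counting completions over a done channel.
	var doneSorting sync.WaitGroup
	for _, g := range groups {
		doneSorting.Add(1)
		go func(g []int) {
			defer doneSorting.Done()
			<-sortingSemaphore
			sort.Ints(g)
			sortingSemaphore <- true
		}(g)
	}
	doneSorting.Wait()

	fmt.Println(groups) // [[1 2 3] [7 8 9] [4 5 6]]
}

Relative to the removed done channel, the WaitGroup does not need its capacity sized to len(fingerprintToSamples) up front, and the receive loop at the end collapses into a single Wait call.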

View File

@@ -101,7 +101,7 @@ func (s getMetricRangeOperations) Swap(i, j int) {
     s[i], s[j] = s[j], s[i]
 }

-// Sorts getMetricRangeOperation according duration in descending order.
+// Sorts getMetricRangeOperation according to duration in descending order.
 type rangeDurationSorter struct {
     getMetricRangeOperations
 }
@@ -176,15 +176,13 @@ func collectIntervals(ops ops) (intervals map[time.Duration]getValuesAtIntervalO
     intervals = make(map[time.Duration]getValuesAtIntervalOps)

     for _, operation := range ops {
-        intervalOp, ok := operation.(getValuesAtIntervalOp)
-        if !ok {
-            continue
+        switch t := operation.(type) {
+        case getValuesAtIntervalOp:
+            operations, _ := intervals[t.interval]
+            operations = append(operations, t)
+            intervals[t.interval] = operations
         }
-
-        operations, _ := intervals[intervalOp.interval]
-        operations = append(operations, intervalOp)
-        intervals[intervalOp.interval] = operations
     }

     for _, operations := range intervals {
@@ -197,9 +195,9 @@ func collectIntervals(ops ops) (intervals map[time.Duration]getValuesAtIntervalO
 // Selects and returns all operations that are getValuesAlongRangeOp operations.
 func collectRanges(ops ops) (ranges getMetricRangeOperations) {
     for _, operation := range ops {
-        op, ok := operation.(getValuesAlongRangeOp)
-        if ok {
-            ranges = append(ranges, op)
+        switch t := operation.(type) {
+        case getValuesAlongRangeOp:
+            ranges = append(ranges, t)
         }
     }
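
Both collectIntervals and collectRanges now dispatch on the operation's concrete type with a type switch instead of an assert-and-check pair. A self-contained sketch of that idiom is below; the op interface and the intervalOp/rangeOp structs are simplified stand-ins for illustration, not the repository's own types.

package main

import (
	"fmt"
	"time"
)

// Hypothetical stand-ins for the operation types in operation.go.
type op interface {
	StartsAt() time.Time
}

type intervalOp struct {
	from     time.Time
	interval time.Duration
}

func (o intervalOp) StartsAt() time.Time { return o.from }

type rangeOp struct {
	from, through time.Time
}

func (o rangeOp) StartsAt() time.Time { return o.from }

// collectByInterval mirrors the switch-based dispatch now used in
// collectIntervals: non-matching types simply fall through the switch.
func collectByInterval(operations []op) map[time.Duration][]intervalOp {
	intervals := map[time.Duration][]intervalOp{}
	for _, operation := range operations {
		switch t := operation.(type) {
		case intervalOp:
			intervals[t.interval] = append(intervals[t.interval], t)
		}
	}
	return intervals
}

func main() {
	now := time.Now()
	operations := []op{
		intervalOp{from: now, interval: time.Second},
		rangeOp{from: now, through: now.Add(time.Minute)},
		intervalOp{from: now, interval: time.Second},
	}
	fmt.Println(len(collectByInterval(operations)[time.Second])) // 2
}

The switch also scales to additional operation kinds with further case arms, which the later optimizeForward rewrite relies on.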
@@ -208,6 +206,11 @@ func collectRanges(ops ops) (ranges getMetricRangeOperations) {
     return
 }

+// optimizeForward iteratively scans operations and peeks ahead to subsequent
+// ones to find candidates that can either be removed or truncated through
+// simplification. For instance, if a range query happens to overlap a get-a-
+// value-at-a-certain-point-request, the range query should flatten and subsume
+// the other.
 func optimizeForward(pending ops) (out ops) {
     if len(pending) == 0 {
         return
@@ -219,79 +222,75 @@ func optimizeForward(pending ops) (out ops) {
     pending = pending[1:len(pending)]

-    if _, ok := firstOperation.(getValuesAtTimeOp); ok {
+    switch t := firstOperation.(type) {
+    case getValuesAtTimeOp:
         out = ops{firstOperation}
         tail := optimizeForward(pending)
         return append(out, tail...)
-    }
-
-    // If the last value was a scan at a given frequency along an interval,
-    // several optimizations may exist.
-    if operation, ok := firstOperation.(getValuesAtIntervalOp); ok {
+    case getValuesAtIntervalOp:
+        // If the last value was a scan at a given frequency along an interval,
+        // several optimizations may exist.
         for _, peekOperation := range pending {
-            if peekOperation.StartsAt().After(operation.Through()) {
+            if peekOperation.StartsAt().After(t.Through()) {
                 break
             }

             // If the type is not a range request, we can't do anything.
-            rangeOperation, ok := peekOperation.(getValuesAlongRangeOp)
-            if !ok {
-                continue
-            }
-
-            if !rangeOperation.Through().After(operation.Through()) {
-                var (
-                    before = getValuesAtIntervalOp(operation)
-                    after  = getValuesAtIntervalOp(operation)
-                )
-
-                before.through = rangeOperation.from
-
-                // Truncate the get value at interval request if a range request cuts
-                // it off somewhere.
-                var (
-                    t = rangeOperation.from
-                )
-
-                for {
-                    t = t.Add(operation.interval)
-                    if t.After(rangeOperation.through) {
-                        after.from = t
-                        break
+            switch next := peekOperation.(type) {
+            case getValuesAlongRangeOp:
+                if !next.Through().After(t.Through()) {
+                    var (
+                        before = getValuesAtIntervalOp(t)
+                        after  = getValuesAtIntervalOp(t)
+                    )
+
+                    before.through = next.from
+
+                    // Truncate the get value at interval request if a range request cuts
+                    // it off somewhere.
+                    var (
+                        from = next.from
+                    )
+
+                    for {
+                        from = from.Add(t.interval)
+                        if from.After(next.through) {
+                            after.from = from
+                            break
+                        }
                     }
+
+                    pending = append(ops{before, after}, pending...)
+                    sort.Sort(pending)
+                    return optimizeForward(pending)
                 }
-
-                pending = append(ops{before, after}, pending...)
-                sort.Sort(pending)
-                return optimizeForward(pending)
             }
         }
-    }
-
-    if operation, ok := firstOperation.(getValuesAlongRangeOp); ok {
+    case getValuesAlongRangeOp:
         for _, peekOperation := range pending {
-            if peekOperation.StartsAt().After(operation.Through()) {
+            if peekOperation.StartsAt().After(t.Through()) {
                 break
             }

+            switch next := peekOperation.(type) {
             // All values at a specific time may be elided into the range query.
-            if _, ok := peekOperation.(getValuesAtTimeOp); ok {
+            case getValuesAtTimeOp:
                 pending = pending[1:len(pending)]
                 continue
-            }
-
-            // Range queries should be concatenated if they overlap.
-            if rangeOperation, ok := peekOperation.(getValuesAlongRangeOp); ok {
+            // Range queries should be concatenated if they overlap.
+            case getValuesAlongRangeOp:
                 pending = pending[1:len(pending)]
-                if rangeOperation.Through().After(operation.Through()) {
-                    operation.through = rangeOperation.through
+                if next.Through().After(t.Through()) {
+                    t.through = next.through
                     var (
-                        head = ops{operation}
+                        head = ops{t}
                         tail = pending
                     )
@@ -299,22 +298,20 @@ func optimizeForward(pending ops) (out ops) {
                     return optimizeForward(pending)
                 }
-            }
-
-            if intervalOperation, ok := peekOperation.(getValuesAtIntervalOp); ok {
+            case getValuesAtIntervalOp:
                 pending = pending[1:len(pending)]
-                if intervalOperation.through.After(operation.Through()) {
+                if next.through.After(t.Through()) {
                     var (
-                        t = intervalOperation.from
+                        t = next.from
                     )
                     for {
-                        t = t.Add(intervalOperation.interval)
-                        if t.After(intervalOperation.through) {
-                            intervalOperation.from = t
-                            pending = append(ops{intervalOperation}, pending...)
+                        t = t.Add(next.interval)
+                        if t.After(next.through) {
+                            next.from = t
+                            pending = append(ops{next}, pending...)
                             return optimizeForward(pending)
                         }
@@ -322,6 +319,7 @@ func optimizeForward(pending ops) (out ops) {
                 }
             }
         }
     }
+    }

     // Strictly needed?
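
The doc comment added above sketches optimizeForward's strategy: walk the sorted operations, peek ahead, and let a broader operation subsume or truncate a narrower overlapping one. A rough, self-contained sketch of just the subsume-a-point-request rule follows; pointOp, rangeOp, and elidePoints are simplified stand-ins for illustration, not the repository's types.

package main

import (
	"fmt"
	"time"
)

// Simplified, hypothetical stand-ins for getValuesAtTimeOp and
// getValuesAlongRangeOp; only the fields needed for the elision rule appear.
type pointOp struct {
	at time.Time
}

type rangeOp struct {
	from, through time.Time
}

// elidePoints drops point requests that a covering range request already
// answers, which is the flatten-and-subsume case the comment describes.
func elidePoints(r rangeOp, points []pointOp) (kept []pointOp) {
	for _, p := range points {
		if !p.at.Before(r.from) && !p.at.After(r.through) {
			// Covered by the range query; the range result can answer it.
			continue
		}
		kept = append(kept, p)
	}
	return kept
}

func main() {
	now := time.Now()
	r := rangeOp{from: now, through: now.Add(time.Minute)}
	points := []pointOp{
		{at: now.Add(30 * time.Second)}, // inside the range: elided
		{at: now.Add(2 * time.Minute)},  // outside the range: kept
	}
	fmt.Println(len(elidePoints(r, points))) // 1
}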

View File

@@ -28,7 +28,7 @@ var (
     leveldbUseParanoidChecks = flag.Bool("leveldbUseParanoidChecks", true, "Whether LevelDB uses expensive checks (bool).")
 )

-// LevelDBPersistence is an disk-backed sorted key-value store.
+// LevelDBPersistence is a disk-backed sorted key-value store.
 type LevelDBPersistence struct {
     cache        *levigo.Cache
     filterPolicy *levigo.FilterPolicy