mirror of https://github.com/prometheus/prometheus
Improve PromQL parser performance by making it non-concurrent (#6356)
Before this commit, the PromQL parser ran in two goroutines:

* the lexer goroutine, which splits the input into tokens and sends them over a channel to
* the parser goroutine, which produces the abstract syntax tree.

The problem with this approach is that the parser spends more time on goroutine creation and synchronisation than on actual parsing. This commit removes that concurrency and replaces the channel with a slice-based buffer. Benchmarks show that this makes the parser up to 7 times faster than before.

Signed-off-by: Tobias Guggenmos <tguggenm@redhat.com>

pull/6357/head
parent e2dd5b61ef
commit ac3932ea35
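To make the performance claim concrete, here is a minimal, self-contained benchmark sketch (hypothetical code, not taken from Prometheus) contrasting the two handoff strategies the commit message describes. The channel variant pays one goroutine launch per input plus one synchronisation per token; the slice variant pays only an append. Run it with: go test -bench=.

// handoff_test.go — a hypothetical benchmark sketch, not the Prometheus code.
package handoff

import "testing"

type token struct {
	typ int
	val string
}

const nTokens = 1000

// BenchmarkChannelHandoff mirrors the old design: a producer goroutine
// sends every token over an unbuffered channel to the consumer.
func BenchmarkChannelHandoff(b *testing.B) {
	for i := 0; i < b.N; i++ {
		ch := make(chan token)
		go func() {
			for j := 0; j < nTokens; j++ {
				ch <- token{typ: j} // one synchronisation per token
			}
			close(ch)
		}()
		for range ch {
		}
	}
}

// BenchmarkSliceBuffer mirrors the new design: tokens are produced and
// consumed in a single goroutine with no synchronisation at all.
func BenchmarkSliceBuffer(b *testing.B) {
	for i := 0; i < b.N; i++ {
		buf := make([]token, 0, nTokens)
		for j := 0; j < nTokens; j++ {
			buf = append(buf, token{typ: j})
		}
		for range buf {
		}
	}
}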
promql/lex.go

@@ -323,7 +323,7 @@ type lexer struct {
 	start      Pos       // Start position of this item.
 	width      Pos       // Width of last rune read from input.
 	lastPos    Pos       // Position of most recent item returned by nextItem.
-	items      chan item // Channel of scanned items.
+	items      []item    // Slice buffer of scanned items.
 
 	parenDepth int  // Nesting depth of ( ) exprs.
 	braceOpen  bool // Whether a { is opened.
@@ -362,7 +362,7 @@ func (l *lexer) backup() {
 
 // emit passes an item back to the client.
 func (l *lexer) emit(t ItemType) {
-	l.items <- item{t, l.start, l.input[l.start:l.pos]}
+	l.items = append(l.items, item{t, l.start, l.input[l.start:l.pos]})
 	l.start = l.pos
 }
 
@@ -408,13 +408,21 @@ func (l *lexer) linePosition() int {
 // errorf returns an error token and terminates the scan by passing
 // back a nil pointer that will be the next state, terminating l.nextItem.
 func (l *lexer) errorf(format string, args ...interface{}) stateFn {
-	l.items <- item{ItemError, l.start, fmt.Sprintf(format, args...)}
+	l.items = append(l.items, item{ItemError, l.start, fmt.Sprintf(format, args...)})
 	return nil
 }
 
 // nextItem returns the next item from the input.
 func (l *lexer) nextItem() item {
-	item := <-l.items
+	for len(l.items) == 0 {
+		if l.state != nil {
+			l.state = l.state(l)
+		} else {
+			l.emit(ItemEOF)
+		}
+	}
+	item := l.items[0]
+	l.items = l.items[1:]
 	l.lastPos = item.pos
 	return item
 }
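The new nextItem is the heart of the change: instead of blocking on a channel fed by a background goroutine, the caller drives the state machine just far enough to buffer the next token. Below is a heavily simplified, self-contained sketch of that pull-based pattern (hypothetical code, not the PromQL lexer):

// toylexer.go — a toy illustration of on-demand, pull-based lexing.
package main

import "fmt"

// item and stateFn mirror the shape of the real lexer's types.
type item struct {
	typ string
	val string
}

type stateFn func(*lexer) stateFn

type lexer struct {
	input string
	pos   int
	items []item  // slice buffer instead of a channel
	state stateFn // next state function; nil once the input is exhausted
}

// lexChars buffers one single-character item per invocation.
func lexChars(l *lexer) stateFn {
	if l.pos >= len(l.input) {
		return nil // a nil state terminates the machine
	}
	l.items = append(l.items, item{typ: "char", val: string(l.input[l.pos])})
	l.pos++
	return lexChars
}

// nextItem runs state functions on demand until at least one item is
// buffered, then pops the front of the slice — the same loop the diff adds.
func (l *lexer) nextItem() item {
	for len(l.items) == 0 {
		if l.state != nil {
			l.state = l.state(l)
		} else {
			l.items = append(l.items, item{typ: "EOF"})
		}
	}
	it := l.items[0]
	l.items = l.items[1:]
	return it
}

func main() {
	l := &lexer{input: "ab", state: lexChars}
	for {
		it := l.nextItem()
		fmt.Printf("%s %q\n", it.typ, it.val)
		if it.typ == "EOF" {
			return
		}
	}
}

A state function may buffer several items before returning, which is why nextItem loops until the slice is non-empty instead of stepping the machine exactly once.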
@@ -423,9 +431,8 @@ func (l *lexer) nextItem() item {
 func lex(input string) *lexer {
 	l := &lexer{
 		input: input,
-		items: make(chan item),
+		state: lexStatements,
 	}
-	go l.run()
 	return l
 }
@@ -434,7 +441,6 @@ func (l *lexer) run() {
 	for l.state = lexStatements; l.state != nil; {
 		l.state = l.state(l)
 	}
-	close(l.items)
 }
 
 // Release resources used by lexer.
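With the channel gone there is nothing to close, so close(l.items) is dropped; run itself survives because callers that want every token at once (such as the test below) can still drive the state machine to completion in a single call. In terms of the toy sketch above, run would be:

// run lexes the entire input eagerly, leaving all tokens in l.items
// (continuing the hypothetical sketch from nextItem above).
func (l *lexer) run() {
	for l.state = lexChars; l.state != nil; {
		l.state = l.state(l)
	}
}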
promql/lex_test.go

@@ -695,15 +695,11 @@ func TestLexer(t *testing.T) {
 		for i, test := range typ.tests {
 			l := &lexer{
 				input:      test.input,
-				items:      make(chan item),
 				seriesDesc: test.seriesDesc,
 			}
-			go l.run()
+			l.run()
 
-			out := []item{}
-			for it := range l.items {
-				out = append(out, it)
-			}
+			out := l.items
 
 			lastItem := out[len(out)-1]
 			if test.fail {
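Because run now executes synchronously, the test no longer needs a goroutine or a channel-draining loop: a single l.run() call leaves every scanned token in l.items, which the test reads directly as its output.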
promql/parse_test.go

@@ -1741,9 +1741,6 @@ func TestRecoverParserRuntime(t *testing.T) {
 
 	defer func() {
 		testutil.Equals(t, err, errUnexpected)
-
-		_, ok := <-p.lex.items
-		testutil.Assert(t, !ok, "lex.items was not closed")
 	}()
 	defer p.recover(&err)
 	// Cause a runtime panic.
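The dropped assertion existed to catch a lexer goroutine outliving the parser after a panic, which could only be observed through the channel being left open. With no goroutine and no channel, that failure mode is gone, so the check is removed rather than rewritten.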