/* Copyright 2013 Prometheus Team
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License. */
|
%{
package rules

import (
	"fmt"
	"strconv"
	"strings"

	clientmodel "github.com/prometheus/client_golang/model"
)
|
// Lex is called by the parser generated by "go tool yacc" to obtain each
// token. Its body is opened here, before the matching rules block, and
// closed at the very end of this file, so the rule actions below execute
// inside it.
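//
// A parser generated by "go tool yacc" drives this method through an
// interface of roughly the following shape (a sketch of the goyacc
// convention, not code from this file):
//
//	type yyLexer interface {
//		Lex(lval *yySymType) int // next token's type; 0 means EOF
//		Error(e string)          // called by the parser on syntax errors
//	}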
func (lexer *RulesLexer) Lex(lval *yySymType) int {
	// Internal lexer states.
	const (
		S_INITIAL = iota
		S_COMMENTS
	)
|
	// We simulate multiple start symbols for closely related grammars via dummy tokens. See
	// http://www.gnu.org/software/bison/manual/html_node/Multiple-start_002dsymbols.html
	// Reason: we want to be able to parse lists of named rules as well as single expressions.
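	// In grammar terms the trick looks roughly like this (hypothetical
	// token and nonterminal names; the real ones live in the yacc grammar
	// for this package):
	//
	//	start: START_RULES rule_list
	//	     | START_EXPRESSION expression
	//
	// The first token we hand the parser selects the production, and that
	// token is the dummy start token stashed in lexer.startToken.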
	if lexer.startToken != 0 {
		startToken := lexer.startToken
		lexer.startToken = 0
		return startToken
	}
|
	c := lexer.current
	currentState := S_INITIAL
|
	if lexer.empty {
		c, lexer.empty = lexer.getChar(), false
	}
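
	// Past the "%}" below come the golex definitions: D, L, M and U are
	// shorthand character classes (digits; identifier characters;
	// metric-name characters, i.e. identifier characters plus ':'; and the
	// duration unit suffixes s/m/h/d/w/y, seconds through years).
	// "%x S_COMMENTS" declares an exclusive start condition tracked by the
	// currentState variable above, and %yyc, %yyn and %yyt tell golex which
	// variable holds the current character, how to advance it, and where
	// the start-condition state lives.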
|
%}
|
D	[0-9]
L	[a-zA-Z_]
M	[a-zA-Z_:]
U	[smhdwy]
|
%x S_COMMENTS
|
%yyc c
%yyn c = lexer.getChar()
%yyt currentState
|
%%
	lexer.buf = lexer.buf[:0] // Code before the first rule is executed before every scan cycle (rule #0 / state 0 action).
|
"/*" currentState = S_COMMENTS |
|
<S_COMMENTS>"*/" currentState = S_INITIAL |
|
<S_COMMENTS>.|\n /* ignore chars within multi-line comments */ |
|
|
|
\/\/[^\r\n]*\n /* gobble up one-line comments */ |
|
|
|
ALERT|alert			return ALERT
IF|if				return IF
FOR|for				return FOR
WITH|with			return WITH
SUMMARY|summary			return SUMMARY
DESCRIPTION|description		return DESCRIPTION
|
PERMANENT|permanent		return PERMANENT
BY|by				return GROUP_OP
KEEPING_EXTRA|keeping_extra	return KEEPING_EXTRA
AVG|SUM|MAX|MIN|COUNT		lval.str = lexer.token(); return AGGR_OP
avg|sum|max|min|count		lval.str = strings.ToUpper(lexer.token()); return AGGR_OP
\<|>|AND|OR|and|or		lval.str = strings.ToUpper(lexer.token()); return CMP_OP
==|!=|>=|<=|=~|!~		lval.str = lexer.token(); return CMP_OP
[+\-]				lval.str = lexer.token(); return ADDITIVE_OP
[*/%]				lval.str = lexer.token(); return MULT_OP
|
{D}+{U}				lval.str = lexer.token(); return DURATION
{L}({L}|{D})*			lval.str = lexer.token(); return IDENTIFIER
{M}({M}|{D})*			lval.str = lexer.token(); return METRICNAME
|
\-?{D}+(\.{D}*)?	num, err := strconv.ParseFloat(lexer.token(), 64)
			if err != nil && err.(*strconv.NumError).Err == strconv.ErrSyntax {
				panic("Invalid float")
			}
			lval.num = clientmodel.SampleValue(num)
			return NUMBER
|
\"(\\.|[^\\"])*\" lval.str = lexer.token()[1:len(lexer.token()) - 1]; return STRING |
|
\'(\\.|[^\\'])*\' lval.str = lexer.token()[1:len(lexer.token()) - 1]; return STRING |
|
|
|
[{}\[\]()=,]		return int(lexer.buf[0])
[\t\n\r ]		/* gobble up any whitespace */
%%
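
	// Code below the second "%%" runs when no rule matched the current
	// character. Mark it as consumed (lexer.empty) so the next Lex call
	// fetches a fresh one, and hand it to the parser as a single-character
	// token; a return value of 0 signals end of input.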
	lexer.empty = true
	return int(c)
}