Update sre_parser from cpython 3.13.2 source

pull/148/head
Shen-Ta Hsieh 2025-03-19 12:03:58 +08:00
parent 2c789d9684
commit 262941d9de
6 changed files with 1423 additions and 1131 deletions
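For reviewers, the practical shape of the change: the bundled Python 2-era sre_parse copy is replaced with a copy of CPython 3.13's re._parser (imported below as from .sre_parse import _parser), and the token stream it produces differs in three ways that the diff tracks: SUBPATTERN values carry four fields instead of two, opcodes are named integer constants instead of strings, and "(?>...)" parses to a dedicated ATOMIC_GROUP node. A minimal sketch of the new output, using the stdlib re._parser of CPython 3.13 as a stand-in for the bundled copy:

from re import _parser  # stdlib stand-in; this diff bundles the same code

parsed = _parser.parse(r"(?P<name>a+)(?>bc)")
for op, av in parsed:
    print(op, av)
# SUBPATTERN av is now (group, add_flags, del_flags, subpattern)
# ATOMIC_GROUP is a new opcode for "(?>...)" groups (regex syntax added in 3.11)

# Named groups hang off .state (formerly .pattern) of the SubPattern:
print(parsed.state.groupdict)  # {'name': 1}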

@@ -4,7 +4,7 @@ import random
import itertools
from cached_property import cached_property
import gixy.core.sre_parse.sre_parse as sre_parse
from .sre_parse import _parser
LOG = logging.getLogger(__name__)
@@ -22,28 +22,28 @@ FIX_NAMED_GROUPS_RE = re.compile(r"(?<!\\)\(\?(?:<|')(\w+)(?:>|')")
CATEGORIES = {
# TODO(buglloc): unicode?
sre_parse.CATEGORY_SPACE: sre_parse.WHITESPACE,
sre_parse.CATEGORY_NOT_SPACE: _build_reverse_list(sre_parse.WHITESPACE),
sre_parse.CATEGORY_DIGIT: sre_parse.DIGITS,
sre_parse.CATEGORY_NOT_DIGIT: _build_reverse_list(sre_parse.DIGITS),
sre_parse.CATEGORY_WORD: frozenset('abcdefghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
'0123456789_'),
sre_parse.CATEGORY_NOT_WORD: _build_reverse_list(frozenset('abcdefghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
'0123456789_')),
sre_parse.CATEGORY_LINEBREAK: frozenset('\n'),
sre_parse.CATEGORY_NOT_LINEBREAK: _build_reverse_list(frozenset('\n')),
_parser.CATEGORY_SPACE: _parser.WHITESPACE,
_parser.CATEGORY_NOT_SPACE: _build_reverse_list(_parser.WHITESPACE),
_parser.CATEGORY_DIGIT: _parser.DIGITS,
_parser.CATEGORY_NOT_DIGIT: _build_reverse_list(_parser.DIGITS),
_parser.CATEGORY_WORD: frozenset('abcdefghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
'0123456789_'),
_parser.CATEGORY_NOT_WORD: _build_reverse_list(frozenset('abcdefghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
'0123456789_')),
_parser.CATEGORY_LINEBREAK: frozenset('\n'),
_parser.CATEGORY_NOT_LINEBREAK: _build_reverse_list(frozenset('\n')),
'ANY': [chr(x) for x in range(1, 127) if x != 10]
}
CATEGORIES_NAMES = {
sre_parse.CATEGORY_DIGIT: r'\d',
sre_parse.CATEGORY_NOT_DIGIT: r'\D',
sre_parse.CATEGORY_SPACE: r'\s',
sre_parse.CATEGORY_NOT_SPACE: r'\S',
sre_parse.CATEGORY_WORD: r'\w',
sre_parse.CATEGORY_NOT_WORD: r'\W',
_parser.CATEGORY_DIGIT: r'\d',
_parser.CATEGORY_NOT_DIGIT: r'\D',
_parser.CATEGORY_SPACE: r'\s',
_parser.CATEGORY_NOT_SPACE: r'\S',
_parser.CATEGORY_WORD: r'\w',
_parser.CATEGORY_NOT_WORD: r'\W',
}
@@ -55,22 +55,22 @@ def extract_groups(parsed, top=True):
if not token:
# Skip empty tokens
pass
elif token[0] == sre_parse.SUBPATTERN:
if isinstance(token[1][0], int):
# Captured group index can't be a string. E.g. for pattern "(?:la)" group name is "None"
result[token[1][0]] = token[1][1]
result.update(extract_groups(token[1][1], False))
elif token[0] == sre_parse.MIN_REPEAT:
elif token[0] == _parser.SUBPATTERN:
# (group, add_flags, del_flags, pattern)
if token[1][0] is not None:
result[token[1][0]] = token[1][3]
result.update(extract_groups(token[1][3], False))
elif token[0] == _parser.MIN_REPEAT:
result.update(extract_groups(token[1][2], False))
elif token[0] == sre_parse.MAX_REPEAT:
elif token[0] == _parser.MAX_REPEAT:
result.update(extract_groups(token[1][2], False))
elif token[0] == sre_parse.BRANCH:
elif token[0] == _parser.BRANCH:
result.update(extract_groups(token[1][1], False))
elif token[0] == sre_parse.SUBPATTERN:
result.update(extract_groups(token[1][1], False))
elif token[0] == sre_parse.IN:
elif token[0] == _parser.IN:
result.update(extract_groups(token[1], False))
elif isinstance(token, sre_parse.SubPattern):
elif token[0] == _parser.ATOMIC_GROUP:
result.update(extract_groups(token[1], False))
elif isinstance(token, _parser.SubPattern):
result.update(extract_groups(token, False))
return result
@@ -155,7 +155,7 @@ class Token(object):
class AnyToken(Token):
type = sre_parse.ANY
type = _parser.ANY
def can_contain(self, char, skip_literal=True):
return char in CATEGORIES['ANY']
@@ -174,7 +174,7 @@ class AnyToken(Token):
class LiteralToken(Token):
type = sre_parse.LITERAL
type = _parser.LITERAL
def _parse(self):
self.char = chr(self.token[1])
@@ -195,7 +195,7 @@ class LiteralToken(Token):
class NotLiteralToken(Token):
type = sre_parse.NOT_LITERAL
type = _parser.NOT_LITERAL
def _parse(self):
self.char = chr(self.token[1])
@@ -219,7 +219,7 @@ class NotLiteralToken(Token):
class RangeToken(Token):
type = sre_parse.RANGE
type = _parser.RANGE
def _parse(self):
self.left_code = self.token[1][0]
@@ -244,7 +244,7 @@ class RangeToken(Token):
class CategoryToken(Token):
type = sre_parse.CATEGORY
type = _parser.CATEGORY
def _parse(self):
self.char_list = CATEGORIES.get(self.token[1], [''])
@@ -267,7 +267,7 @@ class CategoryToken(Token):
class MinRepeatToken(Token):
type = sre_parse.MIN_REPEAT
type = _parser.MIN_REPEAT
def _parse(self):
self._parse_childs(self.token[1][2])
@@ -336,15 +336,15 @@ class MinRepeatToken(Token):
return '{childs}{{{count}}}?'.format(childs=childs, count=self.min)
if self.min == 0 and self.max == 1:
return '{childs}?'.format(childs=childs)
if self.min == 0 and self.max == sre_parse.MAXREPEAT:
if self.min == 0 and self.max == _parser.MAXREPEAT:
return '{childs}*?'.format(childs=childs)
if self.min == 1 and self.max == sre_parse.MAXREPEAT:
if self.min == 1 and self.max == _parser.MAXREPEAT:
return '{childs}+?'.format(childs=childs)
return '{childs}{{{min},{max}}}?'.format(childs=childs, min=self.min, max=self.max)
class MaxRepeatToken(Token):
type = sre_parse.MAX_REPEAT
type = _parser.MAX_REPEAT
def _parse(self):
self._parse_childs(self.token[1][2])
@@ -413,22 +413,22 @@ class MaxRepeatToken(Token):
return '{childs}{{{count}}}'.format(childs=childs, count=self.min)
if self.min == 0 and self.max == 1:
return '{childs}?'.format(childs=childs)
if self.min == 0 and self.max == sre_parse.MAXREPEAT:
if self.min == 0 and self.max == _parser.MAXREPEAT:
return '{childs}*'.format(childs=childs)
if self.min == 1 and self.max == sre_parse.MAXREPEAT:
if self.min == 1 and self.max == _parser.MAXREPEAT:
return '{childs}+'.format(childs=childs)
return '{childs}{{{min},{max}}}'.format(childs=childs, min=self.min, max=self.max)
class BranchToken(Token):
type = sre_parse.BRANCH
type = _parser.BRANCH
def _parse(self):
self.childs = []
for token in self.token[1][1]:
if not token:
self.childs.append(EmptyToken(token=token, parent=self.parent, regexp=self.regexp))
elif isinstance(token, sre_parse.SubPattern):
elif isinstance(token, _parser.SubPattern):
self.childs.append(InternalSubpatternToken(token=token, parent=self.parent, regexp=self.regexp))
else:
raise RuntimeError('Unexpected token {0} in branch'.format(token))
@@ -464,13 +464,13 @@ class BranchToken(Token):
class SubpatternToken(Token):
type = sre_parse.SUBPATTERN
type = _parser.SUBPATTERN
def _parse(self):
self._parse_childs(self.token[1][1])
# (group, add_flags, del_flags, pattern)
self._parse_childs(self.token[1][3])
self.group = self.token[1][0]
if isinstance(self.group, int):
# Captured group index can't be a string. E.g. for pattern "(?:la)" group name is "None"
if self.group is not None:
self._reg_group(self.group)
def can_contain(self, char, skip_literal=True):
@@ -540,7 +540,7 @@ class SubpatternToken(Token):
class InternalSubpatternToken(Token):
type = sre_parse.SUBPATTERN
type = _parser.SUBPATTERN
def _parse(self):
self._parse_childs(self.token)
@@ -609,8 +609,41 @@ class InternalSubpatternToken(Token):
return ''.join(str(x) for x in self.childs)
class AtomicGroupToken(Token):
type = _parser.ATOMIC_GROUP
def _parse(self):
self._parse_childs(self.token[1])
def can_contain(self, char, skip_literal=True):
for child in self.childs:
if child.can_contain(char, skip_literal=skip_literal):
return True
return False
def must_contain(self, char):
return False
def can_startswith(self, char, strict=False):
return any(x.can_startswith(char, strict) for x in self.childs)
def must_startswith(self, char, strict=False):
return False
def generate(self, context):
res = []
for child in self.childs:
res.append(child.generate(context))
return _gen_combinator(res) + ['']
def __str__(self):
childs = ''.join(str(x) for x in self.childs)
return '(?>{childs})'.format(childs=childs)
class InToken(Token):
type = sre_parse.IN
type = _parser.IN
def _parse(self):
self.childs = parse(self.token[1], self)
@@ -682,11 +715,11 @@ class InToken(Token):
class AtToken(Token):
type = sre_parse.AT
type = _parser.AT
def _parse(self):
self.begin = self.token[1] == sre_parse.AT_BEGINNING
self.end = self.token[1] == sre_parse.AT_END
self.begin = self.token[1] == _parser.AT_BEGINNING
self.end = self.token[1] == _parser.AT_END
def can_contain(self, char, skip_literal=True):
return False
@@ -711,7 +744,7 @@ class AtToken(Token):
class NegateToken(Token):
type = sre_parse.NEGATE
type = _parser.NEGATE
def can_contain(self, char, skip_literal=True):
return False
@@ -733,7 +766,7 @@ class NegateToken(Token):
class GroupRefToken(Token):
type = sre_parse.GROUPREF
type = _parser.GROUPREF
def _parse(self):
self.id = self.token[1]
@@ -759,7 +792,7 @@ class GroupRefToken(Token):
class AssertToken(Token):
type = sre_parse.ASSERT
type = _parser.ASSERT
def can_contain(self, char, skip_literal=True):
# TODO(buglloc): Do it!
@@ -777,7 +810,7 @@ class AssertToken(Token):
class AssertNotToken(Token):
type = sre_parse.ASSERT_NOT
type = _parser.ASSERT_NOT
def can_contain(self, char, skip_literal=True):
# TODO(buglloc): Do it!
@@ -822,35 +855,37 @@ def parse(sre_obj, parent=None, regexp=None):
for token in sre_obj:
if not token:
result.append(EmptyToken(token=token, parent=parent, regexp=regexp))
elif token[0] == sre_parse.ANY:
elif token[0] == _parser.ANY:
result.append(AnyToken(token=token, parent=parent, regexp=regexp))
elif token[0] == sre_parse.LITERAL:
elif token[0] == _parser.LITERAL:
result.append(LiteralToken(token=token, parent=parent, regexp=regexp))
elif token[0] == sre_parse.NOT_LITERAL:
elif token[0] == _parser.NOT_LITERAL:
result.append(NotLiteralToken(token=token, parent=parent, regexp=regexp))
elif token[0] == sre_parse.RANGE:
elif token[0] == _parser.RANGE:
result.append(RangeToken(token=token, parent=parent, regexp=regexp))
elif token[0] == sre_parse.CATEGORY:
elif token[0] == _parser.CATEGORY:
result.append(CategoryToken(token=token, parent=parent, regexp=regexp))
elif token[0] == sre_parse.MIN_REPEAT:
elif token[0] == _parser.MIN_REPEAT:
result.append(MinRepeatToken(token=token, parent=parent, regexp=regexp))
elif token[0] == sre_parse.MAX_REPEAT:
elif token[0] == _parser.MAX_REPEAT:
result.append(MaxRepeatToken(token=token, parent=parent, regexp=regexp))
elif token[0] == sre_parse.BRANCH:
elif token[0] == _parser.BRANCH:
result.append(BranchToken(token=token, parent=parent, regexp=regexp))
elif token[0] == sre_parse.SUBPATTERN:
elif token[0] == _parser.SUBPATTERN:
result.append(SubpatternToken(token=token, parent=parent, regexp=regexp))
elif token[0] == sre_parse.IN:
elif token[0] == _parser.ATOMIC_GROUP:
result.append(AtomicGroupToken(token=token, parent=parent, regexp=regexp))
elif token[0] == _parser.IN:
result.append(InToken(token=token, parent=parent, regexp=regexp))
elif token[0] == sre_parse.NEGATE:
elif token[0] == _parser.NEGATE:
result.append(NegateToken(token=token, parent=parent, regexp=regexp))
elif token[0] == sre_parse.AT:
elif token[0] == _parser.AT:
result.append(AtToken(token=token, parent=parent, regexp=regexp))
elif token[0] == sre_parse.GROUPREF:
elif token[0] == _parser.GROUPREF:
result.append(GroupRefToken(token=token, parent=parent, regexp=regexp))
elif token[0] == sre_parse.ASSERT:
elif token[0] == _parser.ASSERT:
pass # TODO(buglloc): Do it!
elif token[0] == sre_parse.ASSERT_NOT:
elif token[0] == _parser.ASSERT_NOT:
pass # TODO(buglloc): Do it!
else:
LOG.info('Unexpected token "{0}"'.format(token[0]))
@@ -996,13 +1031,10 @@ class Regexp(object):
@cached_property
def groups(self):
# self.root.parse()
result = {}
# for name, token in self._groups.items():
# result[name] = Regexp(str(self), root=token, strict=True, case_sensitive=self.case_sensitive)
for name, parsed in extract_groups(self.parsed).items():
result[name] = Regexp('compiled', _parsed=parsed, strict=True, case_sensitive=self.case_sensitive)
for name, group in self.parsed.pattern.groupdict.items():
for name, group in self.parsed.state.groupdict.items():
result[name] = result[group]
return result
@@ -1022,8 +1054,8 @@
return self._parsed
try:
self._parsed = sre_parse.parse(FIX_NAMED_GROUPS_RE.sub('(?P<\\1>', self.source))
except sre_parse.error as e:
self._parsed = _parser.parse(FIX_NAMED_GROUPS_RE.sub('(?P<\\1>', self.source))
except _parser.error as e:
LOG.fatal('Failed to parse regex: %s (%s)', self.source, str(e))
raise e
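For context on the parse path above: the named-group normalization is unchanged by the module swap; gixy still rewrites nginx/PCRE-style (?<name>...) and (?'name'...) groups into Python's (?P<name>...) form before parsing. A small sketch of that step, using the stdlib parser as a stand-in for the bundled copy:

import re
from re import _parser  # stand-in for the bundled module this diff adds

FIX_NAMED_GROUPS_RE = re.compile(r"(?<!\\)\(\?(?:<|')(\w+)(?:>|')")

source = r"^/download/(?<path>.+)$"            # nginx-style named group
normalized = FIX_NAMED_GROUPS_RE.sub('(?P<\\1>', source)
print(normalized)                              # ^/download/(?P<path>.+)$

parsed = _parser.parse(normalized)
print(parsed.state.groupdict)                  # {'path': 1}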

@@ -0,0 +1,230 @@
# flake8: noqa
# Copied from cpython 3.13.2
#
# Secret Labs' Regular Expression Engine
#
# various symbols used by the regular expression engine.
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# This version of the SRE library can be redistributed under CNRI's
# Python 1.6 license. For any other use, please contact Secret Labs
# AB (info@pythonware.com).
#
# Portions of this engine have been developed in cooperation with
# CNRI. Hewlett-Packard provided funding for 1.6 integration and
# other compatibility work.
#
"""Internal support module for sre"""
# update when constants are added or removed
MAGIC = 20230612
from _sre import MAXREPEAT, MAXGROUPS
# SRE standard exception (access as sre.error)
# should this really be here?
class PatternError(Exception):
"""Exception raised for invalid regular expressions.
Attributes:
msg: The unformatted error message
pattern: The regular expression pattern
pos: The index in the pattern where compilation failed (may be None)
lineno: The line corresponding to pos (may be None)
colno: The column corresponding to pos (may be None)
"""
__module__ = 're'
def __init__(self, msg, pattern=None, pos=None):
self.msg = msg
self.pattern = pattern
self.pos = pos
if pattern is not None and pos is not None:
msg = '%s at position %d' % (msg, pos)
if isinstance(pattern, str):
newline = '\n'
else:
newline = b'\n'
self.lineno = pattern.count(newline, 0, pos) + 1
self.colno = pos - pattern.rfind(newline, 0, pos)
if newline in pattern:
msg = '%s (line %d, column %d)' % (msg, self.lineno, self.colno)
else:
self.lineno = self.colno = None
super().__init__(msg)
# Backward compatibility after renaming in 3.13
error = PatternError
class _NamedIntConstant(int):
def __new__(cls, value, name):
self = super(_NamedIntConstant, cls).__new__(cls, value)
self.name = name
return self
def __repr__(self):
return self.name
__reduce__ = None
MAXREPEAT = _NamedIntConstant(MAXREPEAT, 'MAXREPEAT')
def _makecodes(*names):
items = [_NamedIntConstant(i, name) for i, name in enumerate(names)]
globals().update({item.name: item for item in items})
return items
# operators
OPCODES = _makecodes(
# failure=0 success=1 (just because it looks better that way :-)
'FAILURE', 'SUCCESS',
'ANY', 'ANY_ALL',
'ASSERT', 'ASSERT_NOT',
'AT',
'BRANCH',
'CATEGORY',
'CHARSET', 'BIGCHARSET',
'GROUPREF', 'GROUPREF_EXISTS',
'IN',
'INFO',
'JUMP',
'LITERAL',
'MARK',
'MAX_UNTIL',
'MIN_UNTIL',
'NOT_LITERAL',
'NEGATE',
'RANGE',
'REPEAT',
'REPEAT_ONE',
'SUBPATTERN',
'MIN_REPEAT_ONE',
'ATOMIC_GROUP',
'POSSESSIVE_REPEAT',
'POSSESSIVE_REPEAT_ONE',
'GROUPREF_IGNORE',
'IN_IGNORE',
'LITERAL_IGNORE',
'NOT_LITERAL_IGNORE',
'GROUPREF_LOC_IGNORE',
'IN_LOC_IGNORE',
'LITERAL_LOC_IGNORE',
'NOT_LITERAL_LOC_IGNORE',
'GROUPREF_UNI_IGNORE',
'IN_UNI_IGNORE',
'LITERAL_UNI_IGNORE',
'NOT_LITERAL_UNI_IGNORE',
'RANGE_UNI_IGNORE',
# The following opcodes are only occurred in the parser output,
# but not in the compiled code.
'MIN_REPEAT', 'MAX_REPEAT',
)
del OPCODES[-2:] # remove MIN_REPEAT and MAX_REPEAT
# positions
ATCODES = _makecodes(
'AT_BEGINNING', 'AT_BEGINNING_LINE', 'AT_BEGINNING_STRING',
'AT_BOUNDARY', 'AT_NON_BOUNDARY',
'AT_END', 'AT_END_LINE', 'AT_END_STRING',
'AT_LOC_BOUNDARY', 'AT_LOC_NON_BOUNDARY',
'AT_UNI_BOUNDARY', 'AT_UNI_NON_BOUNDARY',
)
# categories
CHCODES = _makecodes(
'CATEGORY_DIGIT', 'CATEGORY_NOT_DIGIT',
'CATEGORY_SPACE', 'CATEGORY_NOT_SPACE',
'CATEGORY_WORD', 'CATEGORY_NOT_WORD',
'CATEGORY_LINEBREAK', 'CATEGORY_NOT_LINEBREAK',
'CATEGORY_LOC_WORD', 'CATEGORY_LOC_NOT_WORD',
'CATEGORY_UNI_DIGIT', 'CATEGORY_UNI_NOT_DIGIT',
'CATEGORY_UNI_SPACE', 'CATEGORY_UNI_NOT_SPACE',
'CATEGORY_UNI_WORD', 'CATEGORY_UNI_NOT_WORD',
'CATEGORY_UNI_LINEBREAK', 'CATEGORY_UNI_NOT_LINEBREAK',
)
# replacement operations for "ignore case" mode
OP_IGNORE = {
LITERAL: LITERAL_IGNORE,
NOT_LITERAL: NOT_LITERAL_IGNORE,
}
OP_LOCALE_IGNORE = {
LITERAL: LITERAL_LOC_IGNORE,
NOT_LITERAL: NOT_LITERAL_LOC_IGNORE,
}
OP_UNICODE_IGNORE = {
LITERAL: LITERAL_UNI_IGNORE,
NOT_LITERAL: NOT_LITERAL_UNI_IGNORE,
}
AT_MULTILINE = {
AT_BEGINNING: AT_BEGINNING_LINE,
AT_END: AT_END_LINE
}
AT_LOCALE = {
AT_BOUNDARY: AT_LOC_BOUNDARY,
AT_NON_BOUNDARY: AT_LOC_NON_BOUNDARY
}
AT_UNICODE = {
AT_BOUNDARY: AT_UNI_BOUNDARY,
AT_NON_BOUNDARY: AT_UNI_NON_BOUNDARY
}
CH_LOCALE = {
CATEGORY_DIGIT: CATEGORY_DIGIT,
CATEGORY_NOT_DIGIT: CATEGORY_NOT_DIGIT,
CATEGORY_SPACE: CATEGORY_SPACE,
CATEGORY_NOT_SPACE: CATEGORY_NOT_SPACE,
CATEGORY_WORD: CATEGORY_LOC_WORD,
CATEGORY_NOT_WORD: CATEGORY_LOC_NOT_WORD,
CATEGORY_LINEBREAK: CATEGORY_LINEBREAK,
CATEGORY_NOT_LINEBREAK: CATEGORY_NOT_LINEBREAK
}
CH_UNICODE = {
CATEGORY_DIGIT: CATEGORY_UNI_DIGIT,
CATEGORY_NOT_DIGIT: CATEGORY_UNI_NOT_DIGIT,
CATEGORY_SPACE: CATEGORY_UNI_SPACE,
CATEGORY_NOT_SPACE: CATEGORY_UNI_NOT_SPACE,
CATEGORY_WORD: CATEGORY_UNI_WORD,
CATEGORY_NOT_WORD: CATEGORY_UNI_NOT_WORD,
CATEGORY_LINEBREAK: CATEGORY_UNI_LINEBREAK,
CATEGORY_NOT_LINEBREAK: CATEGORY_UNI_NOT_LINEBREAK
}
# flags
SRE_FLAG_IGNORECASE = 2 # case insensitive
SRE_FLAG_LOCALE = 4 # honour system locale
SRE_FLAG_MULTILINE = 8 # treat target as multiline string
SRE_FLAG_DOTALL = 16 # treat target as a single string
SRE_FLAG_UNICODE = 32 # use unicode "locale"
SRE_FLAG_VERBOSE = 64 # ignore whitespace and comments
SRE_FLAG_DEBUG = 128 # debugging
SRE_FLAG_ASCII = 256 # use ascii "locale"
# flags for INFO primitive
SRE_INFO_PREFIX = 1 # has prefix
SRE_INFO_LITERAL = 2 # entire pattern is literal (given by prefix)
SRE_INFO_CHARSET = 4 # pattern starts with character from given set
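One practical consequence of the constants above, and the reason every token[0] comparison in the first file moves to the _parser names: opcodes are now _NamedIntConstant integers rather than the old lowercase strings, so any check written against the 2.x string constants silently stops matching. A quick illustration against the stdlib module this bundled file mirrors:

from re import _constants  # stdlib counterpart of the bundled file above

print(repr(_constants.LITERAL))         # LITERAL  (a named int, not "literal")
print(int(_constants.LITERAL))          # its numeric opcode value
print(_constants.LITERAL == "literal")  # False: old-style string comparisons fail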

File diff suppressed because it is too large.

@@ -1,226 +0,0 @@
# flake8: noqa
#
# Secret Labs' Regular Expression Engine
#
# various symbols used by the regular expression engine.
# run this script to update the _sre include files!
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# See the sre.py file for information on usage and redistribution.
#
"""Internal support module for sre"""
# update when constants are added or removed
MAGIC = 20031017
try:
from _sre import MAXREPEAT
except ImportError:
import _sre
MAXREPEAT = _sre.MAXREPEAT = 65535
# SRE standard exception (access as sre.error)
# should this really be here?
class error(Exception):
pass
# operators
FAILURE = "failure"
SUCCESS = "success"
ANY = "any"
ANY_ALL = "any_all"
ASSERT = "assert"
ASSERT_NOT = "assert_not"
AT = "at"
BIGCHARSET = "bigcharset"
BRANCH = "branch"
CALL = "call"
CATEGORY = "category"
CHARSET = "charset"
GROUPREF = "groupref"
GROUPREF_IGNORE = "groupref_ignore"
GROUPREF_EXISTS = "groupref_exists"
IN = "in"
IN_IGNORE = "in_ignore"
INFO = "info"
JUMP = "jump"
LITERAL = "literal"
LITERAL_IGNORE = "literal_ignore"
MARK = "mark"
MAX_REPEAT = "max_repeat"
MAX_UNTIL = "max_until"
MIN_REPEAT = "min_repeat"
MIN_UNTIL = "min_until"
NEGATE = "negate"
NOT_LITERAL = "not_literal"
NOT_LITERAL_IGNORE = "not_literal_ignore"
RANGE = "range"
REPEAT = "repeat"
REPEAT_ONE = "repeat_one"
SUBPATTERN = "subpattern"
MIN_REPEAT_ONE = "min_repeat_one"
# positions
AT_BEGINNING = "at_beginning"
AT_BEGINNING_LINE = "at_beginning_line"
AT_BEGINNING_STRING = "at_beginning_string"
AT_BOUNDARY = "at_boundary"
AT_NON_BOUNDARY = "at_non_boundary"
AT_END = "at_end"
AT_END_LINE = "at_end_line"
AT_END_STRING = "at_end_string"
AT_LOC_BOUNDARY = "at_loc_boundary"
AT_LOC_NON_BOUNDARY = "at_loc_non_boundary"
AT_UNI_BOUNDARY = "at_uni_boundary"
AT_UNI_NON_BOUNDARY = "at_uni_non_boundary"
# categories
CATEGORY_DIGIT = "category_digit"
CATEGORY_NOT_DIGIT = "category_not_digit"
CATEGORY_SPACE = "category_space"
CATEGORY_NOT_SPACE = "category_not_space"
CATEGORY_WORD = "category_word"
CATEGORY_NOT_WORD = "category_not_word"
CATEGORY_LINEBREAK = "category_linebreak"
CATEGORY_NOT_LINEBREAK = "category_not_linebreak"
CATEGORY_LOC_WORD = "category_loc_word"
CATEGORY_LOC_NOT_WORD = "category_loc_not_word"
CATEGORY_UNI_DIGIT = "category_uni_digit"
CATEGORY_UNI_NOT_DIGIT = "category_uni_not_digit"
CATEGORY_UNI_SPACE = "category_uni_space"
CATEGORY_UNI_NOT_SPACE = "category_uni_not_space"
CATEGORY_UNI_WORD = "category_uni_word"
CATEGORY_UNI_NOT_WORD = "category_uni_not_word"
CATEGORY_UNI_LINEBREAK = "category_uni_linebreak"
CATEGORY_UNI_NOT_LINEBREAK = "category_uni_not_linebreak"
OPCODES = [
# failure=0 success=1 (just because it looks better that way :-)
FAILURE, SUCCESS,
ANY, ANY_ALL,
ASSERT, ASSERT_NOT,
AT,
BRANCH,
CALL,
CATEGORY,
CHARSET, BIGCHARSET,
GROUPREF, GROUPREF_EXISTS, GROUPREF_IGNORE,
IN, IN_IGNORE,
INFO,
JUMP,
LITERAL, LITERAL_IGNORE,
MARK,
MAX_UNTIL,
MIN_UNTIL,
NOT_LITERAL, NOT_LITERAL_IGNORE,
NEGATE,
RANGE,
REPEAT,
REPEAT_ONE,
SUBPATTERN,
MIN_REPEAT_ONE
]
ATCODES = [
AT_BEGINNING, AT_BEGINNING_LINE, AT_BEGINNING_STRING, AT_BOUNDARY,
AT_NON_BOUNDARY, AT_END, AT_END_LINE, AT_END_STRING,
AT_LOC_BOUNDARY, AT_LOC_NON_BOUNDARY, AT_UNI_BOUNDARY,
AT_UNI_NON_BOUNDARY
]
CHCODES = [
CATEGORY_DIGIT, CATEGORY_NOT_DIGIT, CATEGORY_SPACE,
CATEGORY_NOT_SPACE, CATEGORY_WORD, CATEGORY_NOT_WORD,
CATEGORY_LINEBREAK, CATEGORY_NOT_LINEBREAK, CATEGORY_LOC_WORD,
CATEGORY_LOC_NOT_WORD, CATEGORY_UNI_DIGIT, CATEGORY_UNI_NOT_DIGIT,
CATEGORY_UNI_SPACE, CATEGORY_UNI_NOT_SPACE, CATEGORY_UNI_WORD,
CATEGORY_UNI_NOT_WORD, CATEGORY_UNI_LINEBREAK,
CATEGORY_UNI_NOT_LINEBREAK
]
def makedict(list):
d = {}
i = 0
for item in list:
d[item] = i
i = i + 1
return d
OPCODES = makedict(OPCODES)
ATCODES = makedict(ATCODES)
CHCODES = makedict(CHCODES)
# replacement operations for "ignore case" mode
OP_IGNORE = {
GROUPREF: GROUPREF_IGNORE,
IN: IN_IGNORE,
LITERAL: LITERAL_IGNORE,
NOT_LITERAL: NOT_LITERAL_IGNORE
}
AT_MULTILINE = {
AT_BEGINNING: AT_BEGINNING_LINE,
AT_END: AT_END_LINE
}
AT_LOCALE = {
AT_BOUNDARY: AT_LOC_BOUNDARY,
AT_NON_BOUNDARY: AT_LOC_NON_BOUNDARY
}
AT_UNICODE = {
AT_BOUNDARY: AT_UNI_BOUNDARY,
AT_NON_BOUNDARY: AT_UNI_NON_BOUNDARY
}
CH_LOCALE = {
CATEGORY_DIGIT: CATEGORY_DIGIT,
CATEGORY_NOT_DIGIT: CATEGORY_NOT_DIGIT,
CATEGORY_SPACE: CATEGORY_SPACE,
CATEGORY_NOT_SPACE: CATEGORY_NOT_SPACE,
CATEGORY_WORD: CATEGORY_LOC_WORD,
CATEGORY_NOT_WORD: CATEGORY_LOC_NOT_WORD,
CATEGORY_LINEBREAK: CATEGORY_LINEBREAK,
CATEGORY_NOT_LINEBREAK: CATEGORY_NOT_LINEBREAK
}
CH_UNICODE = {
CATEGORY_DIGIT: CATEGORY_UNI_DIGIT,
CATEGORY_NOT_DIGIT: CATEGORY_UNI_NOT_DIGIT,
CATEGORY_SPACE: CATEGORY_UNI_SPACE,
CATEGORY_NOT_SPACE: CATEGORY_UNI_NOT_SPACE,
CATEGORY_WORD: CATEGORY_UNI_WORD,
CATEGORY_NOT_WORD: CATEGORY_UNI_NOT_WORD,
CATEGORY_LINEBREAK: CATEGORY_UNI_LINEBREAK,
CATEGORY_NOT_LINEBREAK: CATEGORY_UNI_NOT_LINEBREAK
}
# flags
SRE_FLAG_TEMPLATE = 1 # template mode (disable backtracking)
SRE_FLAG_IGNORECASE = 2 # case insensitive
SRE_FLAG_LOCALE = 4 # honour system locale
SRE_FLAG_MULTILINE = 8 # treat target as multiline string
SRE_FLAG_DOTALL = 16 # treat target as a single string
SRE_FLAG_UNICODE = 32 # use unicode locale
SRE_FLAG_VERBOSE = 64 # ignore whitespace and comments
SRE_FLAG_DEBUG = 128 # debugging
# flags for INFO primitive
SRE_INFO_PREFIX = 1 # has prefix
SRE_INFO_LITERAL = 2 # entire pattern is literal (given by prefix)
SRE_INFO_CHARSET = 4 # pattern starts with character from given set

@@ -1,827 +0,0 @@
# flake8: noqa
#
# Secret Labs' Regular Expression Engine
#
# convert re-style regular expression to sre pattern
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# See the sre.py file for information on usage and redistribution.
#
"""Internal support module for sre"""
from .sre_constants import *
SPECIAL_CHARS = ".\\[{()*+?^$|"
REPEAT_CHARS = "*+?{"
DIGITS = set("0123456789")
OCTDIGITS = set("01234567")
HEXDIGITS = set("0123456789abcdefABCDEF")
WHITESPACE = set(" \t\n\r\v\f")
ESCAPES = {
r"\a": (LITERAL, ord("\a")),
r"\b": (LITERAL, ord("\b")),
r"\f": (LITERAL, ord("\f")),
r"\n": (LITERAL, ord("\n")),
r"\r": (LITERAL, ord("\r")),
r"\t": (LITERAL, ord("\t")),
r"\v": (LITERAL, ord("\v")),
r"\\": (LITERAL, ord("\\"))
}
CATEGORIES = {
r"\A": (AT, AT_BEGINNING_STRING), # start of string
r"\b": (AT, AT_BOUNDARY),
r"\B": (AT, AT_NON_BOUNDARY),
r"\d": (IN, [(CATEGORY, CATEGORY_DIGIT)]),
r"\D": (IN, [(CATEGORY, CATEGORY_NOT_DIGIT)]),
r"\s": (IN, [(CATEGORY, CATEGORY_SPACE)]),
r"\S": (IN, [(CATEGORY, CATEGORY_NOT_SPACE)]),
r"\w": (IN, [(CATEGORY, CATEGORY_WORD)]),
r"\W": (IN, [(CATEGORY, CATEGORY_NOT_WORD)]),
r"\Z": (AT, AT_END_STRING), # end of string
}
FLAGS = {
# standard flags
"i": SRE_FLAG_IGNORECASE,
"L": SRE_FLAG_LOCALE,
"m": SRE_FLAG_MULTILINE,
"s": SRE_FLAG_DOTALL,
"x": SRE_FLAG_VERBOSE,
# extensions
"t": SRE_FLAG_TEMPLATE,
"u": SRE_FLAG_UNICODE,
}
class Pattern:
# master pattern object. keeps track of global attributes
def __init__(self):
self.flags = 0
self.open = []
self.groups = 1
self.groupdict = {}
self.lookbehind = 0
def opengroup(self, name=None):
gid = self.groups
self.groups = gid + 1
if name is not None:
ogid = self.groupdict.get(name, None)
if ogid is not None:
raise error(("redefinition of group name %s as group %d; "
"was group %d" % (repr(name), gid, ogid)))
self.groupdict[name] = gid
self.open.append(gid)
return gid
def closegroup(self, gid):
self.open.remove(gid)
def checkgroup(self, gid):
return gid < self.groups and gid not in self.open
class SubPattern:
# a subpattern, in intermediate form
def __init__(self, pattern, data=None):
self.pattern = pattern
if data is None:
data = []
self.data = data
self.width = None
def __repr__(self):
return repr(self.data)
def __len__(self):
return len(self.data)
def __delitem__(self, index):
del self.data[index]
def __getitem__(self, index):
if isinstance(index, slice):
return SubPattern(self.pattern, self.data[index])
return self.data[index]
def __setitem__(self, index, code):
self.data[index] = code
def insert(self, index, code):
self.data.insert(index, code)
def append(self, code):
self.data.append(code)
def getwidth(self):
# determine the width (min, max) for this subpattern
if self.width:
return self.width
lo = hi = 0
UNITCODES = (ANY, RANGE, IN, LITERAL, NOT_LITERAL, CATEGORY)
REPEATCODES = (MIN_REPEAT, MAX_REPEAT)
for op, av in self.data:
if op is BRANCH:
i = MAXREPEAT - 1
j = 0
for av in av[1]:
l, h = av.getwidth()
i = min(i, l)
j = max(j, h)
lo = lo + i
hi = hi + j
elif op is CALL:
i, j = av.getwidth()
lo = lo + i
hi = hi + j
elif op is SUBPATTERN:
i, j = av[1].getwidth()
lo = lo + i
hi = hi + j
elif op in REPEATCODES:
i, j = av[2].getwidth()
lo = lo + i * av[0]
hi = hi + j * av[1]
elif op in UNITCODES:
lo = lo + 1
hi = hi + 1
elif op == SUCCESS:
break
self.width = min(lo, MAXREPEAT - 1), min(hi, MAXREPEAT)
return self.width
class Tokenizer:
def __init__(self, string):
self.string = string
self.index = 0
self.__next()
def __next(self):
if self.index >= len(self.string):
self.next = None
return
char = self.string[self.index]
if char[0] == "\\":
try:
c = self.string[self.index + 1]
except IndexError:
raise error("bogus escape (end of line)")
char = char + c
self.index = self.index + len(char)
self.next = char
def match(self, char, skip=1):
if char == self.next:
if skip:
self.__next()
return 1
return 0
def get(self):
this = self.next
self.__next()
return this
def tell(self):
return self.index, self.next
def seek(self, index):
self.index, self.next = index
def isident(char):
return "a" <= char <= "z" or "A" <= char <= "Z" or char == "_"
def isdigit(char):
return "0" <= char <= "9"
def isname(name):
# check that group name is a valid string
if not isident(name[0]):
return False
for char in name[1:]:
if not isident(char) and not isdigit(char):
return False
return True
def _class_escape(source, escape):
# handle escape code inside character class
code = ESCAPES.get(escape)
if code:
return code
code = CATEGORIES.get(escape)
if code and code[0] == IN:
return code
try:
c = escape[1:2]
if c == "x":
# hexadecimal escape (exactly two digits)
while source.next in HEXDIGITS and len(escape) < 4:
escape = escape + source.get()
escape = escape[2:]
if len(escape) != 2:
raise error("bogus escape: %s" % repr("\\" + escape))
return LITERAL, int(escape, 16) & 0xff
elif c in OCTDIGITS:
# octal escape (up to three digits)
while source.next in OCTDIGITS and len(escape) < 4:
escape = escape + source.get()
escape = escape[1:]
return LITERAL, int(escape, 8) & 0xff
elif c in DIGITS:
raise error("bogus escape: %s" % repr(escape))
if len(escape) == 2:
return LITERAL, ord(escape[1])
except ValueError:
pass
raise error("bogus escape: %s" % repr(escape))
def _escape(source, escape, state):
# handle escape code in expression
code = CATEGORIES.get(escape)
if code:
return code
code = ESCAPES.get(escape)
if code:
return code
try:
c = escape[1:2]
if c == "x":
# hexadecimal escape
while source.next in HEXDIGITS and len(escape) < 4:
escape = escape + source.get()
if len(escape) != 4:
raise ValueError
return LITERAL, int(escape[2:], 16) & 0xff
elif c == "0":
# octal escape
while source.next in OCTDIGITS and len(escape) < 4:
escape = escape + source.get()
return LITERAL, int(escape[1:], 8) & 0xff
elif c in DIGITS:
# octal escape *or* decimal group reference (sigh)
if source.next in DIGITS:
escape = escape + source.get()
if (escape[1] in OCTDIGITS and escape[2] in OCTDIGITS and
source.next in OCTDIGITS):
# got three octal digits; this is an octal escape
escape = escape + source.get()
return LITERAL, int(escape[1:], 8) & 0xff
# not an octal escape, so this is a group reference
group = int(escape[1:])
if group < state.groups:
if not state.checkgroup(group):
raise error("cannot refer to open group")
if state.lookbehind:
import warnings
warnings.warn('group references in lookbehind '
'assertions are not supported',
RuntimeWarning)
return GROUPREF, group
raise ValueError
if len(escape) == 2:
return LITERAL, ord(escape[1])
except ValueError:
pass
raise error("bogus escape: %s" % repr(escape))
def _parse_sub(source, state, nested=1):
# parse an alternation: a|b|c
items = []
itemsappend = items.append
sourcematch = source.match
while 1:
itemsappend(_parse(source, state))
if sourcematch("|"):
continue
if not nested:
break
if not source.next or sourcematch(")", 0):
break
else:
raise error("pattern not properly closed")
if len(items) == 1:
return items[0]
subpattern = SubPattern(state)
subpatternappend = subpattern.append
# check if all items share a common prefix
while 1:
prefix = None
for item in items:
if not item:
break
if prefix is None:
prefix = item[0]
elif item[0] != prefix:
break
else:
# all subitems start with a common "prefix".
# move it out of the branch
for item in items:
del item[0]
subpatternappend(prefix)
continue # check next one
break
# check if the branch can be replaced by a character set
for item in items:
if len(item) != 1 or item[0][0] != LITERAL:
break
else:
# we can store this as a character set instead of a
# branch (the compiler may optimize this even more)
set = []
setappend = set.append
for item in items:
setappend(item[0])
subpatternappend((IN, set))
return subpattern
subpattern.append((BRANCH, (None, items)))
return subpattern
def _parse_sub_cond(source, state, condgroup):
item_yes = _parse(source, state)
if source.match("|"):
item_no = _parse(source, state)
if source.match("|"):
raise error("conditional backref with more than two branches")
else:
item_no = None
if source.next and not source.match(")", 0):
raise error("pattern not properly closed")
subpattern = SubPattern(state)
subpattern.append((GROUPREF_EXISTS, (condgroup, item_yes, item_no)))
return subpattern
_PATTERNENDERS = set("|)")
_ASSERTCHARS = set("=!<")
_LOOKBEHINDASSERTCHARS = set("=!")
_REPEATCODES = set([MIN_REPEAT, MAX_REPEAT])
def _parse(source, state):
# parse a simple pattern
subpattern = SubPattern(state)
# precompute constants into local variables
subpatternappend = subpattern.append
sourceget = source.get
sourcematch = source.match
_len = len
PATTERNENDERS = _PATTERNENDERS
ASSERTCHARS = _ASSERTCHARS
LOOKBEHINDASSERTCHARS = _LOOKBEHINDASSERTCHARS
REPEATCODES = _REPEATCODES
while 1:
if source.next in PATTERNENDERS:
break # end of subpattern
this = sourceget()
if this is None:
break # end of pattern
if state.flags & SRE_FLAG_VERBOSE:
# skip whitespace and comments
if this in WHITESPACE:
continue
if this == "#":
while 1:
this = sourceget()
if this in (None, "\n"):
break
continue
if this and this[0] not in SPECIAL_CHARS:
subpatternappend((LITERAL, ord(this)))
elif this == "[":
# character set
set = []
setappend = set.append
## if sourcematch(":"):
## pass # handle character classes
if sourcematch("^"):
setappend((NEGATE, None))
# check remaining characters
start = set[:]
while 1:
this = sourceget()
if this == "]" and set != start:
break
elif this and this[0] == "\\":
code1 = _class_escape(source, this)
elif this:
code1 = LITERAL, ord(this)
else:
raise error("unexpected end of regular expression")
if sourcematch("-"):
# potential range
this = sourceget()
if this == "]":
if code1[0] is IN:
code1 = code1[1][0]
setappend(code1)
setappend((LITERAL, ord("-")))
break
elif this:
if this[0] == "\\":
code2 = _class_escape(source, this)
else:
code2 = LITERAL, ord(this)
if code1[0] != LITERAL or code2[0] != LITERAL:
raise error("bad character range")
lo = code1[1]
hi = code2[1]
if hi < lo:
raise error("bad character range")
setappend((RANGE, (lo, hi)))
else:
raise error("unexpected end of regular expression")
else:
if code1[0] is IN:
code1 = code1[1][0]
setappend(code1)
# XXX: <fl> should move set optimization to compiler!
if _len(set) == 1 and set[0][0] is LITERAL:
subpatternappend(set[0]) # optimization
elif _len(set) == 2 and set[0][0] is NEGATE and set[1][0] is LITERAL:
subpatternappend((NOT_LITERAL, set[1][1])) # optimization
else:
# XXX: <fl> should add charmap optimization here
subpatternappend((IN, set))
elif this and this[0] in REPEAT_CHARS:
# repeat previous item
if this == "?":
min, max = 0, 1
elif this == "*":
min, max = 0, MAXREPEAT
elif this == "+":
min, max = 1, MAXREPEAT
elif this == "{":
if source.next == "}":
subpatternappend((LITERAL, ord(this)))
continue
here = source.tell()
min, max = 0, MAXREPEAT
lo = hi = ""
while source.next in DIGITS:
lo = lo + source.get()
if sourcematch(","):
while source.next in DIGITS:
hi = hi + sourceget()
else:
hi = lo
if not sourcematch("}"):
subpatternappend((LITERAL, ord(this)))
source.seek(here)
continue
if lo:
min = int(lo)
if min >= MAXREPEAT:
raise OverflowError("the repetition number is too large")
if hi:
max = int(hi)
if max >= MAXREPEAT:
raise OverflowError("the repetition number is too large")
if max < min:
raise error("bad repeat interval")
else:
raise error("not supported")
# figure out which item to repeat
if subpattern:
item = subpattern[-1:]
else:
item = None
if not item or (_len(item) == 1 and item[0][0] == AT):
raise error("nothing to repeat")
if item[0][0] in REPEATCODES:
raise error("multiple repeat")
if sourcematch("?"):
subpattern[-1] = (MIN_REPEAT, (min, max, item))
else:
subpattern[-1] = (MAX_REPEAT, (min, max, item))
elif this == ".":
subpatternappend((ANY, None))
elif this == "(":
group = 1
name = None
condgroup = None
if sourcematch("?"):
group = 0
# options
if sourcematch("P"):
# python extensions
if sourcematch("<"):
# named group: skip forward to end of name
name = ""
while 1:
char = sourceget()
if char is None:
raise error("unterminated name")
if char == ">":
break
name = name + char
group = 1
if not name:
raise error("missing group name")
if not isname(name):
raise error("bad character in group name %r" %
name)
elif sourcematch("="):
# named backreference
name = ""
while 1:
char = sourceget()
if char is None:
raise error("unterminated name")
if char == ")":
break
name = name + char
if not name:
raise error("missing group name")
if not isname(name):
raise error("bad character in backref group name "
"%r" % name)
gid = state.groupdict.get(name)
if gid is None:
msg = "unknown group name: {0!r}".format(name)
raise error(msg)
if state.lookbehind:
import warnings
warnings.warn('group references in lookbehind '
'assertions are not supported',
RuntimeWarning)
subpatternappend((GROUPREF, gid))
continue
else:
char = sourceget()
if char is None:
raise error("unexpected end of pattern")
raise error("unknown specifier: ?P%s" % char)
elif sourcematch(":"):
# non-capturing group
group = 2
elif sourcematch("#"):
# comment
while 1:
if source.next is None or source.next == ")":
break
sourceget()
if not sourcematch(")"):
raise error("unbalanced parenthesis")
continue
elif source.next in ASSERTCHARS:
# lookahead assertions
char = sourceget()
dir = 1
if char == "<":
if source.next not in LOOKBEHINDASSERTCHARS:
raise error("syntax error")
dir = -1 # lookbehind
char = sourceget()
state.lookbehind += 1
p = _parse_sub(source, state)
if dir < 0:
state.lookbehind -= 1
if not sourcematch(")"):
raise error("unbalanced parenthesis")
if char == "=":
subpatternappend((ASSERT, (dir, p)))
else:
subpatternappend((ASSERT_NOT, (dir, p)))
continue
elif sourcematch("("):
# conditional backreference group
condname = ""
while 1:
char = sourceget()
if char is None:
raise error("unterminated name")
if char == ")":
break
condname = condname + char
group = 2
if not condname:
raise error("missing group name")
if isname(condname):
condgroup = state.groupdict.get(condname)
if condgroup is None:
msg = "unknown group name: {0!r}".format(condname)
raise error(msg)
else:
try:
condgroup = int(condname)
except ValueError:
raise error("bad character in group name")
if state.lookbehind:
import warnings
warnings.warn('group references in lookbehind '
'assertions are not supported',
RuntimeWarning)
else:
# flags
if not source.next in FLAGS:
raise error("unexpected end of pattern")
while source.next in FLAGS:
state.flags = state.flags | FLAGS[sourceget()]
if group:
# parse group contents
if group == 2:
# anonymous group
group = None
else:
group = state.opengroup(name)
if condgroup:
p = _parse_sub_cond(source, state, condgroup)
else:
p = _parse_sub(source, state)
if not sourcematch(")"):
raise error("unbalanced parenthesis")
if group is not None:
state.closegroup(group)
subpatternappend((SUBPATTERN, (group, p)))
else:
while 1:
char = sourceget()
if char is None:
raise error("unexpected end of pattern")
if char == ")":
break
raise error("unknown extension")
elif this == "^":
subpatternappend((AT, AT_BEGINNING))
elif this == "$":
subpattern.append((AT, AT_END))
elif this and this[0] == "\\":
code = _escape(source, this, state)
subpatternappend(code)
else:
raise error("parser error")
return subpattern
def parse(str, flags=0, pattern=None):
# parse 're' pattern into list of (opcode, argument) tuples
source = Tokenizer(str)
if pattern is None:
pattern = Pattern()
pattern.flags = flags
pattern.str = str
p = _parse_sub(source, pattern, 0)
tail = source.get()
if tail == ")":
raise error("unbalanced parenthesis")
elif tail:
raise error("bogus characters at end of regular expression")
if not (flags & SRE_FLAG_VERBOSE) and p.pattern.flags & SRE_FLAG_VERBOSE:
# the VERBOSE flag was switched on inside the pattern. to be
# on the safe side, we'll parse the whole thing again...
return parse(str, p.pattern.flags)
if flags & SRE_FLAG_DEBUG:
p.dump()
return p
def parse_template(source, pattern):
# parse 're' replacement string into list of literals and
# group references
s = Tokenizer(source)
sget = s.get
p = []
a = p.append
def literal(literal, p=p, pappend=a):
if p and p[-1][0] is LITERAL:
p[-1] = LITERAL, p[-1][1] + literal
else:
pappend((LITERAL, literal))
sep = source[:0]
if type(sep) is type(""):
makechar = chr
else:
makechar = unichr
while 1:
this = sget()
if this is None:
break # end of replacement string
if this and this[0] == "\\":
# group
c = this[1:2]
if c == "g":
name = ""
if s.match("<"):
while 1:
char = sget()
if char is None:
raise error("unterminated group name")
if char == ">":
break
name = name + char
if not name:
raise error("missing group name")
try:
index = int(name)
if index < 0:
raise error("negative group number")
except ValueError:
if not isname(name):
raise error("bad character in group name")
try:
index = pattern.groupindex[name]
except KeyError:
msg = "unknown group name: {0!r}".format(name)
raise IndexError(msg)
a((MARK, index))
elif c == "0":
if s.next in OCTDIGITS:
this = this + sget()
if s.next in OCTDIGITS:
this = this + sget()
literal(makechar(int(this[1:], 8) & 0xff))
elif c in DIGITS:
isoctal = False
if s.next in DIGITS:
this = this + sget()
if (c in OCTDIGITS and this[2] in OCTDIGITS and
s.next in OCTDIGITS):
this = this + sget()
isoctal = True
literal(makechar(int(this[1:], 8) & 0xff))
if not isoctal:
a((MARK, int(this[1:])))
else:
try:
this = makechar(ESCAPES[this][1])
except KeyError:
pass
literal(this)
else:
literal(this)
# convert template to groups and literals lists
i = 0
groups = []
groupsappend = groups.append
literals = [None] * len(p)
for c, s in p:
if c is MARK:
groupsappend((i, s))
# literal[i] is already None
else:
literals[i] = s
i = i + 1
return groups, literals
def expand_template(template, match):
g = match.group
sep = match.string[:0]
groups, literals = template
literals = literals[:]
try:
for index, group in groups:
literals[index] = s = g(group)
if s is None:
raise error("unmatched group")
except IndexError:
raise error("invalid group reference")
return sep.join(literals)

@@ -97,6 +97,7 @@ def test_to_string():
(r'1*', '1*'),
(r'1*?', '1*?'),
(r'1+', '1+'),
(r'(?>abc)def', '(?>abc)def'),
)
for case in cases:
regexp, string = case
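A hedged round-trip of the new atomic-group case, assuming the Regexp wrapper lives at gixy.core.regexp and normalizes patterns back to strings the way test_to_string exercises:

from gixy.core.regexp import Regexp  # module path assumed from the diff above

r = Regexp(r'(?>abc)def')
assert str(r) == '(?>abc)def'  # AtomicGroupToken.__str__ re-emits "(?>...)" unchanged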