Commit 5d073f10 authored by eckhart's avatar eckhart
Browse files

Examples updated and bug fixes

parent c124120f
......@@ -108,14 +108,14 @@ from DHParser import start_logging, suspend_logging, resume_logging, is_filename
Node, TransformationFunc, TransformationDict, transformation_factory, traverse, \\
remove_children_if, move_adjacent, normalize_whitespace, is_anonymous, matches_re, \\
reduce_single_child, replace_by_single_child, replace_or_reduce, remove_whitespace, \\
replace_by_children, remove_empty, remove_tokens, flatten, \\
replace_by_children, remove_empty, remove_tokens, flatten, all_of, any_of, \\
merge_adjacent, collapse, collapse_children_if, transform_content, WHITESPACE_PTYPE, \\
TOKEN_PTYPE, remove_children, remove_content, remove_brackets, change_tag_name, \\
remove_anonymous_tokens, keep_children, is_one_of, not_one_of, has_content, apply_if, peek, \\
remove_anonymous_empty, keep_nodes, traverse_locally, strip, lstrip, rstrip, \\
transform_content, replace_content_with, forbid, assert_content, remove_infix_operator, \\
add_error, error_on, recompile_grammar, left_associative, lean_left, set_config_value, \\
get_config_value, XML_SERIALIZATION, SXPRESSION_SERIALIZATION, node_maker, any_of, \\
get_config_value, XML_SERIALIZATION, SXPRESSION_SERIALIZATION, node_maker, \\
INDENTED_SERIALIZATION, JSON_SERIALIZATION, access_thread_locals, access_presets, \\
finalize_presets, ErrorCode, RX_NEVER_MATCH, set_tracer, resume_notices_on, \\
trace_history, has_descendant, neg, has_ancestor, optional_last_value, insert, \\
......
......@@ -129,6 +129,7 @@ PARSER_LOOKAHEAD_FAILURE_ONLY = ErrorCode(1030)
PARSER_STOPPED_BEFORE_END = ErrorCode(1040)
PARSER_LOOKAHEAD_MATCH_ONLY = ErrorCode(1045)
CAPTURE_STACK_NOT_EMPTY = ErrorCode(1050)
AUTORETRIEVED_SYMBOL_NOT_CLEARED = ErrorCode(1055)
MALFORMED_ERROR_STRING = ErrorCode(1060)
AMBIGUOUS_ERROR_HANDLING = ErrorCode(1070)
REDEFINED_DIRECTIVE = ErrorCode(1080)
......
......@@ -42,7 +42,7 @@ from DHParser.error import Error, ErrorCode, is_error, MANDATORY_CONTINUATION, \
MALFORMED_ERROR_STRING, MANDATORY_CONTINUATION_AT_EOF, DUPLICATE_PARSERS_IN_ALTERNATIVE, \
CAPTURE_WITHOUT_PARSERNAME, CAPTURE_DROPPED_CONTENT_WARNING, LOOKAHEAD_WITH_OPTIONAL_PARSER, \
BADLY_NESTED_OPTIONAL_PARSER, BAD_ORDER_OF_ALTERNATIVES, BAD_MANDATORY_SETUP, \
OPTIONAL_REDUNDANTLY_NESTED_WARNING, CAPTURE_STACK_NOT_EMPTY, BAD_REPETITION_COUNT
OPTIONAL_REDUNDANTLY_NESTED_WARNING, CAPTURE_STACK_NOT_EMPTY, BAD_REPETITION_COUNT, AUTORETRIEVED_SYMBOL_NOT_CLEARED
from DHParser.log import CallItem, HistoryRecord
from DHParser.preprocess import BEGIN_TOKEN, END_TOKEN, RX_TOKEN_NAME
from DHParser.stringview import StringView, EMPTY_STRING_VIEW
......@@ -711,6 +711,26 @@ def is_parser_placeholder(parser: Optional[Parser]) -> bool:
return not parser or parser.ptype == ":Parser"
# functions for analysing the parser tree/graph ###
def has_non_autocaptured_symbols(context: List[Parser]) -> Optional[bool]:
    """Returns True, if the context contains a Capture-Parser that is not
    shielded by a Retrieve-Parser. This is the case for captured symbols
    that are not "auto-captured" by a Retrieve-Parser.

    Returns ``None`` (falsy) when no such Capture-parser is found or when a
    Retrieve-parser is encountered first in the context.
    """
    for parser in context:
        if parser.ptype == ":Retrieve":
            # A Retrieve earlier in the context means the capture is
            # "auto-captured"; stop scanning and fall through to None.
            break
        elif parser.ptype == ":Capture":
            # Unwrap Synonym-/Forward-indirections to reach the parser
            # actually wrapped by the Capture.
            p = cast(UnaryParser, parser).parser
            while p.ptype in (":Synonym", ":Forward"):
                p = cast(UnaryParser, p).parser
            if not isinstance(p, Retrieve):
                # Capture that does not wrap a Retrieve -> a symbol that was
                # captured explicitly rather than auto-captured.
                return True
    return None
########################################################################
#
# Grammar class, central administration of all parser of a grammar
......@@ -1394,7 +1414,10 @@ class Grammar:
if any(self.variables__.values()):
error_msg = "Capture-stack not empty after end of parsing: " \
+ ', '.join(k for k, i in self.variables__.items() if len(i) >= 1)
error_code = CAPTURE_STACK_NOT_EMPTY
if parser.apply(has_non_autocaptured_symbols):
error_code = CAPTURE_STACK_NOT_EMPTY
else:
error_code = AUTORETRIEVED_SYMBOL_NOT_CLEARED
if result:
if result.children:
# add another child node at the end to ensure that the position
......@@ -1500,7 +1523,7 @@ class Grammar:
Checks the parser tree statically for possible errors.
This function is called by the constructor of class Grammar and does
not need to be called externally.
not need to (and should not) be called externally.
:return: a list of error-tuples consisting of the narrowest containing
named parser (i.e. the symbol on which the failure occurred),
......
......@@ -39,11 +39,12 @@ from typing import Dict, List, Union, cast
from DHParser.configuration import get_config_value
from DHParser.error import Error, is_error, adjust_error_locations, PARSER_LOOKAHEAD_MATCH_ONLY, \
PARSER_LOOKAHEAD_FAILURE_ONLY, MANDATORY_CONTINUATION_AT_EOF
PARSER_LOOKAHEAD_FAILURE_ONLY, MANDATORY_CONTINUATION_AT_EOF, AUTORETRIEVED_SYMBOL_NOT_CLEARED
from DHParser.log import is_logging, clear_logs, local_log_dir, log_parsing_history
from DHParser.parse import UnknownParserError, Parser, Lookahead
from DHParser.syntaxtree import Node, RootNode, parse_tree, flatten_sxpr, ZOMBIE_TAG
from DHParser.trace import set_tracer, all_descendants, trace_history
from DHParser.transform import traverse, remove_children
from DHParser.toolkit import load_if_file, re
__all__ = ('unit_from_config',
......@@ -125,7 +126,7 @@ def unit_from_config(config_str, filename):
# unit.setdefault(symbol, OD()).setdefault(stage, OD())[testkey] = testcode
test = unit.setdefault(symbol, OD()).setdefault(stage, OD())
assert testkey.strip('*') not in test and (testkey.strip('*') + '*') not in test, \
"Key %s already exists in text %s:%s !" % (testkey, stage, symbol)
'"%s": Key %s already exists in %s:%s !' % (filename, testkey, stage, symbol)
test[testkey] = testcode
pos = eat_comments(cfg, entry_match.span()[1])
entry_match = RX_ENTRY.match(cfg, pos)
......@@ -269,7 +270,8 @@ def get_report(test_unit) -> str:
POSSIBLE_ARTIFACTS = frozenset((
PARSER_LOOKAHEAD_MATCH_ONLY,
PARSER_LOOKAHEAD_FAILURE_ONLY,
MANDATORY_CONTINUATION_AT_EOF
MANDATORY_CONTINUATION_AT_EOF,
AUTORETRIEVED_SYMBOL_NOT_CLEARED
))
......@@ -351,7 +353,7 @@ def grammar_unit(test_unit, parser_factory, transformer_factory, report='REPORT'
raw_errors = cast(RootNode, syntax_tree).errors_sorted
is_artifact = ({e.code for e in raw_errors}
<= {PARSER_LOOKAHEAD_FAILURE_ONLY,
# PARSER_STOPPED_BEFORE_END,
AUTORETRIEVED_SYMBOL_NOT_CLEARED,
PARSER_LOOKAHEAD_MATCH_ONLY}
or (len(raw_errors) == 1
and (raw_errors[-1].code == PARSER_LOOKAHEAD_MATCH_ONLY
......@@ -432,6 +434,7 @@ def grammar_unit(test_unit, parser_factory, transformer_factory, report='REPORT'
if "ast" in tests or report:
ast = copy.deepcopy(cst)
old_errors = set(ast.errors)
traverse(ast, {'*': remove_children({'__TESTING_ARTIFACT__'})})
transform(ast)
tests.setdefault('__ast__', {})[test_name] = ast
ast_errors = [e for e in ast.errors if e not in old_errors]
......@@ -466,8 +469,6 @@ def grammar_unit(test_unit, parser_factory, transformer_factory, report='REPORT'
if "ast" in tests and len(errata) == errflag:
compare = parse_tree(get(tests, "ast", test_name))
if compare:
from DHParser.transform import traverse, remove_children
traverse(ast, {'*': remove_children({'__TESTING_ARTIFACT__'})})
traverse(compare, {'*': remove_children({'__TESTING_ARTIFACT__'})})
if not compare.equals(ast):
errata.append('Abstract syntax tree test "%s" for parser "%s" failed:'
......
......@@ -77,8 +77,8 @@ def trace_history(self: Parser, text: StringView) -> Tuple[Optional[Node], Strin
line_col(grammar.document_lbreaks__, mre.error.pos), errors))
grammar.call_stack__.append(
((self.repr if self.tag_name in (REGEXP_PTYPE, TOKEN_PTYPE, ":Retrieve", ":Pop")
else (self.pname or self.tag_name)), location))
(((' ' + self.repr) if self.tag_name in (REGEXP_PTYPE, TOKEN_PTYPE, ":Retrieve", ":Pop")
else (self.pname or self.tag_name)), location)) # ' ' added to avoid ':' as first char!
grammar.moving_forward__ = True
try:
......
......@@ -29,7 +29,7 @@ from DHParser import start_logging, is_filename, load_if_file, \
Node, TransformationFunc, TransformationDict, transformation_factory, traverse, \
remove_children_if, move_adjacent, normalize_whitespace, is_anonymous, matches_re, \
reduce_single_child, replace_by_single_child, replace_or_reduce, remove_whitespace, \
remove_empty, remove_tokens, flatten, is_insignificant_whitespace, is_empty, \
remove_empty, remove_tokens, flatten, is_empty, \
collapse, collapse_children_if, WHITESPACE_PTYPE, TOKEN_PTYPE, \
remove_children, remove_content, remove_brackets, change_tag_name, remove_anonymous_tokens, \
keep_children, is_one_of, not_one_of, has_content, apply_if, \
......
......@@ -29,7 +29,7 @@ from DHParser import start_logging, is_filename, load_if_file, \
Node, TransformationFunc, TransformationDict, transformation_factory, traverse, \
remove_children_if, move_adjacent, normalize_whitespace, is_anonymous, matches_re, \
reduce_single_child, replace_by_single_child, replace_or_reduce, remove_whitespace, \
remove_empty, remove_tokens, flatten, is_insignificant_whitespace, is_empty, lean_left, \
remove_empty, remove_tokens, flatten, is_empty, lean_left, \
collapse, collapse_children_if, WHITESPACE_PTYPE, TOKEN_PTYPE, \
remove_children, remove_content, remove_brackets, change_tag_name, remove_anonymous_tokens, \
keep_children, is_one_of, not_one_of, has_content, apply_if, set_tracer, trace_history, \
......
......@@ -29,7 +29,7 @@ from DHParser import start_logging, is_filename, load_if_file, \
Node, TransformationFunc, TransformationDict, transformation_factory, traverse, \
remove_children_if, move_adjacent, normalize_whitespace, is_anonymous, matches_re, \
reduce_single_child, replace_by_single_child, replace_or_reduce, remove_whitespace, \
remove_empty, remove_tokens, flatten, is_insignificant_whitespace, \
remove_empty, remove_tokens, flatten, \
collapse, collapse_children_if, WHITESPACE_PTYPE, TOKEN_PTYPE, \
remove_children, remove_content, remove_brackets, change_tag_name, remove_anonymous_tokens, \
keep_children, is_one_of, not_one_of, has_content, apply_if, \
......
......@@ -29,7 +29,7 @@ from DHParser import start_logging, is_filename, load_if_file, \
Node, TransformationFunc, TransformationDict, transformation_factory, traverse, \
remove_children_if, move_adjacent, normalize_whitespace, is_anonymous, matches_re, \
reduce_single_child, replace_by_single_child, replace_or_reduce, remove_whitespace, \
remove_empty, remove_tokens, flatten, is_insignificant_whitespace, is_empty, \
remove_empty, remove_tokens, flatten, is_empty, \
collapse, collapse_children_if, WHITESPACE_PTYPE, TOKEN_PTYPE, \
remove_children, remove_content, remove_brackets, change_tag_name, remove_anonymous_tokens, \
keep_children, is_one_of, not_one_of, has_content, apply_if, \
......
......@@ -28,7 +28,7 @@ from DHParser import is_filename, load_if_file, \
Node, TransformationDict, Whitespace, \
traverse, remove_children_if, is_anonymous, \
reduce_single_child, replace_by_single_child, replace_or_reduce, remove_whitespace, \
remove_empty, remove_tokens, flatten, is_insignificant_whitespace, \
remove_empty, remove_tokens, flatten, \
is_empty, collapse, remove_children, remove_content, remove_brackets, change_tag_name, \
keep_children, is_one_of, has_content, apply_if, \
WHITESPACE_PTYPE, TOKEN_PTYPE, THREAD_LOCALS
......
#!/usr/bin/env python3
#######################################################################
#
# SYMBOLS SECTION - Can be edited. Changes will be preserved.
#
#######################################################################
import collections
from functools import partial
import os
import sys
try:
scriptpath = os.path.dirname(__file__)
except NameError:
scriptpath = ''
dhparser_parentdir = os.path.abspath(os.path.join(scriptpath, r'../..'))
if scriptpath not in sys.path:
sys.path.append(scriptpath)
if dhparser_parentdir not in sys.path:
sys.path.append(dhparser_parentdir)
try:
import regex as re
except ImportError:
import re
from DHParser import start_logging, suspend_logging, resume_logging, is_filename, load_if_file, \
Grammar, Compiler, nil_preprocessor, PreprocessorToken, Whitespace, Drop, \
Lookbehind, Lookahead, Alternative, Pop, Text, Synonym, Interleave, \
Unordered, Option, NegativeLookbehind, OneOrMore, RegExp, Retrieve, Series, Capture, \
ZeroOrMore, Forward, NegativeLookahead, Required, mixin_comment, compile_source, \
grammar_changed, last_value, matching_bracket, PreprocessorFunc, is_empty, remove_if, \
Node, TransformationFunc, TransformationDict, transformation_factory, traverse, \
remove_children_if, move_adjacent, normalize_whitespace, is_anonymous, matches_re, \
reduce_single_child, replace_by_single_child, replace_or_reduce, remove_whitespace, \
replace_by_children, remove_empty, remove_tokens, flatten, is_insignificant_whitespace, \
merge_adjacent, collapse, collapse_children_if, replace_content, WHITESPACE_PTYPE, \
TOKEN_PTYPE, remove_children, remove_content, remove_brackets, change_tag_name, \
remove_anonymous_tokens, keep_children, is_one_of, not_one_of, has_content, apply_if, peek, \
remove_anonymous_empty, keep_nodes, traverse_locally, strip, lstrip, rstrip, \
replace_content, replace_content_by, forbid, assert_content, remove_infix_operator, \
add_error, error_on, recompile_grammar, left_associative, lean_left, set_config_value, \
get_config_value, XML_SERIALIZATION, SXPRESSION_SERIALIZATION, \
COMPACT_SERIALIZATION, JSON_SERIALIZATION, access_thread_locals, access_presets, \
finalize_presets, ErrorCode, RX_NEVER_MATCH, set_tracer, resume_notices_on, \
trace_history, has_descendant, neg, has_ancestor, optional_last_value
#######################################################################
#
# PREPROCESSOR SECTION - Can be edited. Changes will be preserved.
#
#######################################################################
def ArithmeticPreprocessor(text):
    """Identity preprocessor for Arithmetic sources.

    Leaves the source text untouched and returns it together with an
    identity position-mapping function (every preprocessed position maps
    onto the same position in the original text).
    """
    def identity_mapping(position):
        # Nothing was altered, so positions are unchanged.
        return position
    return text, identity_mapping
def get_preprocessor() -> PreprocessorFunc:
    """Returns the preprocessor function for Arithmetic source files."""
    return ArithmeticPreprocessor
#######################################################################
#
# PARSER SECTION - Don't edit! CHANGES WILL BE OVERWRITTEN!
#
#######################################################################
class ArithmeticGrammar(Grammar):
    r"""Parser for an Arithmetic source file.

    Auto-generated from the Arithmetic.ebnf grammar; do not edit the parser
    definitions by hand — changes will be overwritten on recompilation.
    """
    expression = Forward()
    source_hash__ = "07647bacdecf589000642f353c3813a1"
    anonymous__ = re.compile('..(?<=^)')
    static_analysis_pending__ = []  # type: List[bool]
    parser_initialization__ = ["upon instantiation"]
    COMMENT__ = r'#.*'
    comment_rx__ = re.compile(COMMENT__)
    WHITESPACE__ = r'\s*'
    WSP_RE__ = mixin_comment(whitespace=WHITESPACE__, comment=COMMENT__)
    wsp__ = Whitespace(WSP_RE__)
    # dropped whitespace: consumed but not kept in the syntax tree
    dwsp__ = Drop(Whitespace(WSP_RE__))
    VARIABLE = Series(RegExp('[A-Za-z]'), dwsp__)
    NUMBER = Series(RegExp('(?:0|(?:[1-9]\\d*))(?:\\.\\d+)?'), dwsp__)
    group = Series(Series(Drop(Text("(")), dwsp__), expression, Series(Drop(Text(")")), dwsp__))
    sign = Alternative(Drop(Text("+")), Drop(Text("-")))
    factor = Series(Option(sign), Alternative(NUMBER, VARIABLE, group), ZeroOrMore(Alternative(VARIABLE, group)))
    term = Series(factor, ZeroOrMore(Series(Alternative(Series(Drop(Text("*")), dwsp__), Series(Drop(Text("/")), dwsp__)), factor)))
    expression.set(Series(term, ZeroOrMore(Series(Alternative(Series(Drop(Text("+")), dwsp__), Series(Drop(Text("-")), dwsp__)), term))))
    root__ = expression
def get_grammar() -> ArithmeticGrammar:
    """Returns a thread/process-exclusive ArithmeticGrammar-singleton.

    The singleton is stored in thread-local storage, so each thread gets its
    own grammar object (Grammar objects carry mutable parsing state).
    """
    THREAD_LOCALS = access_thread_locals()
    try:
        grammar = THREAD_LOCALS.Arithmetic_00000001_grammar_singleton
    except AttributeError:
        # First access in this thread: create and cache the singleton.
        THREAD_LOCALS.Arithmetic_00000001_grammar_singleton = ArithmeticGrammar()
        if hasattr(get_grammar, 'python_src__'):
            THREAD_LOCALS.Arithmetic_00000001_grammar_singleton.python_src__ = get_grammar.python_src__
        grammar = THREAD_LOCALS.Arithmetic_00000001_grammar_singleton
    # Attach history tracing according to the current configuration.
    if get_config_value('resume_notices'):
        resume_notices_on(grammar)
    elif get_config_value('history_tracking'):
        set_tracer(grammar, trace_history)
    return grammar
#######################################################################
#
# AST SECTION - Can be edited. Changes will be preserved.
#
#######################################################################
Arithmetic_AST_transformation_table = {
    # AST Transformations for the Arithmetic-grammar
    # "<" entry: transformation applied to all nodes first
    "<": flatten,
    "expression": [],
    "term": [],
    "factor": [],
    "sign": [],
    "group": [],
    "NUMBER": [],
    "VARIABLE": [],
    # "*" entry: fallback for nodes without a specific entry
    "*": replace_by_single_child
}
def CreateArithmeticTransformer() -> TransformationFunc:
    """Creates a transformation function that does not share state with other
    threads or processes.

    A private copy of the transformation table is bound into the returned
    callable, so concurrent users cannot interfere with each other.
    """
    private_table = Arithmetic_AST_transformation_table.copy()
    return partial(traverse, processing_table=private_table)
def get_transformer() -> TransformationFunc:
    """Returns a thread/process-exclusive transformation function.

    The transformer is cached in thread-local storage and created lazily on
    first access in each thread.
    """
    THREAD_LOCALS = access_thread_locals()
    try:
        transformer = THREAD_LOCALS.Arithmetic_00000001_transformer_singleton
    except AttributeError:
        # First access in this thread: create and cache the transformer.
        THREAD_LOCALS.Arithmetic_00000001_transformer_singleton = CreateArithmeticTransformer()
        transformer = THREAD_LOCALS.Arithmetic_00000001_transformer_singleton
    return transformer
#######################################################################
#
# COMPILER SECTION - Can be edited. Changes will be preserved.
#
#######################################################################
class ArithmeticCompiler(Compiler):
    """Compiler for the abstract-syntax-tree of a Arithmetic source file.

    Methods named ``on_<symbol>`` compile the node for the corresponding
    grammar symbol; nodes without a handler fall back to the default
    compilation of the base class.
    """

    def __init__(self):
        super(ArithmeticCompiler, self).__init__()

    def reset(self):
        super().reset()
        # initialize your variables here, not in the constructor!

    def on_expression(self, node):
        # No special treatment yet: delegate to the generic compiler.
        return self.fallback_compiler(node)

    # def on_term(self, node):
    #     return node

    # def on_factor(self, node):
    #     return node

    # def on_sign(self, node):
    #     return node

    # def on_group(self, node):
    #     return node

    # def on_NUMBER(self, node):
    #     return node

    # def on_VARIABLE(self, node):
    #     return node
def get_compiler() -> ArithmeticCompiler:
    """Returns a thread/process-exclusive ArithmeticCompiler-singleton.

    The compiler is cached in thread-local storage and created lazily on
    first access in each thread.
    """
    THREAD_LOCALS = access_thread_locals()
    try:
        compiler = THREAD_LOCALS.Arithmetic_00000001_compiler_singleton
    except AttributeError:
        # First access in this thread: create and cache the compiler.
        THREAD_LOCALS.Arithmetic_00000001_compiler_singleton = ArithmeticCompiler()
        compiler = THREAD_LOCALS.Arithmetic_00000001_compiler_singleton
    return compiler
#######################################################################
#
# END OF DHPARSER-SECTIONS
#
#######################################################################
def compile_src(source):
    """Compiles ``source`` and returns (result, errors, ast).

    ``source`` may be the source text itself or a file name (see
    ``compile_source``). Runs the full pipeline: preprocessor, grammar,
    AST-transformation and compiler.
    """
    result_tuple = compile_source(source, get_preprocessor(), get_grammar(), get_transformer(),
                                  get_compiler())
    return result_tuple
if __name__ == "__main__":
    # recompile grammar if needed
    grammar_path = os.path.abspath(__file__).replace('Parser.py', '.ebnf')
    parser_update = False

    def notify():
        # Callback invoked by recompile_grammar when regeneration starts.
        global parser_update
        parser_update = True
        print('recompiling ' + grammar_path)

    if os.path.exists(grammar_path):
        if not recompile_grammar(grammar_path, force=False, notify=notify):
            # Recompilation failed: show the generated error report and quit.
            error_file = os.path.basename(__file__).replace('Parser.py', '_ebnf_ERRORS.txt')
            with open(error_file, encoding="utf-8") as f:
                print(f.read())
            sys.exit(1)
        elif parser_update:
            # Parser source was regenerated; a fresh run picks up the changes.
            print(os.path.basename(__file__) + ' has changed. '
                  'Please run again in order to apply updated compiler')
            sys.exit(0)
    else:
        print('Could not check whether grammar requires recompiling, '
              'because grammar was not found at: ' + grammar_path)

    if len(sys.argv) > 1:
        # compile file
        file_name, log_dir = sys.argv[1], ''
        if file_name in ['-d', '--debug'] and len(sys.argv) > 2:
            # Debug mode: enable history tracking and syntax-tree logging.
            file_name, log_dir = sys.argv[2], 'LOGS'
            set_config_value('history_tracking', True)
            set_config_value('resume_notices', True)
            set_config_value('log_syntax_trees', set(('cst', 'ast')))
        start_logging(log_dir)
        result, errors, _ = compile_src(file_name)
        if errors:
            # Print errors with a path relative to the working directory.
            cwd = os.getcwd()
            rel_path = file_name[len(cwd):] if file_name.startswith(cwd) else file_name
            for error in errors:
                print(rel_path + ':' + str(error))
            sys.exit(1)
        else:
            print(result.serialize() if isinstance(result, Node) else result)
    else:
        print("Usage: ArithmeticParser.py [FILENAME]")
#!/usr/bin/env python3
#######################################################################
#
# SYMBOLS SECTION - Can be edited. Changes will be preserved.
#
#######################################################################
import collections
from functools import partial
import os
import sys
dhparser_parentdir = os.path.abspath(r'../..')
if dhparser_parentdir not in sys.path:
sys.path.append(dhparser_parentdir)
try:
import regex as re
except ImportError:
import re
from DHParser import start_logging, suspend_logging, resume_logging, is_filename, load_if_file, \
Grammar, Compiler, nil_preprocessor, PreprocessorToken, Whitespace, Drop, \
Lookbehind, Lookahead, Alternative, Pop, Text, Synonym, AllOf, SomeOf, \
Unordered, Option, NegativeLookbehind, OneOrMore, RegExp, Retrieve, Series, Capture, \
ZeroOrMore, Forward, NegativeLookahead, Required, mixin_comment, compile_source, \
grammar_changed, last_value, matching_bracket, PreprocessorFunc, is_empty, remove_if, \
Node, TransformationFunc, TransformationDict, transformation_factory, traverse, \
remove_children_if, move_adjacent, normalize_whitespace, is_anonymous, matches_re, \
reduce_single_child, replace_by_single_child, replace_or_reduce, remove_whitespace, \
replace_by_children, remove_empty, remove_tokens, flatten, is_insignificant_whitespace, \
merge_adjacent, collapse, collapse_children_if, replace_content, WHITESPACE_PTYPE, \
TOKEN_PTYPE, remove_children, remove_content, remove_brackets, change_tag_name, \
remove_anonymous_tokens, keep_children, is_one_of, not_one_of, has_content, apply_if, peek, \
remove_anonymous_empty, keep_nodes, traverse_locally, strip, lstrip, rstrip, \
replace_content, replace_content_by, forbid, assert_content, remove_infix_operator, \
add_error, error_on, recompile_grammar, left_associative, lean_left, set_config_value, \
chain, get_config_value, XML_SERIALIZATION, SXPRESSION_SERIALIZATION, \
COMPACT_SERIALIZATION, JSON_SERIALIZATION, access_thread_locals, access_presets, \
finalize_presets, ErrorCode, RX_NEVER_MATCH, set_tracer, resume_notices_on, \
trace_history, has_descendant, neg, has_ancestor
#######################################################################
#
# PREPROCESSOR SECTION - Can be edited. Changes will be preserved.
#
#######################################################################
def ClassicEBNFPreprocessor(text):
    """Identity preprocessor for ClassicEBNF sources.

    Leaves the source text untouched and returns it together with an
    identity position-mapping function (every preprocessed position maps
    onto the same position in the original text).
    """
    def identity_mapping(position):
        # Nothing was altered, so positions are unchanged.
        return position
    return text, identity_mapping
def get_preprocessor() -> PreprocessorFunc:
    """Returns the preprocessor function for ClassicEBNF source files."""
    return ClassicEBNFPreprocessor
#######################################################################
#
# PARSER SECTION - Don't edit! CHANGES WILL BE OVERWRITTEN!
#
#######################################################################
class ClassicEBNFGrammar(Grammar):
    r"""Parser for a ClassicEBNF source file.

    Auto-generated from the ClassicEBNF.ebnf grammar; do not edit the parser
    definitions by hand — changes will be overwritten on recompilation.
    """
    expression = Forward()
    source_hash__ = "a7929c507e1b8319071d18dc9eaccdf7"
    anonymous__ = re.compile('..(?<=^)')
    static_analysis_pending__ = []  # type: List[bool]
    parser_initialization__ = ["upon instantiation"]
    COMMENT__ = r'#.*(?:\n|$)'
    comment_rx__ = re.compile(COMMENT__)
    WHITESPACE__ = r'\s*'
    WSP_RE__ = mixin_comment(whitespace=WHITESPACE__, comment=COMMENT__)
    wsp__ = Whitespace(WSP_RE__)
    # dropped whitespace: consumed but not kept in the syntax tree
    dwsp__ = Drop(Whitespace(WSP_RE__))
    EOF = NegativeLookahead(RegExp('.'))
    whitespace = Series(RegExp('~'), dwsp__)
    regexp = Series(RegExp('/(?:(?<!\\\\)\\\\(?:/)|[^/])*?/'), dwsp__)
    plaintext = Series(RegExp('`(?:(?<!\\\\)\\\\`|[^`])*?`'), dwsp__)
    literal = Alternative(Series(RegExp('"(?:(?<!\\\\)\\\\"|[^"])*?"'), dwsp__), Series(RegExp("'(?:(?<!\\\\)\\\\'|[^'])*?'"), dwsp__))
    symbol = Series(RegExp('(?!\\d)\\w+'), dwsp__)
    option = Series(Series(Text("["), dwsp__), expression, Series(Text("]"), dwsp__), mandatory=1)
    repetition = Series(Series(Text("{"), dwsp__), expression, Series(Text("}"), dwsp__), mandatory=1)
    oneormore = Series(Series(Text("{"), dwsp__), expression, Series(Text("}+"), dwsp__))
    unordered = Series(Series(Text("<"), dwsp__), expression, Series(Text(">"), dwsp__), mandatory=1)
    group = Series(Series(Text("("), dwsp__), expression, Series(Text(")"), dwsp__), mandatory=1)
    retrieveop = Alternative(Series(Text("::"), dwsp__), Series(Text(":"), dwsp__))
    flowmarker = Alternative(Series(Text("!"), dwsp__), Series(Text("&"), dwsp__), Series(Text("-!"), dwsp__), Series(Text("-&"), dwsp__))
    factor = Alternative(Series(Option(flowmarker), Option(retrieveop), symbol, NegativeLookahead(Series(Text("="), dwsp__))), Series(Option(flowmarker), literal), Series(Option(flowmarker), plaintext), Series(Option(flowmarker), regexp), Series(Option(flowmarker), whitespace), Series(Option(flowmarker), oneormore), Series(Option(flowmarker), group), Series(Option(flowmarker), unordered), repetition, option)
    term = OneOrMore(Series(Option(Series(Text("§"), dwsp__)), factor))
    expression.set(Series(term, ZeroOrMore(Series(Series(Text("|"), dwsp__), term))))
    directive = Series(Series(Text("@"), dwsp__), symbol, Series(Text("="), dwsp__), Alternative(regexp, literal, symbol), ZeroOrMore(Series(Series(Text(","), dwsp__), Alternative(regexp, literal, symbol))), mandatory=1)
    definition = Series(symbol, Series(Text("="), dwsp__), expression, mandatory=1)
    syntax = Series(Option(Series(dwsp__, RegExp(''))), ZeroOrMore(Alternative(definition, directive)), EOF, mandatory=2)
    root__ = syntax
def get_grammar() -> ClassicEBNFGrammar:
    """Returns a thread/process-exclusive ClassicEBNFGrammar-singleton.

    The singleton is stored in thread-local storage, so each thread gets its
    own grammar object (Grammar objects carry mutable parsing state).
    """
    THREAD_LOCALS = access_thread_locals()
    try:
        grammar = THREAD_LOCALS.ClassicEBNF_00000001_grammar_singleton
    except AttributeError:
        # First access in this thread: create and cache the singleton.
        THREAD_LOCALS.ClassicEBNF_00000001_grammar_singleton = ClassicEBNFGrammar()
        if hasattr(get_grammar, 'python_src__'):
            THREAD_LOCALS.ClassicEBNF_00000001_grammar_singleton.python_src__ = get_grammar.python_src__
        grammar = THREAD_LOCALS.ClassicEBNF_00000001_grammar_singleton
    # Attach history tracing according to the current configuration.
    if get_config_value('resume_notices'):
        resume_notices_on(grammar)
    elif get_config_value('history_tracking'):
        set_tracer(grammar, trace_history)
    return grammar
#######################################################################
#
# AST SECTION - Can be edited. Changes will be preserved.
#
#######################################################################
ClassicEBNF_AST_transformation_table = {
# AST Transformations for EBNF-grammar
"<":
[remove_empty], # remove_whitespace
"syntax":
[], # otherwise '"*": replace_by_single_child' would be applied
"directive, definition":
[flatten