Commit 8fe50d7d authored by eckhart's avatar eckhart
Browse files

code clean-up

parent cdf046ce
No preview for this file type
# The default ``config.py``
# flake8: noqa
def set_prefs(prefs):
    """This function is called before opening the project"""
    # Resources matching the patterns below are ignored by rope:
    # changes to them are not recorded in the history or handed to a
    # VCS, and they never show up in `Project.get_files()`.
    # ``?`` and ``*`` match any character except a slash:
    #   '*.pyc'      matches 'test.pyc' and 'pkg/test.pyc'
    #   'mod*.pyc'   matches 'test/mod1.pyc' but not 'mod/1.pyc'
    #   '.svn'       matches 'pkg/.svn' and all of its children
    #   'build/*.o'  matches 'build/lib.o' but not 'build/sub/lib.o'
    #   'build//*.o' matches 'build/lib.o' and 'build/sub/lib.o'
    #
    # Which files count as python files can be widened beyond the
    # default (only ``*.py``) via:
    #prefs['python_files'] = ['*.py']
    #
    # Extra source folders (relative to the project root, always with
    # '/' as separator) can be registered in case rope's automatic
    # source-folder detection misses one:
    #prefs.add('source_folders', 'src')
    #
    # The python path used for module lookup can be extended, too:
    #prefs.add('python_path', '~/python/')
    defaults = {
        'ignored_resources': ['*.pyc', '*~', '.ropeproject',
                              '.hg', '.svn', '_svn', '.git', '.tox'],
        # Should rope save object information or not.
        'save_objectdb': True,
        'compress_objectdb': False,
        # If `True`, rope analyzes each module when it is being saved.
        'automatic_soa': True,
        # The depth of calls to follow in static object analysis.
        'soa_followed_calls': 0,
        # If `False`, "dynamic object analysis" is switched off when
        # running modules or unit tests, which makes them much faster.
        'perform_doa': True,
        # Rope can check the validity of its object DB when running.
        'validate_objectdb': True,
        # How many undos to hold?
        'max_history_items': 32,
        # Whether to save history across sessions.
        'save_history': True,
        'compress_history': False,
        # Number of spaces used for indenting; :PEP:`8` recommends 4,
        # and most of rope's unit tests assume 4 as well.
        'indent_size': 4,
        # Builtin and c-extension modules that rope is allowed to
        # import and inspect.
        'extension_modules': [],
        # Add all standard c-extensions to the extension_modules list.
        'import_dynload_stdmods': True,
        # If `True`, modules with syntax errors are treated as empty;
        # if `False` (the default) a syntax error raises
        # `rope.base.exceptions.ModuleSyntaxError`.
        'ignore_syntax_errors': False,
        # If `True`, rope ignores unresolvable imports instead of
        # letting them appear in the importing namespace.
        'ignore_bad_imports': False,
        # If `True`, new module imports are inserted as
        # `from <package> import <module>` by default.
        'prefer_module_from_imports': False,
        # If `True`, a comma-separated import list is split into
        # separate import statements when organizing imports.
        'split_imports': False,
        # If `True`, all top-level imports are removed and re-inserted
        # at the top of the module when making changes.
        'pull_imports_to_top': True,
        # If `True`, imports are sorted alphabetically by module name
        # (from-imports after plain imports) rather than by the full
        # import statement.
        'sort_imports_alphabetically': False,
        # Implementation of
        # rope.base.oi.type_hinting.interfaces.ITypeHintingFactory.
        # Usually left alone; override it to inject custom providers
        # (listed in rope.base.oi.type_hinting.providers.interfaces),
        # e.g. for Django models, or to disable type-hint search in a
        # class hierarchy.
        'type_hinting_factory':
            'rope.base.oi.type_hinting.factory.default_type_hinting_factory',
    }
    for key, value in defaults.items():
        prefs[key] = value
def project_opened(project):
    """This function is called after opening the project"""
    # Do whatever you like here!
    # The default implementation intentionally does nothing with
    # `project`.
    return None
This source diff could not be displayed because it is too large. You can view the blob instead.
......@@ -37,7 +37,7 @@ compiler object.
import copy
import functools
import os
from typing import Any, Optional, Tuple, List, Union, Callable, cast
from typing import Any, Optional, Tuple, List, Set, Union, Callable, cast
from DHParser.configuration import get_config_value
from DHParser.preprocess import with_source_mapping, PreprocessorFunc, SourceMapFunc
......@@ -128,8 +128,8 @@ class Compiler:
self.context = [] # type: List[Node]
self._None_check = True # type: bool
self._dirty_flag = False
self._debug = get_config_value('debug_compiler') # type: bool
self._debug_already_compiled = set() # type: Set[Node]
self._debug = get_config_value('debug_compiler') # type: bool
self._debug_already_compiled = set() # type: Set[Node]
self.finalizers = [] # type: List[Callable, Tuple]
def prepare(self) -> None:
......@@ -168,17 +168,6 @@ class Compiler:
self.finalize()
return result
# Obsolete, because never used...
# def compile_children(self, node: Node) -> StrictResultType:
# """Compiles all children of the given node and returns the tuple
# of the compiled children or the node's (potentially empty) result
# in case the node does not have any children.
# """
# if node.children:
# return tuple(self.compile(child) for child in node.children)
# else:
# return node.result
def fallback_compiler(self, node: Node) -> Any:
"""This is a generic compiler function which will be called on
all those node types for which no compiler method `on_XXX` has
......
......@@ -29,7 +29,7 @@ this is desired in the CONFIG_PRESET dictionary right after the start of the
program and before any DHParser-function is invoked.
"""
from typing import Dict, Hashable, Any
from typing import Dict, Any
__all__ = ('access_presets',
'finalize_presets',
......@@ -175,7 +175,7 @@ def set_config_value(key: str, value: Any):
except AttributeError:
THREAD_LOCALS.config = dict()
cfg = THREAD_LOCALS.config
THREAD_LOCALS.config[key] = value
cfg[key] = value
########################################################################
......
......@@ -264,7 +264,8 @@ def grammar_provider(ebnf_src: str, branding="DSL") -> Grammar:
append_log(log_name, grammar_src)
else:
print(grammar_src)
grammar_factory = compile_python_object(DHPARSER_IMPORTS + grammar_src, r'get_(?:\w+_)?grammar$')
grammar_factory = compile_python_object(DHPARSER_IMPORTS + grammar_src,
r'get_(?:\w+_)?grammar$')
grammar_factory.python_src__ = grammar_src
return grammar_factory
......@@ -300,7 +301,7 @@ def load_compiler_suite(compiler_suite: str) -> \
# Assume source is an ebnf grammar.
# Is there really any reasonable application case for this?
log_dir = suspend_logging()
compiler_py, messages, n = compile_source(source, None, get_ebnf_grammar(),
compiler_py, messages, _ = compile_source(source, None, get_ebnf_grammar(),
get_ebnf_transformer(),
get_ebnf_compiler(compiler_suite, source))
resume_logging(log_dir)
......@@ -334,7 +335,7 @@ def is_outdated(compiler_suite: str, grammar_source: str) -> bool:
True, if ``compiler_suite`` seems to be out of date.
"""
try:
n1, grammar, n2, n3 = load_compiler_suite(compiler_suite)
_, grammar, _, _ = load_compiler_suite(compiler_suite)
return grammar_changed(grammar(), grammar_source)
except ValueError:
return True
......@@ -558,8 +559,8 @@ def recompile_grammar(ebnf_filename, force=False,
compiler_name = base + 'Compiler.py'
error_file_name = base + '_ebnf_ERRORS.txt'
messages = [] # type: Iterable[Error]
if (not os.path.exists(compiler_name) or force or
grammar_changed(compiler_name, ebnf_filename)):
if (not os.path.exists(compiler_name) or force
or grammar_changed(compiler_name, ebnf_filename)):
notify()
messages = compile_on_disk(ebnf_filename)
if messages:
......
......@@ -92,9 +92,9 @@ from DHParser import start_logging, suspend_logging, resume_logging, is_filename
remove_children_if, move_adjacent, normalize_whitespace, is_anonymous, matches_re, \\
reduce_single_child, replace_by_single_child, replace_or_reduce, remove_whitespace, \\
replace_by_children, remove_empty, remove_tokens, flatten, is_insignificant_whitespace, \\
merge_adjacent, collapse, collapse_children_if, replace_content, WHITESPACE_PTYPE, TOKEN_PTYPE, \\
remove_nodes, remove_content, remove_brackets, change_tag_name, remove_anonymous_tokens, \\
keep_children, is_one_of, not_one_of, has_content, apply_if, peek, \\
merge_adjacent, collapse, collapse_children_if, replace_content, WHITESPACE_PTYPE, \\
TOKEN_PTYPE, remove_nodes, remove_content, remove_brackets, change_tag_name, \\
remove_anonymous_tokens, keep_children, is_one_of, not_one_of, has_content, apply_if, peek, \\
remove_anonymous_empty, keep_nodes, traverse_locally, strip, lstrip, rstrip, \\
replace_content, replace_content_by, forbid, assert_content, remove_infix_operator, \\
error_on, recompile_grammar, left_associative, lean_left, set_config_value, \\
......@@ -131,10 +131,14 @@ class EBNFGrammar(Grammar):
r"""
Parser for an EBNF source file, with this grammar:
@ comment = /#.*(?:\n|$)/ # comments start with '#' and eat all chars up to and including '\n'
@ whitespace = /\s*/ # whitespace includes linefeed
@ literalws = right # trailing whitespace of literals will be ignored tacitly
@ drop = whitespace # do not include whitespace in concrete syntax tree
# comments start with '#' and eat all chars up to and including '\n'
@ comment = /#.*(?:\n|$)/
# whitespace includes linefeed
@ whitespace = /\s*/
# trailing whitespace of literals will be ignored tacitly
@ literalws = right
@ drop = whitespace # do not include whitespace in concrete syntax tree
#: top-level
......@@ -145,8 +149,9 @@ class EBNFGrammar(Grammar):
#: components
expression = term { "|" term }
term = { ["§"] factor }+ # "§" means all following factors mandatory
factor = [flowmarker] [retrieveop] symbol !"=" # negative lookahead to be sure it's not a definition
term = { ["§"] factor }+ # "§" means all following factors mandatory
# negative lookahead !"=" to be sure it's not a definition
factor = [flowmarker] [retrieveop] symbol !"="
| [flowmarker] literal
| [flowmarker] plaintext
| [flowmarker] regexp
......@@ -159,14 +164,14 @@ class EBNFGrammar(Grammar):
#: flow-operators
flowmarker = "!" | "&" # '!' negative lookahead, '&' positive lookahead
| "-!" | "-&" # '-' negative lookbehind, '-&' positive lookbehind
retrieveop = "::" | ":" # '::' pop, ':' retrieve
flowmarker = "!" | "&" # '!' negative lookahead, '&' positive lookahead
| "-!" | "-&" # '-' negative lookbehind, '-&' positive lookbehind
retrieveop = "::" | ":" # '::' pop, ':' retrieve
#: groups
group = "(" §expression ")"
unordered = "<" §expression ">" # elements of expression in arbitrary order
unordered = "<" §expression ">" # elements of expression in arbitrary order
oneormore = "{" expression "}+"
repetition = "{" §expression "}"
option = "[" §expression "]"
......@@ -175,7 +180,7 @@ class EBNFGrammar(Grammar):
symbol = /(?!\d)\w+/~ # e.g. expression, factor, parameter_list
literal = /"(?:(?<!\\)\\"|[^"])*?"/~ # e.g. "(", '+', 'while'
| /'(?:(?<!\\)\\'|[^'])*?'/~ # whitespace following literals will be ignored tacitly.
| /'(?:(?<!\\)\\'|[^'])*?'/~ # ignore whitespace following literals
plaintext = /`(?:(?<!\\)\\`|[^`])*?`/~ # like literal but does not eat whitespace
regexp = /\/(?:(?<!\\)\\(?:\/)|[^\/])*?\//~ # e.g. /\w+/, ~/#.*(?:\n|$)/~
whitespace = /~/~ # insignificant whitespace
......@@ -198,25 +203,33 @@ class EBNFGrammar(Grammar):
Series(RegExp("'(?:(?<!\\\\)\\\\'|[^'])*?'"), wsp__))
symbol = Series(RegExp('(?!\\d)\\w+'), wsp__)
option = Series(Series(Token("["), wsp__), expression, Series(Token("]"), wsp__), mandatory=1)
repetition = Series(Series(Token("{"), wsp__), expression, Series(Token("}"), wsp__), mandatory=1)
repetition = Series(Series(Token("{"), wsp__), expression, Series(Token("}"), wsp__),
mandatory=1)
oneormore = Series(Series(Token("{"), wsp__), expression, Series(Token("}+"), wsp__))
unordered = Series(Series(Token("<"), wsp__), expression, Series(Token(">"), wsp__), mandatory=1)
unordered = Series(Series(Token("<"), wsp__), expression, Series(Token(">"), wsp__),
mandatory=1)
group = Series(Series(Token("("), wsp__), expression, Series(Token(")"), wsp__), mandatory=1)
retrieveop = Alternative(Series(Token("::"), wsp__), Series(Token(":"), wsp__))
flowmarker = Alternative(Series(Token("!"), wsp__), Series(Token("&"), wsp__),
Series(Token("-!"), wsp__), Series(Token("-&"), wsp__))
factor = Alternative(Series(Option(flowmarker), Option(retrieveop), symbol,
NegativeLookahead(Series(Token("="), wsp__))), Series(Option(flowmarker), literal),
Series(Option(flowmarker), plaintext), Series(Option(flowmarker), regexp),
Series(Option(flowmarker), whitespace), Series(Option(flowmarker), oneormore),
Series(Option(flowmarker), group), Series(Option(flowmarker), unordered), repetition, option)
NegativeLookahead(Series(Token("="), wsp__))),
Series(Option(flowmarker), literal),
Series(Option(flowmarker), plaintext),
Series(Option(flowmarker), regexp),
Series(Option(flowmarker), whitespace),
Series(Option(flowmarker), oneormore),
Series(Option(flowmarker), group),
Series(Option(flowmarker), unordered), repetition, option)
term = OneOrMore(Series(Option(Series(Token("§"), wsp__)), factor))
expression.set(Series(term, ZeroOrMore(Series(Series(Token("|"), wsp__), term))))
directive = Series(Series(Token("@"), wsp__), symbol, Series(Token("="), wsp__),
Alternative(regexp, literal, symbol),
ZeroOrMore(Series(Series(Token(","), wsp__), Alternative(regexp, literal, symbol))), mandatory=1)
ZeroOrMore(Series(Series(Token(","), wsp__),
Alternative(regexp, literal, symbol))), mandatory=1)
definition = Series(symbol, Series(Token("="), wsp__), expression, mandatory=1)
syntax = Series(Option(Series(wsp__, RegExp(''))), ZeroOrMore(Alternative(definition, directive)), EOF, mandatory=2)
syntax = Series(Option(Series(wsp__, RegExp(''))),
ZeroOrMore(Alternative(definition, directive)), EOF, mandatory=2)
root__ = syntax
......@@ -939,7 +952,8 @@ class EBNFCompiler(Compiler):
# prepare parser class header and docstring and
# add EBNF grammar to the doc string of the parser class
article = 'an ' if self.grammar_name[0:1] in "AaEeIiOoUu" else 'a ' # what about 'hour', 'universe' etc.?
article = 'an ' if self.grammar_name[0:1] in "AaEeIiOoUu" else 'a '
# what about 'hour', 'universe' etc.?
show_source = get_config_value('add_grammar_source_to_parser_docstring')
declarations = ['class ' + self.grammar_name
+ 'Grammar(Grammar):',
......@@ -1009,7 +1023,8 @@ class EBNFCompiler(Compiler):
return self._result
## compilation methods
# compilation methods ###
def on_syntax(self, node: Node) -> str:
definitions = [] # type: List[Tuple[str, str]]
......@@ -1040,7 +1055,7 @@ class EBNFCompiler(Compiler):
except NameError:
pass # undefined name in the grammar are already caught and reported
except GrammarError as error:
for sym, prs, err in error.errors:
for sym, _, err in error.errors:
symdef_node = self.rules[sym][0]
err.pos = self.rules[sym][0].pos
self.tree.add_error(symdef_node, err)
......@@ -1078,7 +1093,8 @@ class EBNFCompiler(Compiler):
# assume it's a synonym, like 'page = REGEX_PAGE_NR'
defn = 'Synonym(%s)' % defn
# if self.drop_flag:
# defn = 'Drop(%s)' % defn # TODO: Recursively drop all contained parsers for optimization
# defn = 'Drop(%s)' % defn
# TODO: Recursively drop all contained parsers for optimization
except TypeError as error:
from traceback import extract_tb
trace = str(extract_tb(error.__traceback__)[-1])
......@@ -1199,8 +1215,8 @@ class EBNFCompiler(Compiler):
+ ' must be defined before the symbol!')
if node.children[1 if len(node.children) == 2 else 2].tag_name != 'literal':
self.tree.new_error(
node, 'Directive "%s" requires message string or a a pair ' % key +
'(regular expression or search string, message string) as argument!')
node, 'Directive "%s" requires message string or a a pair ' % key
+ '(regular expression or search string, message string) as argument!')
if len(node.children) == 2:
error_msgs.append(('', unrepr(node.children[1].content)))
elif len(node.children) == 3:
......@@ -1254,7 +1270,7 @@ class EBNFCompiler(Compiler):
and DROP_REGEXP in self.directives.drop and self.context[-2].tag_name == "definition"
and all((arg.startswith('Drop(RegExp(') or arg.startswith('Drop(Token(')
or arg in EBNFCompiler.COMMENT_OR_WHITESPACE) for arg in arguments)):
arguments = [arg.replace('Drop(', '').replace('))', ')') for arg in arguments]
arguments = [arg.replace('Drop(', '').replace('))', ')') for arg in arguments]
if self.drop_flag:
return 'Drop(' + parser_class + '(' + ', '.join(arguments) + '))'
else:
......@@ -1505,7 +1521,7 @@ def get_ebnf_compiler(grammar_name="", grammar_source="") -> EBNFCompiler:
#
########################################################################
def compile_ebnf(ebnf_source: str, branding: str = 'DSL', preserve_AST: bool=False) \
def compile_ebnf(ebnf_source: str, branding: str = 'DSL', preserve_AST: bool = False) \
-> Tuple[Optional[Any], List[Error], Optional[Node]]:
"""
Compiles an `ebnf_source` (file_name or EBNF-string) and returns
......@@ -1518,5 +1534,4 @@ def compile_ebnf(ebnf_source: str, branding: str = 'DSL', preserve_AST: bool=Fal
get_ebnf_grammar(),
get_ebnf_transformer(),
get_ebnf_compiler(branding, ebnf_source),
preserve_AST = preserve_AST)
preserve_AST=preserve_AST)
......@@ -37,7 +37,7 @@ the string representations of the error objects. For example::
import bisect
from typing import Iterable, Iterator, Union, Dict, Tuple, List
from typing import Iterable, Iterator, Union, Tuple, List
from DHParser.preprocess import SourceMapFunc
from DHParser.stringview import StringView
......@@ -232,17 +232,6 @@ def line_col(lbreaks: List[int], pos: int) -> Tuple[int, int]:
return line, column
# def line_col(text: Union[StringView, str], pos: int) -> Tuple[int, int]:
# """
# Returns the position within a text as (line, column)-tuple.
# """
# if pos < 0 or add_pos > len(text): # one character behind EOF is still an allowed position!
# raise ValueError('Position %i outside text of length %s !' % (pos, len(text)))
# line = text.count("\n", 0, pos) + 1
# column = pos - text.rfind("\n", 0, add_pos)
# return line, column
def adjust_error_locations(errors: List[Error],
original_text: Union[StringView, str],
source_mapping: SourceMapFunc = lambda i: i):
......
......@@ -52,10 +52,9 @@ Example::
"""
import collections
import contextlib
import html
import os
from typing import List, Tuple, Union, Optional
from typing import List, Tuple, Union
from DHParser.configuration import access_presets, finalize_presets, get_config_value, \
set_config_value
......@@ -103,7 +102,7 @@ def suspend_logging() -> str:
return save
def resume_logging(log_dir: str=''):
def resume_logging(log_dir: str = ''):
"""Resumes logging in the current thread with the given log-dir."""
if not 'log_dir':
CFG = access_presets()
......@@ -155,7 +154,7 @@ def log_dir(path: str = "") -> str:
return dirname
def is_logging(thread_local_query: bool=True) -> bool:
def is_logging(thread_local_query: bool = True) -> bool:
"""-> True, if logging is turned on."""
if thread_local_query:
return bool(get_config_value('log_dir'))
......@@ -293,7 +292,8 @@ class HistoryRecord:
errors: List[Error] = []) -> None:
# copy call stack, dropping uninformative Forward-Parsers
# self.call_stack = call_stack # type: Tuple[Tuple[str, int],...]
self.call_stack = tuple((tn, pos) for tn, pos in call_stack if tn != ":Forward") # type: Tuple[Tuple[str, int],...]
self.call_stack = tuple((tn, pos) for tn, pos in call_stack
if tn != ":Forward") # type: Tuple[Tuple[str, int],...]
self.node = node # type: Node
self.text = text # type: StringView
self.line_col = line_col # type: Tuple[int, int]
......@@ -355,7 +355,8 @@ class HistoryRecord:
classes[idx['text']] = 'failtext'
else: # ERROR
stack += '<br/>\n"%s"' % self.err_msg()
tpl = self.Snapshot(str(self.line_col[0]), str(self.line_col[1]), stack, status, excerpt) # type: Tuple[str, str, str, str, str]
tpl = self.Snapshot(str(self.line_col[0]), str(self.line_col[1]),
stack, status, excerpt) # type: Tuple[str, str, str, str, str]
return ''.join(['<tr>'] + [('<td class="%s">%s</td>' % (cls, item))
for cls, item in zip(classes, tpl)] + ['</tr>'])
......@@ -469,6 +470,7 @@ def log_ST(syntax_tree, log_file_name) -> bool:
return True
return False
LOG_SIZE_THRESHOLD = 10000 # maximum number of history records to log
LOG_TAIL_THRESHOLD = 500 # maximum number of history recors for "tail log"
......@@ -542,4 +544,3 @@ def log_parsing_history(grammar, log_file_name: str = '', html: bool = True) ->
heading = '<h1>Last 500 records of parsing history of "%s"</h1>' % log_file_name + lead_in
write_log([heading] + full_history[-LOG_TAIL_THRESHOLD:], log_file_name + '_full.tail')
return True
......@@ -32,7 +32,7 @@ for an example.
from collections import defaultdict
import copy
from typing import Callable, cast, List, Tuple, Sequence, Set, Dict, \
from typing import Callable, cast, List, Tuple, Set, Dict, \
DefaultDict, Union, Optional, Any
from DHParser.configuration import get_config_value
......@@ -432,20 +432,17 @@ class Parser:
self.tag_name,
(Node(ZOMBIE_TAG, text[:gap]).with_pos(location), pe.node) + tail) \
.with_pos(location)
# self._add_resume_notice(rest, node)
elif pe.first_throw:
# TODO: Will this option be needed, if history tracking is deferred to module "trace"?
# if history_tracking__: grammar.call_stack__.pop()
# TODO: Is this case still needed with module "trace"?
raise ParserError(pe.node, pe.rest, pe.error, first_throw=False)
elif grammar.tree__.errors[-1].code == Error.MANDATORY_CONTINUATION_AT_EOF:
node = Node(self.tag_name, pe.node).with_pos(location) # try to create tree as faithful as possible
# try to create tree as faithful as possible
node = Node(self.tag_name, pe.node).with_pos(location)
else:
result = (Node(ZOMBIE_TAG, text[:gap]).with_pos(location), pe.node) if gap \
else pe.node # type: ResultType
# if history_tracking__: grammar.call_stack__.pop()
raise ParserError(Node(self.tag_name, result).with_pos(location),
text, pe.error, first_throw=False)
# self._add_resume_notice(rest, node)
grammar.most_recent_error__ = pe # needed for history tracking
if left_recursion_depth__:
......@@ -482,7 +479,8 @@ class Parser:
# because caching would interfere with changes of variable state
# - in case of left recursion, the first recursive step that
# matches will store its result in the cache
# TODO: need a unit-test concerning interference of variable manipulation and left recursion algorithm?
# TODO: need a unit-test concerning interference of variable manipulation
# and left recursion algorithm?
visited[location] = (node, rest)
except RecursionError:
......@@ -1004,8 +1002,8 @@ class Grammar:
else:
self.comment_rx__ = RX_NEVER_MATCH
else:
assert ((self.__class__.COMMENT__ and
self.__class__.COMMENT__ == self.comment_rx__.pattern)
assert ((self.__class__.COMMENT__
and self.__class__.COMMENT__ == self.comment_rx__.pattern)
or (not self.__class__.COMMENT__ and self.comment_rx__ == RX_NEVER_MATCH))
self.start_parser__ = None # type: Optional[Parser]
self._dirty_flag__ = False # type: bool
......@@ -1100,8 +1098,8 @@ class Grammar:
"""
if parser.pname:
# prevent overwriting instance variables or parsers of a different class
assert parser.pname not in self.__dict__ or \
isinstance(self.__dict__[parser.pname], parser.__class__), \
assert (parser.pname not in self.__dict__
or isinstance(self.__dict__[parser.pname], parser.__class__)), \
('Cannot add parser "%s" because a field with the same name '
'already exists in grammar object: %s!'
% (parser.pname, str(self.__dict__[parser.pname])))
......@@ -1151,12 +1149,13 @@ class Grammar:
# for tn, pos in h.call_stack:
# if is_lookahead(tn) and h.status == HistoryRecord.MATCH:
# print(h.call_stack, pos, h.line_col)
last_record = self.history__[-2] if len(self.history__) > 1 else None # type: Optional[HistoryRecord]
last_record = self.history__[-2] if len(self.history__) > 1 \
else None # type: Optional[HistoryRecord]
return last_record and parser != self.root_parser__ \
and any(h.status == HistoryRecord.MATCH # or was it HistoryRecord.MATCH !?
and any(is_lookahead(tn) and location >= len(self.document__)
for tn, location in h.call_stack)
for h in self.history__[:-1])
and any(h.status == HistoryRecord.MATCH # or was it HistoryRecord.MATCH !?
and any(is_lookahead(tn) and location >= len(self.document__)
for tn, location in h.call_stack)
for h in self.history__[:-1])
# assert isinstance(document, str), type(document)
if self._dirty_flag__:
......@@ -1165,10 +1164,7 @@ class Grammar:
parser.reset()
else:
self._dirty_flag__ = True
# save_history_tracking = self.history_tracking__
# self.history_tracking__ = track_history or self.history_tracking__ or self.resume_notices__
# track history contains and retains the current tracking state
# track_history = self.history_tracking__
self.document__ = StringView(document)
self.document_length__ = len(self.document__)
self._document_lbreaks__ = linebreaks(document) if self.history_tracking__ else []
......@@ -1254,7 +1250,8 @@ class Grammar:
# # stop history tracking when parser returned too early
# self.history_tracking__ = False
else:
rest = StringView('') # if complete_match is False, ignore the rest and leave while loop
# if complete_match is False, ignore the rest and leave while loop
rest = StringView('')
if stitches:
if rest:
stitches.append(Node(ZOMBIE_TAG, rest))
......@@ -1334,10 +1331,11 @@ class Grammar:
"""
error_list = [] # type: List[GrammarErrorType]
def visit_parser(parser: Parser) -> None:
nonlocal error_list
# self.root_parser__.apply(visit_parser) # disabled, because no use case as of now
# disabled, because no use case as of now
# def visit_parser(parser: Parser) -> None:
# nonlocal error_list
#
# self.root_parser__.apply(visit_parser)
return error_list
......@@ -1914,7 +1912,7 @@ def mandatory_violation(grammar: Grammar,
else:
msg = '%s expected, »%s« found!' % (expected, found)
error = Error(msg, location, Error.MANDATORY_CONTINUATION_AT_EOF
if (failed_on_lookahead and not text_) else Error.MANDATORY_CONTINUATION)
if (failed_on_lookahead and not text_) else Error.MANDATORY_CONTINUATION)
grammar.tree__.add_error(err_node, error)
return error, err_node, text_[i:]
......@@ -1950,7 +1948,7 @@ class Series(NaryParser):
def __init__(self, *parsers: Parser,
mandatory: int = NO_MANDATORY,
err_msgs: MessagesType=[],