Commit 57cd96a3 authored by eckhart

some code cleanup and typo corrections

parent 749247a5
@@ -252,6 +252,7 @@ ResultTuple = Tuple[Optional[Any], List[Error], Optional[Node]]
def filter_stacktrace(stacktrace: List[str]) -> List[str]:
"""Removes those frames from a formatted stacktrace that are located
within the DHParser-code."""
+ n = 0
for n, frame in enumerate(stacktrace):
i = frame.find('"')
k = frame.find('"', i + 1)
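The added `n = 0` initializes the loop variable up front, so that the code following the loop also works for an empty stacktrace. For illustration, a usage sketch (an assumption, not part of the commit; it presumes `filter_stacktrace` is importable from DHParser.compile):

    import sys, traceback
    from DHParser.compile import filter_stacktrace

    try:
        raise RuntimeError("demo failure")
    except RuntimeError:
        frames = traceback.format_exception(*sys.exc_info())
        # keep only those frames that lie outside the DHParser sources
        print(''.join(filter_stacktrace(frames)))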
@@ -332,7 +333,7 @@ def compile_source(source: str,
# AST-transformation
if is_error(syntax_tree.error_flag):
- # catch Python exception, because if an error has occured
+ # catch Python exception, because if an error has occurred
# earlier, the syntax tree might not look like expected,
# which could (fatally) break AST transformations.
try:
@@ -421,7 +422,7 @@ def process_tree(tp: TreeProcessor, tree: RootNode) -> RootNode:
Although process_tree returns the root-node of the processed tree,
tree processing should generally be assumed to change the tree
in place, even if a different root-node is returned than was passed
- to the tree. If the input tree shall be prserved, it is necessary to
+ to the tree. If the input tree shall be preserved, it is necessary to
make a deep copy of the input tree, before calling process_tree.
"""
assert isinstance(tp, TreeProcessor)
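The deep-copy advice from this docstring as a short sketch (`tp` and `tree` are hypothetical stand-ins for a TreeProcessor instance and a parsed tree):

    import copy

    pristine = copy.deepcopy(tree)   # preserve the input tree unchanged
    result = process_tree(tp, tree)  # may mutate `tree` in place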
@@ -254,7 +254,7 @@ SXPRESSION_SERIALIZATION = "S-expression"
SERIALIZATIONS = frozenset({XML_SERIALIZATION,
JSON_SERIALIZATION,
INDENTED_SERIALIZATION,
- SXPRESSION_SERIALIZATION,})
+ SXPRESSION_SERIALIZATION})
CONFIG_PRESET['cst_serialization'] = SXPRESSION_SERIALIZATION
CONFIG_PRESET['ast_serialization'] = SXPRESSION_SERIALIZATION
@@ -307,7 +307,7 @@ CONFIG_PRESET['log_syntax_trees'] = set()
# Default value: "none"
CONFIG_PRESET['static_analysis'] = "early"
- # DHParser.ebnfy.EBNFCompiler class adds the the EBNF-grammar to the
+ # DHParser.ebnf.EBNFCompiler class adds the EBNF-grammar to the
# docstring of the generated Grammar-class
# Default value: False
CONFIG_PRESET['add_grammar_source_to_parser_docstring'] = False
@@ -344,12 +344,12 @@ CONFIG_PRESET['default_literalws'] = "none"
# alternative-parser. Does not allow regular expressions between, i.e.
# / ... / within the EBNF-code!
# 'strict' - allows both classic and regex-like syntax to be mixed, but
- # allows character ranges within square brackets with oridinal values,
+ # allows character ranges within square brackets with ordinal values,
# only. Uses | as delimiter for alternatives.
# 'heuristic' - the most liberal mode, allows about everything. However,
# because it employs heuristics to distinguish ambiguous cases, it
- # may lead to unexcpeted errors and require the user to resolve the
- # ambiguieties
+ # may lead to unexpected errors and require the user to resolve the
+ # ambiguities
EBNF_CLASSIC_SYNTAX = "classic"
EBNF_ANY_SYNTAX_STRICT = "strict"
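For illustration, the syntax variant can be selected through DHParser's configuration API (a sketch; it assumes that `set_config_value` and the constants above are importable from DHParser.configuration):

    from DHParser.configuration import set_config_value, EBNF_ANY_SYNTAX_STRICT

    # 'strict' mixes classic and regex-like EBNF; character ranges in square
    # brackets may use ordinal values only, and | delimits alternatives
    set_config_value('syntax_variant', EBNF_ANY_SYNTAX_STRICT)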
@@ -366,7 +366,7 @@ CONFIG_PRESET['syntax_variant'] = EBNF_ANY_SYNTAX_STRICT
#
########################################################################
- # Maximum allowed source size for reomote procedure calls (including
+ # Maximum allowed source size for remote procedure calls (including
# parameters) in server.Server. The default value is rather large in
# order to allow transmitting complete source texts as parameter.
# Default value: 4 MB
@@ -377,7 +377,7 @@ CONFIG_PRESET['max_rpc_size'] = 4 * 1024 * 1024
# Default value: True
CONFIG_PRESET['jsonrpc_header'] = True
- # Defaut host name or IP-adress for the compiler server. Should usually
+ # Default host name or IP-address for the compiler server. Should usually
# be localhost (127.0.0.1)
# Default value: 127.0.0.1.
CONFIG_PRESET['server_default_host'] = "127.0.0.1"
@@ -450,4 +450,4 @@ CONFIG_PRESET['test_parallelization'] = True
# resulting from parsers that contain lookahead checks may have a
# structure that would not occur outside the testing-environment.
# Default value: True
- CONFIG_PRESET['test_supress_lookahead_failures'] = True
+ CONFIG_PRESET['test_suppress_lookahead_failures'] = True
@@ -42,7 +42,7 @@ from DHParser.transform import TransformationFunc, TransformationDict
from DHParser.toolkit import DHPARSER_DIR, DHPARSER_PARENTDIR, load_if_file, is_python_code, \
compile_python_object, re, as_identifier, is_filename, relative_path
from typing import Any, cast, List, Tuple, Union, Iterator, Iterable, Optional, \
- Callable, Generator
+ Callable, Sequence
__all__ = ('DefinitionError',
@@ -91,7 +91,7 @@ class DSLException(Exception):
"""
Base class for DSL-exceptions.
"""
- def __init__(self, errors: Union[List[Error], Generator[Error, None, None]]):
+ def __init__(self, errors: Union[Sequence[Error], Iterator[Error]]):
assert isinstance(errors, Iterator) or isinstance(errors, list) \
or isinstance(errors, tuple)
self.errors = list(errors)
@@ -150,12 +150,12 @@ def grammar_instance(grammar_representation) -> Tuple[Grammar, str]:
parser_py = grammar_src # type: str
messages = [] # type: List[Error]
else:
- log_dir = suspend_logging()
+ lg_dir = suspend_logging()
result, messages, _ = compile_source(
grammar_src, None,
get_ebnf_grammar(), get_ebnf_transformer(), get_ebnf_compiler())
parser_py = cast(str, result)
- resume_logging(log_dir)
+ resume_logging(lg_dir)
if has_errors(messages):
raise DefinitionError(only_errors(messages), grammar_src)
imports = DHPARSER_IMPORTS.format(
@@ -247,7 +247,7 @@ def compileEBNF(ebnf_src: str, branding="DSL") -> str:
compiler = raw_compileEBNF(ebnf_src, branding)
src = ["#/usr/bin/python\n",
SECTION_MARKER.format(marker=SYMBOLS_SECTION),
- DHPARSER_IMPORTS.format(dhparser_parentdir = relative_dhpath),
+ DHPARSER_IMPORTS.format(dhparser_parentdir=relative_dhpath),
SECTION_MARKER.format(marker=PREPROCESSOR_SECTION), compiler.gen_preprocessor_skeleton(),
SECTION_MARKER.format(marker=PARSER_SECTION), compiler.result,
SECTION_MARKER.format(marker=AST_SECTION), compiler.gen_transformer_skeleton(),
@@ -257,7 +257,7 @@ def compileEBNF(ebnf_src: str, branding="DSL") -> str:
@functools.lru_cache()
- def grammar_provider(ebnf_src: str, branding="DSL", additional_code: str='') -> ParserFactoryFunc:
+ def grammar_provider(ebnf_src: str, branding="DSL", additional_code: str = '') -> ParserFactoryFunc:
"""
Compiles an EBNF-grammar and returns a grammar-parser provider
function for that grammar.
@@ -283,7 +283,7 @@ def grammar_provider(ebnf_src: str, branding="DSL", additional_code: str='') ->
append_log(log_name, grammar_src)
else:
print(grammar_src)
- imports = DHPARSER_IMPORTS.format(dhparser_parentdir = relative_path('.', DHPARSER_PARENTDIR))
+ imports = DHPARSER_IMPORTS.format(dhparser_parentdir=relative_path('.', DHPARSER_PARENTDIR))
grammar_factory = compile_python_object('\n'.join([imports, additional_code, grammar_src]),
r'get_(?:\w+_)?grammar$')
if callable(grammar_factory):
@@ -292,7 +292,7 @@ def grammar_provider(ebnf_src: str, branding="DSL", additional_code: str='') ->
raise ValueError('Could not compile grammar provider!')
- def create_parser(ebnf_src: str, branding="DSL", additional_code: str='') -> Grammar:
+ def create_parser(ebnf_src: str, branding="DSL", additional_code: str = '') -> Grammar:
"""Compiles the ebnf source into a callable Grammar-object. This is
essentially syntactic sugar for `grammar_provider(ebnf)()`.
"""
@@ -300,7 +300,7 @@ def create_parser(ebnf_src: str, branding="DSL", additional_code: str='') -> Gra
return grammar_factory()
- def split_source(file_name: str, file_content: str) -> Tuple[str]:
+ def split_source(file_name: str, file_content: str) -> List[str]:
"""Splits the `file_content` into the seven sections: intro, imports,
preprocessor_py, parser_py, ast_py, compiler_py, outro.
Raises a value error, if the number of sections is not equal to 7.
@@ -328,7 +328,7 @@ def load_compiler_suite(compiler_suite: str) -> \
assert isinstance(compiler_suite, str)
source = load_if_file(compiler_suite)
dhpath = relative_path(os.path.dirname('.'), DHPARSER_PARENTDIR)
- imports = DHPARSER_IMPORTS.format(dhparser_parentdir = dhpath)
+ imports = DHPARSER_IMPORTS.format(dhparser_parentdir=dhpath)
if is_python_code(compiler_suite):
sections = split_source(compiler_suite, source)
_, imports, preprocessor_py, parser_py, ast_py, compiler_py, _ = sections
@@ -340,11 +340,11 @@ def load_compiler_suite(compiler_suite: str) -> \
else:
# Assume source is an ebnf grammar.
# Is there really any reasonable application case for this?
- log_dir = suspend_logging()
+ lg_dir = suspend_logging()
compiler_py, messages, _ = compile_source(source, None, get_ebnf_grammar(),
get_ebnf_transformer(),
get_ebnf_compiler(compiler_suite, source))
- resume_logging(log_dir)
+ resume_logging(lg_dir)
if has_errors(messages):
raise DefinitionError(only_errors(messages), source)
preprocessor = get_ebnf_preprocessor
@@ -360,7 +360,7 @@ def is_outdated(compiler_suite: str, grammar_source: str) -> bool:
"""
Returns ``True`` if the ``compile_suite`` needs to be updated.
- An update is needed, if either the grammar in the compieler suite
+ An update is needed, if either the grammar in the compiler suite
does not reflect the latest changes of ``grammar_source`` or if
sections from the compiler suite have diligently been overwritten
with whitespace in order to trigger their recreation. Note: Do not
@@ -488,7 +488,7 @@ def compile_on_disk(source_file: str, compiler_suite="", extension=".xml") -> It
'Possibly due to a forgotten import at the beginning ' \
'of the AST-Block (!)'.format(str(e))
else:
- err_str = 'Exception {} while compiling AST-Tansformation: {}' \
+ err_str = 'Exception {} while compiling AST-Transformation: {}' \
.format(str(type(e)), str(e))
messages.append(Error(err_str, 0, CANNOT_VERIFY_TRANSTABLE_WARNING))
if is_logging():
@@ -20,7 +20,7 @@
Module ``ebnf`` provides a self-hosting parser for EBNF-Grammars as
well as an EBNF-compiler that compiles an EBNF-Grammar into a
DHParser based Grammar class that can be executed to parse source text
- conforming to this grammar into contrete syntax trees.
+ conforming to this grammar into concrete syntax trees.
"""
@@ -53,7 +53,8 @@ from DHParser.transform import TransformationFunc, traverse, remove_brackets, \
from DHParser.versionnumber import __version__
- __all__ = ('get_ebnf_preprocessor',
+ __all__ = ('DHPARSER_IMPORTS',
+ 'get_ebnf_preprocessor',
'get_ebnf_grammar',
'get_ebnf_transformer',
'get_ebnf_compiler',
@@ -471,7 +472,7 @@ def grammar_changed(grammar_class, grammar_source: str) -> bool:
pycode = f.read()
m = re.search(r'class \w*\(Grammar\)', pycode)
if m:
- m = re.search(' source_hash__ *= *"([a-z0-9]*)"',
+ m = re.search(' {4}source_hash__ *= *"([a-z0-9]*)"',
pycode[m.span()[1]:])
return not (m and m.groups() and m.groups()[-1] == chksum)
else:
@@ -682,8 +683,8 @@ KNOWN_DIRECTIVES = {
'anonymous': 'List of symbols that are NOT to appear as tag-names',
'drop': 'List of tags to be dropped early from syntax tree, '
'special values: strings, whitespace, regexps',
- '$SYMBOL_filer': 'Function that transforms captured values of the givensymbol on retrieval',
- '$SYMBOL_error': 'Pair of regular epxression an custom error message if regex matches',
+ '$SYMBOL_filer': 'Function that transforms captured values of the given symbol on retrieval',
+ '$SYMBOL_error': 'Pair of regular expression and custom error message if regex matches',
'$SYMBOL_skip': 'List of regexes or functions to find reentry point after an error',
'$SYMBOL_resume': 'List or regexes or functions to find reentry point for parent parser'
}
@@ -839,10 +840,10 @@ class EBNFCompiler(Compiler):
`whitespace__` that need to be defined at the beginning
of the grammar class because they are referred to later.
- deferred_tasks: A list of callables that is filled during
- compilatation, but that will be executed only after
+ deferred_tasks: A list of callables that is filled during
+ compilation, but that will be executed only after
compilation has finished. Typically, it contains
- sementatic checks that require information that
+ semantic checks that require information that
is only available upon completion of compilation.
root_symbol: The name of the root symbol.
@@ -1901,7 +1902,7 @@ class EBNFCompiler(Compiler):
if self.anonymous_regexp.match(arg):
self.tree.new_error(
- node, ('Retrive operator "%s" does not work with anonymous parsers like %s')
+ node, ('Retrieve operator "%s" does not work with anonymous parsers like %s')
% (prefix, arg))
return arg
@@ -1956,7 +1957,7 @@ class EBNFCompiler(Compiler):
def extract_counted(self, node) -> Tuple[Node, Tuple[int, int]]:
"""Returns the content of a counted-node in a normalized form:
(node, (n, m)) where node is root of the sub-parser that is counted,
- i.e. repeated n or n upto m times.
+ i.e. repeated n or n up to m times.
"""
assert node.tag_name == 'counted'
assert len(node.children) == 2
@@ -2136,7 +2137,7 @@ def get_ebnf_compiler(grammar_name="", grammar_source="") -> EBNFCompiler:
def compile_ebnf_ast(ast: Node) -> str:
"""Compiles the abstract-syntax-tree of an EBNF-source-text into
python code of a class derived from `parse.Grammar` that can
- parse text following the grammar describend with the EBNF-code."""
+ parse text following the grammar described with the EBNF-code."""
return get_ebnf_compiler()(ast)
@@ -72,6 +72,7 @@ __all__ = ('ErrorCode',
'PARSER_STOPPED_BEFORE_END',
'PARSER_LOOKAHEAD_MATCH_ONLY',
'CAPTURE_STACK_NOT_EMPTY',
+ 'AUTORETRIEVED_SYMBOL_NOT_CLEARED',
'MALFORMED_ERROR_STRING',
'AMBIGUOUS_ERROR_HANDLING',
'REDEFINED_DIRECTIVE',
@@ -112,7 +113,7 @@ RESUME_NOTICE = ErrorCode(50)
REDECLARED_TOKEN_WARNING = ErrorCode(120)
UNUSED_ERROR_HANDLING_WARNING = ErrorCode(130)
- LEFT_RECURSION_WARNING = ErrorCode(140)
+ LEFT_RECURSION_WARNING = ErrorCode(140) # obsolete!
UNDEFINED_SYMBOL_IN_TRANSTABLE_WARNING = ErrorCode(610)
CANNOT_VERIFY_TRANSTABLE_WARNING = ErrorCode(620)
@@ -227,7 +228,7 @@ def is_error(code: int) -> bool:
def is_fatal(code: int) -> bool:
"""Returns True, if the error is fatal. Fatal errors are typically raised
when a crash (i.e. Python exception) occurs at later stages of the
- processing pipline (e.g. ast transformation, compiling). """
+ processing pipeline (e.g. ast transformation, compiling). """
return code >= FATAL
......
@@ -37,7 +37,7 @@ to which either ``False`` (turn off logging), a log directory name or
The other components of DHParser check whether logging is on and
write log files in the logging directory accordingly. Usually,
this will be concrete and abstract syntax trees as well as the full
- and abreviated parsing history.
+ and abbreviated parsing history.
Example::
@@ -186,7 +186,7 @@ def is_logging(thread_local_query: bool = True) -> bool:
def create_log(log_name: str) -> str:
"""
Creates a new log file. If log_name is not just a file name but a path with
- at least one directoy (which can be './') the file is not created in the
+ at least one directory (which can be './') the file is not created in the
configured log directory but at the given path. If a file with the same
name already exists, it will be overwritten.
@@ -375,10 +375,10 @@ class HistoryRecord:
dots = '...' if len(self.text) > n else ''
excerpt = excerpt + '<span class="unmatched">' + self.text[:n] + dots + '</span>'
i = stack.rfind('-&gt;')
- chr = stack[i + 12:i + 13]
- while not chr.isidentifier() and i >= 0:
+ ch = stack[i + 12:i + 13]
+ while not ch.isidentifier() and i >= 0:
i = stack.rfind('-&gt;', 0, i)
- chr = stack[i + 12:i + 13]
+ ch = stack[i + 12:i + 13]
if i >= 0:
i += 12
k = stack.find('<', i)
@@ -511,7 +511,7 @@ def log_ST(syntax_tree, log_file_name) -> bool:
LOG_SIZE_THRESHOLD = 10000 # maximum number of history records to log
- LOG_TAIL_THRESHOLD = 500 # maximum number of history recors for "tail log"
+ LOG_TAIL_THRESHOLD = 500 # maximum number of history records for "tail log"
def log_parsing_history(grammar, log_file_name: str = '', html: bool = True) -> bool:
@@ -37,7 +37,7 @@ from typing import Callable, cast, List, Tuple, Set, Dict, \
from DHParser.configuration import get_config_value
from DHParser.error import Error, ErrorCode, is_error, MANDATORY_CONTINUATION, \
- LEFT_RECURSION_WARNING, UNDEFINED_RETRIEVE, PARSER_LOOKAHEAD_FAILURE_ONLY, \
+ UNDEFINED_RETRIEVE, PARSER_LOOKAHEAD_FAILURE_ONLY, \
PARSER_LOOKAHEAD_MATCH_ONLY, PARSER_STOPPED_BEFORE_END, PARSER_NEVER_TOUCHES_DOCUMENT, \
MALFORMED_ERROR_STRING, MANDATORY_CONTINUATION_AT_EOF, DUPLICATE_PARSERS_IN_ALTERNATIVE, \
CAPTURE_WITHOUT_PARSERNAME, CAPTURE_DROPPED_CONTENT_WARNING, LOOKAHEAD_WITH_OPTIONAL_PARSER, \
@@ -133,7 +133,7 @@ class ParserError(Exception):
return "%i: %s %s" % (self.node.pos, str(self.rest[:25]), repr(self.node))
- ResumeList = List[Union[RxPatternType, str, Callable]] # list of strings or regular expressiones
+ ResumeList = List[Union[RxPatternType, str, Callable]] # list of strings or regular expressions
ReentryPointAlgorithm = Callable[[StringView, int], Tuple[int, int]]
# (text, start point) => (reentry point, match length)
# A return value of (-1, x) means that no reentry point before the end of the document was found
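For illustration, a sketch of such a reentry algorithm (the function and its resume-at-semicolon policy are invented, not part of the commit):

    from typing import Tuple
    from DHParser.stringview import StringView

    def resume_at_semicolon(text: StringView, start: int) -> Tuple[int, int]:
        """Reentry at the next semicolon; (-1, 0) if there is none."""
        i = text.find(';', start)
        return (i, 1) if i >= 0 else (-1, 0)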
@@ -237,7 +237,7 @@ def reentry_point(rest: StringView,
ApplyFunc = Callable[[List['Parser']], Optional[bool]]
# The return value of `True` stops any further application
FlagFunc = Callable[[ApplyFunc, Set[ApplyFunc]], bool]
ParseFunc = Callable[[StringView], Tuple[Optional[Node], StringView]]
@@ -285,7 +285,7 @@ class Parser:
pname: The parser's name or a (possibly empty) alias name in case
of an anonymous parser.
- anonymous: A property indicating that the parser remains anynomous
+ anonymous: A property indicating that the parser remains anonymous
anonymous with respect to the nodes it returns. For performance
reasons this is implemented as an object variable rather
than a property. This property must always be equal to
@@ -478,7 +478,7 @@ class Parser:
if not grammar.returning_from_recursion__:
grammar.returning_from_recursion__ = recursion_state
- except RecursionError as e:
+ except RecursionError:
node = Node(ZOMBIE_TAG, str(text[:min(10, max(1, text.find("\n")))]) + " ...")
node._pos = location
error = Error("maximum recursion depth of parser reached; potentially due to too many "
@@ -563,7 +563,7 @@ class Parser:
"to a different Grammar object!")
except AttributeError:
pass # ignore setting of grammar attribute for placeholder parser
- except NameError: # Cython: No access to GRAMMA_PLACEHOLDER, yet :-(
+ except NameError: # Cython: No access to GRAMMAR_PLACEHOLDER, yet :-(
self._grammar = grammar
def sub_parsers(self) -> Tuple['Parser', ...]:
@@ -640,7 +640,7 @@ class Parser:
return self._apply(func, [], positive_flip)
def static_error(self, msg: str, code: ErrorCode) -> 'AnalysisError':
- return (self.symbol, self, Error(msg, 0, code))
+ return self.symbol, self, Error(msg, 0, code)
def static_analysis(self) -> List['AnalysisError']:
"""Analyses the parser for logical errors after the grammar has been
@@ -649,7 +649,7 @@
def copy_parser_base_attrs(src: Parser, duplicate: Parser):
- """Duplicates all attributes of the Parser-class from source to dest."""
+ """Duplicates all attributes of the Parser-class from `src` to `duplicate`."""
duplicate.pname = src.pname
duplicate.anonymous = src.anonymous
duplicate.drop_content = src.drop_content
@@ -1032,7 +1032,8 @@ class Grammar:
anonymous__ = RX_NEVER_MATCH # type: RxPatternType
# some default values
COMMENT__ = r'' # type: str # r'#.*(?:\n|$)'
- WSP_RE__ = mixin_comment(whitespace=r'[\t ]*', comment=COMMENT__) # type: str
+ WHITESPACE__ = r'[\t ]*'
+ WSP_RE__ = mixin_comment(whitespace=WHITESPACE__, comment=COMMENT__) # type: str
static_analysis_pending__ = [True] # type: List[bool]
static_analysis_errors__ = [] # type: List[AnalysisError]
@@ -1515,8 +1516,8 @@ class Grammar:
error_list.extend(parser.static_analysis())
if parser.pname and not has_leaf_parsers(parser):
error_list.append((parser.symbol, parser, Error(
- 'Parser %s is entirely cyclical and, therefore, cannot even '
- 'touch the parsed document' % parser.location_info(),
+ 'Parser %s is entirely cyclical and, therefore, cannot even touch '
+ 'the parsed document' % cast('CombinedParser', parser).location_info(),
0, PARSER_NEVER_TOUCHES_DOCUMENT)))
return error_list
@@ -1548,7 +1549,7 @@ GRAMMAR_PLACEHOLDER = Grammar()
########################################################################
#
- # Special parser classes: Alway, Never, PreprocessorToken (leaf classes)
+ # Special parser classes: Always, Never, PreprocessorToken (leaf classes)
#
########################################################################
@@ -1871,7 +1872,7 @@ class CombinedParser(Parser):
def location_info(self) -> str:
"""Returns a description of the location of the parser within the grammar
- for the purpose of transparent erorr reporting."""
+ for the purpose of transparent error reporting."""
return '%s%s in definition of "%s" as %s' % (self.pname or '_', self.ptype, self.symbol, str(self))
@@ -1909,7 +1910,7 @@ class UnaryParser(CombinedParser):
class NaryParser(CombinedParser):
"""
- Base class of all Nnary parsers, i.e. parser that
+ Base class of all Nary parsers, i.e. parser that
contains one or more other parsers, like the alternative
parser for example.
@@ -2083,7 +2084,7 @@ class OneOrMore(UnaryParser):
errors = super().static_analysis()
if self.parser.is_optional():
errors.append(self.static_error(
- "Use ZeroOrMore instead of nesting OneOrMore with an optional parser in " \
+ "Use ZeroOrMore instead of nesting OneOrMore with an optional parser in "
+ self.location_info(), BADLY_NESTED_OPTIONAL_PARSER))
return errors
@@ -2247,7 +2248,6 @@ class MandatoryNary(NaryParser):
This is a helper function that abstracts functionality that is
needed by the Interleave- as well as the Series-parser.
- :param parser: the grammar
:param text_: the point, where the mandatory violation. As usual the
string view represents the remaining text from this point.
:param failed_on_lookahead: True if the violating parser was a
@@ -2310,11 +2310,11 @@ class MandatoryNary(NaryParser):
elif length == 0:
msg.append('Number of elements %i is below minimum length of 1' % length)
elif length >= NO_MANDATORY:
- msg.append('Number of elemnts %i of series exceeds maximum length of %i' \
- % (length, NO_MANDATORY))
+ msg.append('Number of elements %i of series exceeds maximum length of %i'
+ % (length, NO_MANDATORY))
elif not (0 <= self.mandatory < length or self.mandatory == NO_MANDATORY):
msg.append('Illegal value %i for mandatory-parameter in a parser with %i elements!'
% (self.mandatory, length))
if msg:
msg.insert(0, 'Illegal configuration of mandatory Nary-parser '
+ self.location_info())
@@ -2369,7 +2369,7 @@ class Series(MandatoryNary):
# assert len(results) <= len(self.parsers) \
# or len(self.parsers) >= len([p for p in results if p.tag_name != ZOMBIE_TAG])
ret_node = self._return_values(results) # type: Node
- if error and reloc < 0:
+ if error and reloc < 0: # no worry: reloc is always defined when error is True
raise ParserError(ret_node.with_pos(self.grammar.document_length__ - len(text_)),
text, error, first_throw=True)
return ret_node, text_
@@ -2520,11 +2520,13 @@ class Alternative(NaryParser):
for i, p in enumerate(self.parsers):
if p.is_optional():
break
+ # no worry: p,i are defined, because self.parsers cannot be empty.
+ # See NaryParser.__init__()
errors.append(self.static_error(
"Parser-specification Error in " + self.location_info()
+ "\nOnly the very last alternative may be optional! "
+ 'Parser "%s" at position %i out of %i is optional'
- %(p.tag_name, i + 1, len(self.parsers)),
+ % (p.tag_name, i + 1, len(self.parsers)),
BAD_ORDER_OF_ALTERNATIVES))
# check for errors like "A" | "AB" where "AB" would never be reached,
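For illustration, the kind of definition these checks reject (an invented toy grammar; depending on the configuration, the static-analysis error surfaces when the grammar is compiled or first instantiated):

    from DHParser.dsl import create_parser

    try:
        # optional alternative not in last position -> BAD_ORDER_OF_ALTERNATIVES
        create_parser('bad = ["a"] | "b"')
    except Exception as e:
        print(e)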
@@ -2638,7 +2640,7 @@ class Interleave(MandatoryNary):
if length == n:
break # avoid infinite loop
nd = self._return_values(results) # type: Node
- if error and reloc < 0:
+ if error and reloc < 0: # no worry: reloc is always defined when error is True
raise ParserError(nd.with_pos(self.grammar.document_length__ - len(text)),
text, error, first_throw=True)
return nd, text_
@@ -2660,7 +2662,7 @@ class Interleave(MandatoryNary):
other_parsers = cast('Interleave', other).parsers if isinstance(other, Interleave) \
else cast(Tuple[Parser, ...], (other,)) # type: Tuple[Parser, ...]
other_repetitions = cast('Interleave', other).repetitions \
- if isinstance(other, Interleave) else [(1, 1),]
+ if isinstance(other, Interleave) else [(1, 1), ]
other_mandatory = cast('Interleave', other).mandatory \
if isinstance(other, Interleave) else NO_MANDATORY
if other_mandatory == NO_MANDATORY:
@@ -2724,7 +2726,7 @@ class FlowParser(UnaryParser):
Base class for all flow parsers like Lookahead and Lookbehind.
"""
def sign(self, bool_value) -> bool:
- """Returns the value. Can be overriden to return the inverted bool."""
+ """Returns the value. Can be overridden to return the inverted bool."""
return bool_value
@@ -2754,7 +2756,7 @@ class Lookahead(FlowParser):
errors = super().static_analysis()
if self.parser.is_optional():
errors.append((self.pname, self, Error(
- 'Lookahead %s does not make sense with optional parser "%s"!' \
+ 'Lookahead %s does not make sense with optional parser "%s"!'
% (self.pname, str(self.parser)),
0, LOOKAHEAD_WITH_OPTIONAL_PARSER)))
return errors
@@ -2873,7 +2875,6 @@ class Capture(UnaryParser):
return errors
MatchVariableFunc = Callable[[Union[StringView, str], List[str]], Optional[str]]
# (text, stack) -> value, where:
# text is the following text for be parsed
@@ -2886,6 +2887,7 @@ MatchVariableFunc = Callable[[Union[StringView, str], List[str]], Optional[str]]
# Match functions, the name of which does not start with 'optional_', should
# on the contrary always return `None` if no match occurs!
def last_value(text: Union[StringView, str], stack: List[str]) -> str:
"""Matches `text` with the most recent value on the capture stack.
This is the default case when retrieving captured substrings."""
@@ -2990,7 +2992,7 @@ class Retrieve(UnaryParser):
# returns a None match if parser is optional but there was no value to retrieve
return None, text
else:
node = Node(tn, '') # .with_pos(self.grammar.document_length__ - text.__len__())
self.grammar.tree__.new_error(
node, dsl_error_msg(self, "'%s' undefined or exhausted." % self.symbol_pname),
UNDEFINED_RETRIEVE)
@@ -104,7 +104,7 @@ def _apply_preprocessors(text: str, preprocessors: Tuple[PreprocessorFunc, ...])
def chain_preprocessors(*preprocessors) -> PreprocessorFunc:
"""
- Merges a seuqence of preprocessor functions in to a single function.
+ Merges a sequence of preprocessor functions into a single function.
"""
return functools.partial(_apply_preprocessors, preprocessors=preprocessors)
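A toy sketch of chaining (assuming, for simplicity, plain str -> str preprocessors; DHParser's actual PreprocessorFunc may also carry source-mapping information):

    from DHParser.preprocess import chain_preprocessors

    def strip_bom(text: str) -> str:
        return text.lstrip('\ufeff')

    def normalize_newlines(text: str) -> str:
        return text.replace('\r\n', '\n')

    # applies strip_bom first, then normalize_newlines
    preprocess = chain_preprocessors(strip_bom, normalize_newlines)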
@@ -376,8 +376,8 @@ class ExecutionEnvironment:
"""
if self._closed:
return None, (-32000,
- "Server Error: Execution environment has already been shut down! "\
- "Cannot process method {} with parameters {} any more."\
+ "Server Error: Execution environment has already been shut down! "
+ "Cannot process method {} with parameters {} any more."
.format(method, params))
result = None # type: Optional[JSON_Type]
rpc_error = None # type: Optional[RPC_Error_Type]
@@ -508,8 +508,8 @@ class Connection:
# do not do: del self.active_tasks[json_id] !!!
async def cleanup(self):
- open_tasks = {task for id, task in self.active_tasks.items()
- if id not in self.finished_tasks}
+ open_tasks = {task for idT, task in self.active_tasks.items()
+ if idT not in self.finished_tasks}
if open_tasks:
_, pending = await asyncio.wait(
open_tasks, timeout=3.0) # type: Set[asyncio.Future], Set[asyncio.Future]
@@ -678,7 +678,7 @@ class Server:
# see: https://docs.python.org/3/library/asyncio-eventloop.html#executing-code-in-thread-or-process-pools
self.cpu_bound = frozenset(self.rpc_table.keys()) if cpu_bound == ALL_RPCs else cpu_bound
self.blocking = frozenset(self.rpc_table.keys()) if blocking == ALL_RPCs else blocking
- self.blocking = self.blocking - self.cpu_bound # cpu_bound property takes precedence
+ self.blocking -= self.cpu_bound # cpu_bound property takes precedence
assert not (self.cpu_bound - self.rpc_table.keys())
assert not (self.blocking - self.rpc_table.keys())
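The precedence rule from the changed line, spelled out as plain set arithmetic (names invented for illustration):

    rpc_methods = {'compile', 'lint', 'info'}
    cpu_bound   = {'compile'}
    blocking    = {'compile', 'lint'}

    blocking -= cpu_bound      # -> {'lint'}: cpu_bound takes precedence
    assert not (cpu_bound - rpc_methods) and not (blocking - rpc_methods)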
@@ -1404,7 +1404,7 @@ def detach_server(host: str = USE_DEFAULT_HOST,
# if sys.version_info >= (3, 7):
# await writer.wait_closed()
- global python_interpreter_name_cached
+ # global python_interpreter_name_cached
host, port = substitute_default_host_and_port(host, port)
null_device = " >/dev/null" if platform.system() != "Windows" else " > NUL"
interpreter = sys.executable
@@ -32,7 +32,7 @@ speedup. The module comes with a ``stringview.pxd`` that contains some type
declarations to more fully exploit the benefits of the Cython-compiler.
"""
- from typing import Optional, Union, Iterable, Tuple, List, Sequence, cast
+ from typing import Optional, Union, Iterable, Tuple, List, cast
try:
import cython
@@ -146,7 +146,7 @@ class StringView: # collections.abc.Sized
# PERFORMANCE WARNING: This creates a copy of the string-slice
if self._fullstring: # optimization: avoid slicing/copying