Commit 638c0d11 authored by eckhart

- mypy type errors corrected

parent 3862c2a3
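
Most of the corrections below use comment-style annotations ("# type: ..."), which mypy reads exactly like inline annotations while the code stays valid on older Python versions. A minimal sketch of the convention (all names made up):

from typing import Dict, List, Optional

names = []       # type: List[str]
lookup = dict()  # type: Dict[str, int]
found = None     # type: Optional[str]
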
@@ -450,6 +450,7 @@ def compile_on_disk(source_file: str, compiler_suite="", extension=".xml") -> It
A (potentially empty) list of error or warning messages.
"""
filepath = os.path.normpath(source_file)
f = None # Optional[TextIO]
with open(source_file, encoding="utf-8") as f:
source = f.read()
rootname = os.path.splitext(filepath)[0]
@@ -548,8 +549,10 @@ def compile_on_disk(source_file: str, compiler_suite="", extension=".xml") -> It
f.write(result.as_xml())
else:
f.write(result.as_sxpr())
else:
elif isinstance(result, str):
f.write(result)
else:
raise AssertionError('Illegal result type: ' + str(type(result)))
except (PermissionError, FileNotFoundError, IOError) as error:
print('# Could not write file "' + rootname + '.py" because of: '
+ "\n# ".join(str(error).split('\n')))
......
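
The bare "else:" in compile_on_disk gave mypy no way to narrow the type of result before f.write(result); the explicit isinstance(result, str) branch narrows the union, and the new "else:" turns an unexpected type into a hard failure. A self-contained sketch of the same narrowing pattern, assuming result is a union of a tree-like class and str (Tree and serialize are illustrative stand-ins, not DHParser names):

from typing import Union

class Tree:
    """Stand-in for a syntax-tree result class."""
    def as_sxpr(self) -> str:
        return "(root)"

def serialize(result: Union[Tree, str]) -> str:
    # isinstance() narrows the union, so each branch type-checks
    if isinstance(result, Tree):
        return result.as_sxpr()
    elif isinstance(result, str):
        return result
    else:
        raise AssertionError('Illegal result type: ' + str(type(result)))
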
@@ -24,23 +24,24 @@ conforming to this grammar into concrete syntax trees.
"""
import keyword
from collections import OrderedDict
from functools import partial
import keyword
import os
from DHParser.compile import CompilerError, Compiler
from DHParser.error import Error
from DHParser.parse import Grammar, mixin_comment, Forward, RegExp, Whitespace, \
NegativeLookahead, Alternative, Series, Option, OneOrMore, ZeroOrMore, Token
from DHParser.preprocess import nil_preprocessor, PreprocessorFunc
from DHParser.syntaxtree import Node, RootNode, WHITESPACE_PTYPE, TOKEN_PTYPE
from DHParser.syntaxtree import Node, WHITESPACE_PTYPE, TOKEN_PTYPE
from DHParser.toolkit import load_if_file, escape_re, md5, sane_parser_name, re, expand_table, \
GLOBALS, CONFIG_PRESET, get_config_value, unrepr, typing
from DHParser.transform import TransformationFunc, traverse, remove_brackets, \
reduce_single_child, replace_by_single_child, remove_expendables, \
remove_tokens, flatten, forbid, assert_content, remove_infix_operator
remove_tokens, flatten, forbid, assert_content
from DHParser.versionnumber import __version__
from typing import Callable, Dict, List, Set, Tuple, Any
from typing import Callable, Dict, List, Set, Tuple, Union
__all__ = ('get_ebnf_preprocessor',
@@ -774,13 +775,13 @@ class EBNFCompiler(Compiler):
# self.directives['testing'] = value.lower() not in {"off", "false", "no"}
elif key == 'literalws':
value = {child.content.strip().lower() for child in node.children[1:]}
if ((value - {'left', 'right', 'both', 'none'})
or ('none' in value and len(value) > 1)):
values = {child.content.strip().lower() for child in node.children[1:]}
if ((values - {'left', 'right', 'both', 'none'})
or ('none' in values and len(values) > 1)):
self.tree.new_error(node, 'Directive "literalws" allows only `left`, `right`, '
'`both` or `none`, not `%s`' % ", ".join(value))
wsp = {'left', 'right'} if 'both' in value \
else {} if 'none' in value else value
'`both` or `none`, not `%s`' % ", ".join(values))
wsp = {'left', 'right'} if 'both' in values \
else {} if 'none' in values else values
self.directives[key] = list(wsp)
elif key in {'tokens', 'preprocessor_tokens'}:
@@ -822,7 +823,7 @@ class EBNFCompiler(Compiler):
self.tree.new_error(node, 'Reentry conditions for "%s" have already been defined'
' earlier!' % symbol)
else:
reentry_conditions = []
reentry_conditions = [] # type: List[Union[unrepr, str]]
for child in node.children[1:]:
if child.parser.name == 'regexp':
reentry_conditions.append(unrepr("re.compile(r'%s')" % extract_regex(child)))
......
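
Two recurring fixes appear in ebnf.py: the set built from the directive's children gets its own name (values), presumably because the name value is bound to a different, incompatible type in other branches of the method, and the empty list reentry_conditions receives an explicit element type, since mypy cannot infer one from an empty literal. A small sketch of both moves, with made-up names and a placeholder union type:

from typing import List, Set, Union

def normalize(items: List[str]) -> List[Union[str, bytes]]:
    # a distinct name keeps each variable at a single type
    values = {item.strip().lower() for item in items}  # type: Set[str]
    # an empty literal has no inferable element type, so annotate it,
    # just as reentry_conditions is annotated above
    result = []  # type: List[Union[str, bytes]]
    result.extend(sorted(values))
    return result
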
@@ -56,7 +56,7 @@ import threading
from DHParser.error import line_col
from DHParser.stringview import StringView
from DHParser.syntaxtree import Node
from DHParser.syntaxtree import Node, ParserBase
from DHParser.toolkit import is_filename, escape_control_characters, GLOBALS, typing
from typing import List, Tuple, Union
@@ -220,9 +220,9 @@ class HistoryRecord:
'\n</style>\n</head>\n<body>\n')
HTML_LEAD_OUT = '\n</body>\n</html>\n'
def __init__(self, call_stack: List['Parser'], node: Node, text: StringView) -> None:
def __init__(self, call_stack: List['ParserBase'], node: Node, text: StringView) -> None:
# copy call stack, dropping uninformative Forward-Parsers
self.call_stack = [p for p in call_stack if p.ptype != ":Forward"] # type: List['Parser']
self.call_stack = [p for p in call_stack if p.ptype != ":Forward"] # type: List['ParserBase']
self.node = node # type: Node
self.text = text # type: StringView
self.line_col = (1, 1) # type: Tuple[int, int]
......
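
log.py imports only ParserBase from syntaxtree (see the import change above), so the string annotation 'Parser' in HistoryRecord presumably could not be resolved by mypy in that module; annotating with the imported base class fixes the lookup. A minimal sketch of the pattern (HistoryEntry and the ptype value are made-up stand-ins):

from typing import List

class ParserBase:
    """Stand-in base class; concrete parsers are defined elsewhere."""
    ptype = ":base"

class HistoryEntry:
    def __init__(self, call_stack: List['ParserBase']) -> None:
        # a string annotation must name something resolvable in this module:
        # the imported base class is, a subclass from another module is not
        self.call_stack = [p for p in call_stack
                           if p.ptype != ":Forward"]  # type: List['ParserBase']
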
@@ -38,7 +38,7 @@ from DHParser.log import is_logging, HistoryRecord
from DHParser.preprocess import BEGIN_TOKEN, END_TOKEN, RX_TOKEN_NAME
from DHParser.stringview import StringView, EMPTY_STRING_VIEW
from DHParser.syntaxtree import Node, RootNode, ParserBase, WHITESPACE_PTYPE, \
TOKEN_PTYPE, ZOMBIE_PARSER
TOKEN_PTYPE, ZOMBIE_PARSER, ResultType
from DHParser.toolkit import sane_parser_name, escape_control_characters, re, typing
from typing import Callable, cast, List, Tuple, Set, Dict, DefaultDict, Union, Optional, Any
@@ -109,17 +109,6 @@ class ParserError(Exception):
self.first_throw = first_throw # type: bool
class ResumeRule:
"""
Rule for resuming after a parser error was caught. A resume rule
consists of a parser name and a list of compiled regular expressions
or strings.
"""
def __init__(self, parser: Union[ParserBase, str], resume: List[Union[str, Any]]):
self.parser_name = parser if isinstance(parser, str) else parser.name # type: str
self.resume = resume # type: List[Union[str, Any]]
ResumeList = List[Union[str, Any]] # list of strings or regular expressions
@@ -164,7 +153,7 @@ def add_parser_guard(parser_func):
def guarded_call(parser: 'Parser', text: StringView) -> Tuple[Optional[Node], StringView]:
"""Wrapper method for Parser.__call__. This is used to add in an aspect-oriented
fashion the business intelligence that is common to all parsers."""
grammar = parser.grammar
grammar = parser._grammar # read the protected member instead of the property to avoid a function call
location = grammar.document_length__ - len(text)
try:
@@ -192,6 +181,7 @@ def add_parser_guard(parser_func):
# PARSER CALL: run original __call__ method
node, rest = parser_func(parser, text)
except ParserError as error:
# does this play well with variable setting? add rollback clause here? tests needed...
gap = len(text) - len(error.rest)
rules = grammar.resume_rules__.get(parser.name, [])
rest = error.rest[len(error.node):]
@@ -211,7 +201,7 @@ def add_parser_guard(parser_func):
elif error.first_throw:
raise ParserError(error.node, error.rest, first_throw=False)
else:
result = (Node(None, text[:gap]), error.node) if gap else error.node
result = (Node(None, text[:gap]), error.node) if gap else error.node # type: ResultType
raise ParserError(Node(parser, result), text, first_throw=False)
if grammar.left_recursion_handling__:
@@ -329,7 +319,7 @@ class Parser(ParserBase):
def __init__(self) -> None:
# assert isinstance(name, str), str(name)
super().__init__()
self._grammar = None # type: Optional['Grammar']
self._grammar = ZOMBIE_GRAMMAR # type: Grammar
self.reset()
# add "aspect oriented" wrapper around parser calls
@@ -380,21 +370,20 @@ class Parser(ParserBase):
return Alternative(self, other)
@property
def grammar(self) -> Optional['Grammar']:
if self._grammar:
def grammar(self) -> 'Grammar':
if self._grammar != ZOMBIE_GRAMMAR:
return self._grammar
else:
raise AssertionError('Grammar has not yet been set!')
@grammar.setter
def grammar(self, grammar: 'Grammar'):
if self._grammar is None:
if self._grammar == ZOMBIE_GRAMMAR:
self._grammar = grammar
self._grammar_assigned_notifier()
else:
if self._grammar != grammar:
raise AssertionError("Parser has already been assigned"
"to a different Grammar object!")
elif self._grammar != grammar:
raise AssertionError("Parser has already been assigned"
"to a different Grammar object!")
def _grammar_assigned_notifier(self):
"""A function that notifies the parser object that it has been
@@ -622,7 +611,7 @@ class Grammar:
root__ = ZOMBIE_PARSER # type: ParserBase
# root__ must be overwritten with the root-parser by grammar subclass
parser_initialization__ = "pending" # type: str
resume_rules__ = dict() # type: Dictionary[str, ResumeRule]
resume_rules__ = dict() # type: Dict[str, ResumeList]
# some default values
# COMMENT__ = r'' # type: str # r'#.*(?:\n|$)'
# WSP_RE__ = mixin_comment(whitespace=r'[\t ]*', comment=COMMENT__) # type: str
@@ -666,7 +655,7 @@ class Grammar:
def __init__(self, root: Parser = None) -> None:
self.all_parsers__ = set() # type: Set[ParserBase]
self.start_parser__ = None # type: ParserBase
self.start_parser__ = None # type: Optional[ParserBase]
self._dirty_flag__ = False # type: bool
self.history_tracking__ = False # type: bool
self.memoization__ = True # type: bool
@@ -709,7 +698,7 @@ class Grammar:
self.rollback__ = [] # type: List[Tuple[int, Callable]]
self.last_rb__loc__ = -1 # type: int
# support for call stack tracing
self.call_stack__ = [] # type: List[Parser]
self.call_stack__ = [] # type: List[ParserBase]
# snapshots of call stacks
self.history__ = [] # type: List[HistoryRecord]
# also needed for call stack tracing
@@ -930,6 +919,9 @@ def dsl_error_msg(parser: Parser, error_str: str) -> str:
return " ".join(msg)
ZOMBIE_GRAMMAR = Grammar()
########################################################################
#
# _Token and Regular Expression parser classes (i.e. leaf classes)
......
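
The parse.py changes replace the None default of Parser._grammar with the module-level ZOMBIE_GRAMMAR instance, a null object that lets the attribute and the grammar property keep the plain type Grammar instead of Optional['Grammar']. A minimal sketch of that sentinel pattern under made-up names (Config, Component and NULL_CONFIG are not DHParser APIs):

class Config:
    """Stand-in for a Grammar-like class."""
    pass

NULL_CONFIG = Config()  # null object used in place of None

class Component:
    def __init__(self) -> None:
        # never None, so the attribute's type stays a plain Config
        self._config = NULL_CONFIG  # type: Config

    @property
    def config(self) -> Config:
        if self._config is not NULL_CONFIG:
            return self._config
        raise AssertionError('Config has not yet been set!')

    @config.setter
    def config(self, cfg: Config) -> None:
        if self._config is NULL_CONFIG:
            self._config = cfg
        elif self._config is not cfg:
            raise AssertionError('Already assigned to a different Config object!')
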
@@ -41,6 +41,9 @@ __all__ = ('ParserBase',
'ZombieParser',
'ZOMBIE_PARSER',
'ZOMBIE_NODE',
'ResultType',
'StrictResultType',
'ChildrenType',
'Node',
'RootNode',
'ZOMBIE_ROOTNODE',
@@ -89,10 +92,10 @@ class ParserBase:
pass
@property
def grammar(self) -> Optional['Grammar']:
def grammar(self) -> 'Grammar':
"""Returns the Grammar object to which the parser belongs. If not
yet connected to any Grammar object, None is returned."""
return None
raise NotImplementedError
def apply(self, func: Callable) -> bool:
"""Applies the function `func` to the parser. Returns False, if
......
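
In syntaxtree.py the base-class property ParserBase.grammar used to return None, which contradicts a non-Optional return type; raising NotImplementedError keeps the declared type 'Grammar' honest and leaves the real lookup to the overriding property in Parser (shown in the parse.py hunks above). A short sketch of that base/override arrangement, with stand-in names:

class Grammar:
    """Stand-in grammar class."""
    pass

class ParserBase:
    @property
    def grammar(self) -> 'Grammar':
        # returning None here would break the non-Optional contract;
        # concrete subclasses must override this property
        raise NotImplementedError

class ConcreteParser(ParserBase):
    def __init__(self, grammar: Grammar) -> None:
        self._grammar = grammar  # type: Grammar

    @property
    def grammar(self) -> 'Grammar':
        return self._grammar
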
@@ -41,7 +41,7 @@ except ImportError:
import DHParser.foreign_typing as typing
sys.modules['typing'] = typing # make it possible to import from typing
from typing import Any, Iterable, Sequence, Set, Union, Dict # , cast
from typing import Any, Iterable, Sequence, Set, Union, Dict, Hashable # , cast
__all__ = ('escape_re',
@@ -72,10 +72,10 @@ __all__ = ('escape_re',
#######################################################################
GLOBALS = threading.local()
CONFIG_PRESET = {}
CONFIG_PRESET = dict() # type: Dict[Hashable, Any]
def get_config_value(key):
def get_config_value(key: Hashable) -> Any:
"""
Retrieves a configuration value thread-safely.
:param key: the key (an immutable, usually a string)
@@ -84,7 +84,7 @@ def get_config_value(key):
try:
cfg = GLOBALS.config
except AttributeError:
GLOBALS.config = {}
GLOBALS.config = dict()
cfg = GLOBALS.config
try:
return cfg[key]
@@ -94,7 +94,7 @@ def get_config_value(key):
return value
def set_config_value(key, value):
def set_config_value(key: Hashable, value: Any):
"""
Changes a configuration value thread-safely. The configuration
value will be set only for the current thread. In order to
@@ -106,7 +106,7 @@ def set_config_value(key, value):
try:
_ = GLOBALS.config
except AttributeError:
GLOBALS.config = {}
GLOBALS.config = dict()
GLOBALS.config[key] = value
......
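
The toolkit.py hunks give the thread-local configuration store explicit types: CONFIG_PRESET becomes Dict[Hashable, Any] and the accessor signatures are annotated. Reassembled from the fragments above, a self-contained sketch of the per-thread lookup; the preset key 'flag' and the fallback to CONFIG_PRESET on a missing key are assumptions, not verbatim DHParser code:

import threading
from typing import Any, Dict, Hashable

GLOBALS = threading.local()
CONFIG_PRESET = dict()  # type: Dict[Hashable, Any]
CONFIG_PRESET['flag'] = False  # made-up preset key

def get_config_value(key: Hashable) -> Any:
    # each thread lazily gets its own config dict
    try:
        cfg = GLOBALS.config
    except AttributeError:
        GLOBALS.config = dict()
        cfg = GLOBALS.config
    try:
        return cfg[key]
    except KeyError:
        value = CONFIG_PRESET[key]
        cfg[key] = value
        return value

def set_config_value(key: Hashable, value: Any) -> None:
    # the change is visible only in the current thread
    try:
        _ = GLOBALS.config
    except AttributeError:
        GLOBALS.config = dict()
    GLOBALS.config[key] = value
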
@@ -239,7 +239,8 @@ def selftest() -> bool:
print("DHParser selftest...")
print("\nSTAGE I: Trying to compile EBNF-Grammar:\n")
builtin_ebnf_parser = get_ebnf_grammar()
ebnf_src = builtin_ebnf_parser.__doc__[builtin_ebnf_parser.__doc__.find('#'):]
docstring = str(builtin_ebnf_parser.__doc__) # type: str
ebnf_src = docstring[docstring.find('#'):]
ebnf_transformer = get_ebnf_transformer()
ebnf_compiler = get_ebnf_compiler('EBNF')
result, errors, _ = compile_source(
......
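
builtin_ebnf_parser.__doc__ has the type Optional[str], so slicing it directly does not type-check; converting with str() first gives mypy a definite str. A tiny sketch of the idiom (first_hash_section is a made-up helper):

def first_hash_section(obj: object) -> str:
    # __doc__ is Optional[str]; str() yields a definite str, although it
    # turns a missing docstring into the literal 'None' (an assert would
    # be stricter)
    docstring = str(obj.__doc__)  # type: str
    return docstring[docstring.find('#'):]
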