Commit 603b99bb authored by Eckhart Arnold

drop empty anonymous nodes already during parsing

parent d004ab8c
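
In outline, the diff below does two things: it replaces the ZombieParser/ZOMBIE_* singletons with plain placeholder objects (PARSER_PLACEHOLDER, GRAMMAR_PLACEHOLDER, ROOTNODE_PLACEHOLDER and the ZOMBIE_TAG constant), and it lets parsers such as Whitespace and Option return one shared EMPTY_NODE (a FrozenNode) instead of allocating a fresh Node for every empty anonymous match, which Series, AllOf and SomeOf then leave out of their results. The following sketch is not code from this commit; it is a deliberately simplified illustration of that second idea, in which only the names Node, FrozenNode and EMPTY_NODE mirror the diff and everything else is hypothetical.

# Simplified sketch only -- not the actual DHParser implementation.
import re

class Node:
    """Minimal stand-in for DHParser's Node: a tag name plus a result."""
    def __init__(self, tag_name: str, result) -> None:
        self.tag_name = tag_name
        self._result = result

class FrozenNode(Node):
    """A node meant to be shared read-only, like the EMPTY_NODE singleton."""

EMPTY_NODE = FrozenNode(':EMPTY__', '')   # one shared object for all empty anonymous results

def parse_whitespace(text: str, regexp, pname: str = ''):
    """Mirrors the pattern of Whitespace._parse in the diff: build a real Node
    only for named or non-empty matches, otherwise hand back EMPTY_NODE."""
    match = regexp.match(text)
    if match:
        capture = match.group(0)
        if capture or pname:
            return Node(':Whitespace', capture), text[match.end():]
        return EMPTY_NODE, text   # empty anonymous match: no new Node object
    return None, text

def collect_children(nodes):
    """Simplified version of the `if node._result or parser.pname` check used in
    Series/AllOf/SomeOf: empty anonymous results are dropped, not stored."""
    return tuple(nd for nd in nodes if nd is not EMPTY_NODE)

if __name__ == '__main__':
    ws = re.compile(r'\s*')
    node, rest = parse_whitespace('  x', ws)   # real Node with the blanks captured
    empty, _ = parse_whitespace('x', ws)       # shared EMPTY_NODE, nothing allocated
    print([nd.tag_name for nd in collect_children([node, empty])])  # [':Whitespace']

The point of the change is allocation: anonymous whitespace and optional parts that match nothing no longer produce throw-away Node objects, and since every former Node(None, ...) call now passes ZOMBIE_TAG, the final hunk can narrow the tag_name parameter of Node.__init__ from Optional[str] to str.
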
@@ -38,7 +38,7 @@ import copy
 import re
 from DHParser.preprocess import strip_tokens, with_source_mapping, PreprocessorFunc
-from DHParser.syntaxtree import Node, RootNode, ZOMBIE_ROOTNODE, StrictResultType
+from DHParser.syntaxtree import Node, RootNode, ZOMBIE_TAG, StrictResultType
 from DHParser.transform import TransformationFunc
 from DHParser.parse import Grammar
 from DHParser.error import adjust_error_locations, is_error, Error

@@ -71,6 +71,9 @@ def visitor_name(node_name: str) -> str:
     return 'on_' + node_name


+ROOTNODE_PLACEHOLDER = RootNode()
+
+
 class Compiler:
     """
     Class Compiler is the abstract base class for compilers. Compiler

@@ -104,7 +107,7 @@ class Compiler:
         self._reset()

     def _reset(self):
-        self.tree = ZOMBIE_ROOTNODE  # type: RootNode
+        self.tree = ROOTNODE_PLACEHOLDER  # type: RootNode
         self.context = []  # type: List[Node]
         self._dirty_flag = False

@@ -116,6 +119,7 @@ class Compiler:
         (This very much depends on the kind and purpose of the
         implemented compiler.)
         """
+        assert root.tag_name != ZOMBIE_TAG
         if self._dirty_flag:
             self._reset()
         self._dirty_flag = True

...

@@ -37,8 +37,8 @@ from DHParser.error import Error, linebreaks, line_col
 from DHParser.log import is_logging, HistoryRecord
 from DHParser.preprocess import BEGIN_TOKEN, END_TOKEN, RX_TOKEN_NAME
 from DHParser.stringview import StringView, EMPTY_STRING_VIEW
-from DHParser.syntaxtree import Node, RootNode, WHITESPACE_PTYPE, \
-    TOKEN_PTYPE, ZOMBIE, ResultType
+from DHParser.syntaxtree import Node, FrozenNode, RootNode, WHITESPACE_PTYPE, \
+    TOKEN_PTYPE, ZOMBIE_TAG, ResultType
 from DHParser.toolkit import sane_parser_name, escape_control_characters, re, typing, cython
 from typing import Callable, cast, List, Tuple, Set, Dict, DefaultDict, Union, Optional, Any

@@ -78,6 +78,10 @@ __all__ = ('Parser',
            'Forward')


+
+EMPTY_NODE = FrozenNode(':EMPTY__', '')
+
+
 ########################################################################
 #
 # Parser base class

@@ -140,7 +144,7 @@ def reentry_point(rest: StringView, rules: ResumeList) -> int:
     if i == upper_limit:
         i = -1
     return i
-    # return Node(None, rest[:i]), rest[i:]
+    # return Node(ZOMBIE_TAG, rest[:i]), rest[i:]


 ApplyFunc = Callable[['Parser'], None]

@@ -201,7 +205,7 @@ class Parser:
             (recursively) a second time, if it has already been
             applied to this parser.

-        grammar: A reference to the Grammar object to which the parser
+        _grammar: A reference to the Grammar object to which the parser
            is attached.
     """

@@ -209,8 +213,10 @@ class Parser:
         # assert isinstance(name, str), str(name)
         self.pname = ''  # type: str
         self.tag_name = self.ptype  # type: str
-        if not isinstance(self, ZombieParser):
-            self._grammar = ZOMBIE_GRAMMAR  # type: Grammar
+        try:
+            self._grammar = GRAMMAR_PLACEHOLDER  # type: Grammar
+        except NameError:
+            pass
         self.reset()

     def __deepcopy__(self, memo):

@@ -256,7 +262,7 @@ class Parser:
         the business intelligence that is common to all parsers. The actual parsing is
         done in the overridden method `_parse()`.
         """
-        grammar = self._grammar  # read protected member instead property to avoid function call
+        grammar = self._grammar
         location = grammar.document_length__ - len(text)

         try:

@@ -294,7 +300,7 @@ class Parser:
                     # apply reentry-rule or catch error at root-parser
                     if i < 0:
                         i = 1
-                    nd = Node(None, rest[:i])
+                    nd = Node(ZOMBIE_TAG, rest[:i])
                     rest = rest[i:]
                     assert error.node.children
                     if error.first_throw:
@@ -303,11 +309,11 @@ class Parser:
                     else:
                         # TODO: if appropriate, append an error message that says where parsing continues
                         # it could, however, only be attached to the next(!) node (how?)
-                        node = Node(self.tag_name, (Node(None, text[:gap]), error.node, nd))
+                        node = Node(self.tag_name, (Node(ZOMBIE_TAG, text[:gap]), error.node, nd))
                 elif error.first_throw:
                     raise ParserError(error.node, error.rest, first_throw=False)
                 else:
-                    result = (Node(None, text[:gap]), error.node) if gap else error.node  # type: ResultType
+                    result = (Node(ZOMBIE_TAG, text[:gap]), error.node) if gap else error.node  # type: ResultType
                     raise ParserError(Node(self.tag_name, result), text, first_throw=False)

             if grammar.left_recursion_handling__:
@@ -326,9 +332,10 @@ class Parser:
                     # otherwise also cache None-results
                     self.visited[location] = (None, rest)
             else:
                 assert node._pos < 0
+                assert node._pos < 0 or node == EMPTY_NODE
                 node._pos = location
-                assert node._pos >= 0, str("%i < %i" % (grammar.document_length__, location))
+                assert node._pos >= 0 or node == EMPTY_NODE, \
+                    str("%i < %i" % (grammar.document_length__, location))
                 if (grammar.last_rb__loc__ < location
                         and (grammar.memoization__ or location in grammar.recursion_locations__)):
                     # - variable manipulating parsers will not be entered into the cache,

@@ -352,7 +359,7 @@ class Parser:
                 grammar.call_stack__.pop()

         except RecursionError:
-            node = Node(None, str(text[:min(10, max(1, text.find("\n")))]) + " ...")
+            node = Node(ZOMBIE_TAG, str(text[:min(10, max(1, text.find("\n")))]) + " ...")
            node._pos = location
            grammar.tree__.new_error(node, "maximum recursion depth of parser reached; "
                                           "potentially due to too many errors!")

@@ -376,28 +383,34 @@ class Parser:
         the results or None as well as the text at the position right behind
         the matching string."""
         raise NotImplementedError
-        # return None, text  # default behaviour: don't match

     @property
     def grammar(self) -> 'Grammar':
-        if self._grammar != ZOMBIE_GRAMMAR:
-            return self._grammar
-        else:
-            raise AssertionError('Grammar has not yet been set!')
+        try:
+            grammar = self._grammar
+            if self._grammar != GRAMMAR_PLACEHOLDER:
+                return self._grammar
+            else:
+                raise AssertionError('Grammar has not yet been set!')
+        except AttributeError:
+            raise AssertionError('Parser placeholder does not have a grammar!')

     @grammar.setter
     def grammar(self, grammar: 'Grammar'):
-        if self._grammar == ZOMBIE_GRAMMAR:
-            self._grammar = grammar
-            self._grammar_assigned_notifier()
-        elif self._grammar != grammar:
-            raise AssertionError("Parser has already been assigned"
-                                 "to a different Grammar object!")
-
-    def _grammar_assigned_notifier(self):
-        """A function that notifies the parser object that it has been
-        assigned to a grammar."""
-        pass
+        try:
+            if self._grammar == GRAMMAR_PLACEHOLDER:
+                self._grammar = grammar
+                # self._grammar_assigned_notifier()
+            elif self._grammar != grammar:
+                raise AssertionError("Parser has already been assigned"
+                                     "to a different Grammar object!")
+        except AttributeError:
+            pass  # ignore setting of grammar attribute for placeholder parser
+
+    # def _grammar_assigned_notifier(self):
+    #     """A function that notifies the parser object that it has been
+    #     assigned to a grammar."""
+    #     pass

     def _apply(self, func: ApplyFunc, flip: FlagFunc) -> bool:
         """

@@ -449,50 +462,7 @@ class Parser:
         self._apply(func, positive_flip)


-class ZombieParser(Parser):
-    """
-    Serves as a substitute for a Parser instance.
-
-    ``ZombieParser`` is the class of the singelton object
-    ``ZOMBIE_PARSER``. The ``ZOMBIE_PARSER`` has a name and can be
-    called, but it never matches. It serves as a substitute where only
-    these (or one of these properties) is needed, but no real Parser-
-    object is instantiated.
-    """
-    alive = [False]  # cython compatibility: cython forbits writing to class attributes
-
-    def __init__(self):
-        super().__init__()
-        self.pname = ZOMBIE
-        self.tag_name = ZOMBIE
-        # no need to call super class constructor
-        assert not ZombieParser.alive[0], "There can be only one!"
-        assert self.__class__ == ZombieParser, "No derivatives, please!"
-        ZombieParser.alive[0] = True
-        self.reset()
-
-    def __copy__(self):
-        return self
-
-    def __deepcopy__(self, memo):
-        return self
-
-    def __call__(self, text):
-        raise AssertionError("Better call Saul ;-)")
-
-    @property
-    def grammar(self) -> 'Grammar':
-        raise AssertionError("Zombie parser doesn't have a grammar!")
-
-    @grammar.setter
-    def grammar(self, grammar: 'Grammar'):
-        raise AssertionError('Cannot assign a grammar a zombie parser or vice versa!')
-
-    def apply(self, func: ApplyFunc):
-        return "Eaten alive..."
-
-
-ZOMBIE_PARSER = ZombieParser()
+PARSER_PLACEHOLDER = Parser()


 ########################################################################
@@ -702,7 +672,7 @@ class Grammar:
     recursion.
     """
     python_src__ = ''  # type: str
-    root__ = ZOMBIE_PARSER  # type: Parser
+    root__ = PARSER_PLACEHOLDER  # type: Parser
     # root__ must be overwritten with the root-parser by grammar subclass
     parser_initialization__ = ["pending"]  # type: list[str]
     resume_rules__ = dict()  # type: Dict[str, ResumeList]

@@ -885,7 +855,7 @@ class Grammar:
         if not rest:
             result, _ = parser(rest)
             if result is None:
-                result = Node(None, '').init_pos(0)
+                result = Node(ZOMBIE_TAG, '').init_pos(0)
                 self.tree__.new_error(result,
                                       'Parser "%s" did not match empty document.' % str(parser),
                                       Error.PARSER_DID_NOT_MATCH)

@@ -924,7 +894,7 @@ class Grammar:
                                if len(stitches) < MAX_DROPOUTS
                                else " too often! Terminating parser.")
                error_code = Error.PARSER_STOPPED_BEFORE_END
-                stitches.append(Node(None, skip).init_pos(tail_pos(stitches)))
+                stitches.append(Node(ZOMBIE_TAG, skip).init_pos(tail_pos(stitches)))
                self.tree__.new_error(stitches[-1], error_msg, error_code)
                if self.history_tracking__:
                    # # some parsers may have matched and left history records with nodes != None.

@@ -941,12 +911,12 @@ class Grammar:
             self.history_tracking__ = False
         if stitches:
             if rest:
-                stitches.append(Node(None, rest))
+                stitches.append(Node(ZOMBIE_TAG, rest))
             #try:
-            result = Node(None, tuple(stitches)).init_pos(0)
+            result = Node(ZOMBIE_TAG, tuple(stitches)).init_pos(0)
             # except AssertionError as error:
             #     # some debugging output
-            #     print(Node(None, tuple(stitches)).as_sxpr())
+            #     print(Node(ZOMBIE_TAG, tuple(stitches)).as_sxpr())
             #     raise error
         if any(self.variables__.values()):
             error_msg = "Capture-retrieve-stack not empty after end of parsing: " \

@@ -957,7 +927,7 @@ class Grammar:
                 # add another child node at the end to ensure that the position
                 # of the error will be the end of the text. Otherwise, the error
                 # message above ("...after end of parsing") would appear illogical.
-                error_node = Node(ZOMBIE, '').init_pos(tail_pos(result.children))
+                error_node = Node(ZOMBIE_TAG, '').init_pos(tail_pos(result.children))
                 self.tree__.new_error(error_node, error_msg, error_code)
                 result.result = result.children + (error_node,)
             else:

@@ -1028,7 +998,7 @@ def dsl_error_msg(parser: Parser, error_str: str) -> str:
     return " ".join(msg)


-ZOMBIE_GRAMMAR = Grammar()
+GRAMMAR_PLACEHOLDER = Grammar()


 ########################################################################
@@ -1163,21 +1133,6 @@ class RegExp(Parser):
         if match:
             capture = match.group(0)
             end = text.index(match.end())
-            # regular expression must never match preprocessor-tokens!
-            # Should never happen, anyway, as long as text characters do not
-            # fall into the range below 0x20
-            # # Find a better solution here? e.g. static checking/re-mangling at compile time
-            # # Needs testing!!!
-            # i = capture.find(BEGIN_TOKEN)
-            # if i >= 0:
-            #     capture = capture[:i]
-            #     end = i
-            # m = capture[:end].match(self.regexp)
-            # if m:
-            #     capture = m.group(0)
-            #     end = text.index(m.end())
-            # else:
-            #     return None, text
             return Node(self.tag_name, capture, True), text[end:]
         return None, text

@@ -1217,13 +1172,17 @@ class Whitespace(RegExp):
     is a RegExp-parser for whitespace."""
     assert WHITESPACE_PTYPE == ":Whitespace"

-    # def _parse(self, text: StringView) -> Tuple[Optional[Node], StringView]:
-    #     match = text.match(self.regexp)
-    #     if match:
-    #         capture = match.group(0)
-    #         end = text.index(match.end())
-    #         return Node(self.tag_name, capture, True), text[end:]
-    #     return None, text
+    def _parse(self, text: StringView) -> Tuple[Optional[Node], StringView]:
+        match = text.match(self.regexp)
+        if match:
+            capture = match.group(0)
+            if capture or self.pname:
+                end = text.index(match.end())
+                return Node(self.tag_name, capture, True), text[end:]
+            else:
+                # avoid creation of a node object for empty nodes
+                return EMPTY_NODE, text
+        return None, text

     def __repr__(self):
         return '~'

@@ -1318,7 +1277,7 @@ class Option(UnaryOperator):
     >>> Grammar(number)('3.14159').content
     '3.14159'
     >>> Grammar(number)('3.14159').structure
-    '(:Series (:Option) (:RegExp "3") (:Option (:RegExp ".14159")))'
+    '(:Series (:RegExp "3") (:Option (:RegExp ".14159")))'
     >>> Grammar(number)('-1').content
     '-1'

@@ -1335,9 +1294,13 @@ class Option(UnaryOperator):
     def _parse(self, text: StringView) -> Tuple[Optional[Node], StringView]:
         node, text = self.parser(text)
-        if node:
+        if node and (node._result or self.parser.pname):
             return Node(self.tag_name, node), text
-        return Node(self.tag_name, ()), text
+        if self.pname:
+            return Node(self.tag_name, ()), text
+        else:
+            # avoid creation of a node object for empty nodes
+            return EMPTY_NODE, text

     def __repr__(self):
         return '[' + (self.parser.repr[1:-1] if isinstance(self.parser, Alternative)

@@ -1451,7 +1414,7 @@ def mandatory_violation(grammar: Grammar,
                         reloc: int) -> Tuple[Error, Node, StringView]:
     i = reloc if reloc >= 0 else 0
     location = grammar.document_length__ - len(text_)
-    err_node = Node(None, text_[:i]).init_pos(location)
+    err_node = Node(ZOMBIE_TAG, text_[:i]).init_pos(location)
     found = text_[:10].replace('\n', '\\n ')
     for search, message in err_msgs:
         rxs = not isinstance(search, str)

@@ -1557,9 +1520,10 @@ class Series(NaryOperator):
                 else:
                     results += (node,)
                     break
-            results += (node,)
+            if node._result or parser.pname:  # optimization: drop anonymous empty nodes
+                results += (node,)
         # assert len(results) <= len(self.parsers) \
-        #     or len(self.parsers) >= len([p for p in results if p.tag_name != ZOMBIE])
+        #     or len(self.parsers) >= len([p for p in results if p.tag_name != ZOMBIE_TAG])
         node = Node(self.tag_name, results)
         if error:
             raise ParserError(node, text, first_throw=True)

@@ -1754,8 +1718,9 @@ class AllOf(NaryOperator):
             for i, parser in enumerate(parsers):
                 node, text__ = parser(text_)
                 if node:
-                    results += (node,)
-                    text_ = text__
+                    if node._result or parser.pname:
+                        results += (node,)
+                    text_ = text__
                     del parsers[i]
                     break
             else:

@@ -1770,7 +1735,7 @@ class AllOf(NaryOperator):
                 if reloc < 0:
                     parsers = []
         assert len(results) <= len(self.parsers) \
-            or len(self.parsers) >= len([p for p in results if p.tag_name != ZOMBIE])
+            or len(self.parsers) >= len([p for p in results if p.tag_name != ZOMBIE_TAG])
         node = Node(self.tag_name, results)
         if error:
             raise ParserError(node, text, first_throw=True)

@@ -1818,8 +1783,9 @@ class SomeOf(NaryOperator):
             for i, parser in enumerate(parsers):
                 node, text__ = parser(text_)
                 if node:
-                    results += (node,)
-                    text_ = text__
+                    if node._result or parser.pname:
+                        results += (node,)
+                    text_ = text__
                     del parsers[i]
                     break
             else:

...

@@ -34,14 +34,14 @@ from typing import Callable, cast, Iterator, List, AbstractSet, Set, Union, Tupl

 __all__ = ('WHITESPACE_PTYPE',
            'TOKEN_PTYPE',
-           'ZOMBIE',
-           'ZOMBIE_NODE',
+           'ZOMBIE_TAG',
+           'PLACEHOLDER',
            'ResultType',
            'StrictResultType',
            'ChildrenType',
            'Node',
+           'FrozenNode',
            'RootNode',
-           'ZOMBIE_ROOTNODE',
            'parse_sxpr',
            'parse_xml',
            'flatten_sxpr',

@@ -58,7 +58,7 @@ __all__ = ('WHITESPACE_PTYPE',
 WHITESPACE_PTYPE = ':Whitespace'
 TOKEN_PTYPE = ':Token'

-ZOMBIE = "__ZOMBIE__"
+ZOMBIE_TAG = "__ZOMBIE__"

 #######################################################################
 #

@@ -162,7 +162,7 @@ class Node: # (collections.abc.Sized): Base class omitted for cython-compatibil
     __slots__ = '_result', 'children', '_len', '_pos', 'tag_name', 'errors', '_xml_attr', '_content'

-    def __init__(self, tag_name: Optional[str], result: ResultType, leafhint: bool = False) -> None:
+    def __init__(self, tag_name: str, result: ResultType, leafhint: bool = False) -> None: