Commit fd15c3cd authored by eckhart's avatar eckhart
Browse files

terminology: disposable parser instead of anonymous parser

parent 73e923c1
......@@ -238,7 +238,7 @@ class Compiler:
result of the compilation.
The method's name is derived from either the node's parser
name or, if the parser is anonymous, the node's parser's class
name or, if the parser is disposable, the node's parser's class
name by adding the prefix ``on_``.
Note that ``compile`` does not call any compilation functions
......
......@@ -399,7 +399,7 @@ CONFIG_PRESET['add_grammar_source_to_parser_docstring'] = False
# r'_' catches names with a leading underscore. The default value is a
# regular expression that matches no string whatsoever.
# Default value: r'..(?<=^)' # never match.
CONFIG_PRESET['default_anonymous_regexp'] = r'..(?<=^)'
CONFIG_PRESET['default_disposable_regexp'] = r'..(?<=^)'
# Default value for implicit insignificant whitespace adjacent to literals.
......
......@@ -258,7 +258,7 @@ only as placeholders to render the definition of the grammar a bit
more readable, not because we are interested in the text that is
captured by the production associated with them in their own right::
>>> anonymize_symbols = '@ anonymous = /_\w+/ \\n'
>>> disposable_symbols = '@ disposable = /_\w+/ \\n'
Instead of passing a comma-separated list of symbols to the directive,
which would also have been possible, we have leveraged our convention
......@@ -267,7 +267,7 @@ symbols that shall be anonymized with a regular expression.
Now, let's examine the effect of these two directives::
>>> grammar = drop_insignificant_wsp + anonymize_symbols + grammar
>>> grammar = drop_insignificant_wsp + disposable_symbols + grammar
>>> parser = create_parser(grammar, 'JSON')
>>> syntax_tree = parser(testdata)
>>> syntax_tree.content
......@@ -454,7 +454,7 @@ class EBNFGrammar(Grammar):
@ whitespace = /\s*/ # whitespace includes linefeed
@ literalws = right # trailing whitespace of literals will be ignored tacitly
@ anonymous = pure_elem, countable, FOLLOW_UP, SYM_REGEX, ANY_SUFFIX, EOF
@ disposable = pure_elem, countable, FOLLOW_UP, SYM_REGEX, ANY_SUFFIX, EOF
@ drop = whitespace, EOF # do not include these even in the concrete syntax tree
@ RNG_BRACE_filter = matching_bracket() # filter or transform content of RNG_BRACE on retrieve
......@@ -592,7 +592,7 @@ class EBNFGrammar(Grammar):
element = Forward()
expression = Forward()
source_hash__ = "3bda01686407a47a9fd0a709bda53ae3"
anonymous__ = re.compile('component$|pure_elem$|countable$|FOLLOW_UP$|SYM_REGEX$|ANY_SUFFIX$|EOF$')
disposable__ = re.compile('component$|pure_elem$|countable$|FOLLOW_UP$|SYM_REGEX$|ANY_SUFFIX$|EOF$')
static_analysis_pending__ = [] # type: List[bool]
parser_initialization__ = ["upon instantiation"]
error_messages__ = {'definition': [
......@@ -794,7 +794,7 @@ class FixedEBNFGrammar(Grammar):
# or python-style: # ... \n, excluding, however, character markers: #x20
@ whitespace = /\s*/ # whitespace includes linefeed
@ literalws = right # trailing whitespace of literals will be ignored tacitly
@ anonymous = component, pure_elem, countable, FOLLOW_UP, SYM_REGEX, ANY_SUFFIX, EOF
@ disposable = component, pure_elem, countable, FOLLOW_UP, SYM_REGEX, ANY_SUFFIX, EOF
@ drop = whitespace, EOF # do not include these even in the concrete syntax tree
@ RNG_BRACE_filter = matching_bracket() # filter or transform content of RNG_BRACE on retrieve
......@@ -934,7 +934,7 @@ class FixedEBNFGrammar(Grammar):
element = Forward()
expression = Forward()
source_hash__ = "d0735678e82e6d7cbf75958080a607ff"
anonymous__ = re.compile('component$|pure_elem$|countable$|FOLLOW_UP$|SYM_REGEX$|ANY_SUFFIX$|EOF$')
disposable__ = re.compile('component$|pure_elem$|countable$|FOLLOW_UP$|SYM_REGEX$|ANY_SUFFIX$|EOF$')
static_analysis_pending__ = [] # type: List[bool]
parser_initialization__ = ["upon instantiation"]
error_messages__ = {
......@@ -1262,13 +1262,13 @@ DROP_VALUES = {DROP_STRINGS, DROP_BACKTICKED, DROP_WSPC, DROP_REGEXP}
ReprType = Union[str, unrepr]
KNOWN_DIRECTIVES = {
VALID_DIRECTIVES = {
'comment': r'Regular expression for comments, e.g. /#.*(?:\n|$)/',
'whitespace': r'Regular expression for whitespace, e.g. /\s*/',
'literalws': 'Controls implicit whitespace adjacent to literals: left, right, both, none',
'ignorecase': 'Controls case-sensitivity: on, off',
'[preprocessor_]tokens': 'List of the names of all preprocessor tokens',
'anonymous': 'List of symbols that are NOT to appear as tag-names',
'disposable': 'List of symbols that are NOT to appear as tag-names',
'drop': 'List of tags to be dropped early from syntax tree, '
'special values: strings, whitespace, regexps',
'$SYMBOL_filer': 'Function that transforms captured values of the given symbol on retrieval',
......@@ -1322,7 +1322,7 @@ class EBNFDirectives:
drop: A set that may contain the elements `DROP_STRINGS` and
`DROP_WSP', 'DROP_REGEXP' or any name of a symbol
of an anonymous parser (e.g. '_linefeed') the results
of a disposable parser (e.g. '_linefeed') the results
of which will be dropped during the parsing process,
already.
......@@ -1467,12 +1467,12 @@ class EBNFCompiler(Compiler):
re_flags: A set of regular expression flags to be added to all
regular expressions found in the current parsing process
anonymous_regexp: A regular expression to identify symbols that stand
disposable_regexp: A regular expression to identify symbols that stand
for parsers that shall yield anonymous nodes. The pattern of
the regular expression is configured in configuration.py but
can also be set by a directive. The default value is a regular
expression that catches names with a leading underscore.
See also `parser.Grammar.anonymous__`
See also `parser.Grammar.disposable__`
python_src: A string that contains the python source code that was
the outcome of the last EBNF-compilation.
......@@ -1548,7 +1548,7 @@ class EBNFCompiler(Compiler):
self.defined_directives = dict() # type: Dict[str, List[Node]]
self.consumed_custom_errors = set() # type: Set[str]
self.consumed_skip_rules = set() # type: Set[str]
self.anonymous_regexp = re.compile(get_config_value('default_anonymous_regexp'))
self.disposable_regexp = re.compile(get_config_value('default_disposable_regexp'))
self.grammar_id += 1
......@@ -2037,8 +2037,8 @@ class EBNFCompiler(Compiler):
+ ('. Grammar:' if self.grammar_source and show_source else '.')]
definitions.append(('parser_initialization__', '["upon instantiation"]'))
definitions.append(('static_analysis_pending__', '[True]'))
definitions.append(('anonymous__',
're.compile(' + repr(self.anonymous_regexp.pattern) + ')'))
definitions.append(('disposable__',
're.compile(' + repr(self.disposable_regexp.pattern) + ')'))
if self.grammar_source:
definitions.append(('source_hash__',
'"%s"' % md5(self.grammar_source, __version__)))
......@@ -2227,12 +2227,12 @@ class EBNFCompiler(Compiler):
nd.result = "".join(parts)
nd.tag_name = "literal"
def add_to_anonymous_regexp(self, pattern):
if self.anonymous_regexp is RX_NEVER_MATCH:
self.anonymous_regexp = re.compile(pattern)
def add_to_disposable_regexp(self, pattern):
if self.disposable_regexp is RX_NEVER_MATCH:
self.disposable_regexp = re.compile(pattern)
else:
old_pattern = self.anonymous_regexp.pattern
self.anonymous_regexp = re.compile('(?:%s)|(?:%s)' % (old_pattern, pattern))
old_pattern = self.disposable_regexp.pattern
self.disposable_regexp = re.compile('(?:%s)|(?:%s)' % (old_pattern, pattern))
def on_directive(self, node: Node) -> str:
for child in node.children:
......@@ -2273,7 +2273,7 @@ class EBNFCompiler(Compiler):
"match the empty string, /%s/ does not." % value)
self.directives[key] = value
elif key == 'anonymous':
elif key == 'disposable':
if node.children[1].tag_name == "regexp":
check_argnum()
re_pattern = node.children[1].content
......@@ -2282,7 +2282,7 @@ class EBNFCompiler(Compiler):
node, "The regular expression r'%s' matches any symbol, "
"which is not allowed!" % re_pattern)
else:
self.add_to_anonymous_regexp(re_pattern)
self.add_to_disposable_regexp(re_pattern)
else:
args = node.children[1:]
assert all(child.tag_name == "symbol" for child in args)
......@@ -2290,7 +2290,7 @@ class EBNFCompiler(Compiler):
for asym in alist:
if asym not in self.symbols:
self.symbols[asym] = node
self.add_to_anonymous_regexp('$|'.join(alist) + '$')
self.add_to_disposable_regexp('$|'.join(alist) + '$')
elif key == 'drop':
if len(node.children) <= 1:
......@@ -2298,25 +2298,25 @@ class EBNFCompiler(Compiler):
unmatched = [] # type: List[str] # dropped syms that are not already anonymous syms
for child in node.children[1:]:
content = child.content
if self.anonymous_regexp.match(content):
if self.disposable_regexp.match(content):
self.directives[key].add(content)
elif content.lower() in DROP_VALUES:
self.directives[key].add(content.lower())
else:
unmatched.append(content)
if self.anonymous_regexp == RX_NEVER_MATCH:
if self.disposable_regexp == RX_NEVER_MATCH:
self.tree.new_error(node, 'Illegal value "%s" for Directive "@ drop"! '
'Should be one of %s or an anonymous parser, where '
'the "@anonymous"-directive must precede the '
'Should be one of %s or a disposable parser, where '
'the "@disposable"-directive must precede the '
'@drop-directive.' % (content, str(DROP_VALUES)))
else:
self.tree.new_error(
node, 'Illegal value "%s" for Directive "@ drop"! Should be one of '
'%s or a string matching r"%s".' % (content, str(DROP_VALUES),
self.anonymous_regexp.pattern))
self.disposable_regexp.pattern))
if unmatched:
self.directives[key].add(content)
self.add_to_anonymous_regexp('$|'.join(unmatched) + '$')
self.add_to_disposable_regexp('$|'.join(unmatched) + '$')
elif key == 'ignorecase':
check_argnum()
......@@ -2391,7 +2391,7 @@ class EBNFCompiler(Compiler):
'the symbolname. Please, write: "%s"' % (kl[0], proper_usage))
else:
self.tree.new_error(node, 'Unknown directive %s ! (Known directives: %s.)' %
(key, ', '.join(k for k in KNOWN_DIRECTIVES.keys())))
(key, ', '.join(k for k in VALID_DIRECTIVES.keys())))
return ""
......@@ -2640,9 +2640,9 @@ class EBNFCompiler(Compiler):
node.result = node.children[1:]
assert prefix in {'::', ':?', ':'}
if self.anonymous_regexp.match(arg):
if self.disposable_regexp.match(arg):
self.tree.new_error(
node, 'Retrieve operator "%s" does not work with anonymous parsers like %s'
node, 'Retrieve operator "%s" does not work with disposable parsers like %s'
% (prefix, arg))
return arg
......
......@@ -341,8 +341,8 @@ class Parser:
The results produced by these parsers can later be retrieved in
the AST by the parser name.
2. *Anonymous parsers* where the name-field just contains the empty
string. AST-transformation of Anonymous parsers can be hooked
2. *Disposable parsers* where the name-field just contains the empty
string. AST-transformation of disposable parsers can be hooked
only to their class name, and not to the individual parser.
Parser objects are callable and parsing is done by calling a parser
......@@ -362,13 +362,12 @@ class Parser:
Attributes and Properties:
pname: The parser's name or a (possibly empty) alias name in case
of an anonymous parser.
pname: The parser's name.
anonymous: A property indicating that the parser remains anonymous
anonymous with respect to the nodes it returns. For performance
disposable: A property indicating that the parser returns
anonymous nodes. For performance
reasons this is implemented as an object variable rather
than a property. This property must always be equal to
than a property. This property should always be equal to
`self.tag_name[0] == ":"`.
drop_content: A property (for performance reasons implemented as
......@@ -410,7 +409,7 @@ class Parser:
def __init__(self) -> None:
# assert isinstance(name, str), str(name)
self.pname = '' # type: str
self.anonymous = True # type: bool
self.disposable = True # type: bool
self.drop_content = False # type: bool
self.tag_name = self.ptype # type: str
self.cycle_detection = set() # type: Set[ApplyFunc]
......@@ -727,14 +726,14 @@ class Parser:
def copy_parser_base_attrs(src: Parser, duplicate: Parser):
"""Duplicates all attributes of the Parser-class from `src` to `duplicate`."""
duplicate.pname = src.pname
duplicate.anonymous = src.anonymous
duplicate.disposable = src.disposable
duplicate.drop_content = src.drop_content
duplicate.tag_name = src.tag_name
def Drop(parser: Parser) -> Parser:
"""Returns the parser with the `parser.drop_content`-property set to `True`."""
assert parser.anonymous, "Parser must be anonymous to be allowed to drop ist content."
assert parser.disposable, "Parser must be anonymous to be allowed to drop ist content."
if isinstance(parser, Forward):
cast(Forward, parser).parser.drop_content = True
parser.drop_content = True
......@@ -905,7 +904,7 @@ class Grammar:
field `parser.pname` contains the variable name after instantiation
of the Grammar class. The parser will never the less remain anonymous
with respect to the tag names of the nodes it generates, if its name
is matched by the `anonymous__` regular expression.
is matched by the `disposable__` regular expression.
If one and the same parser is assigned to several class variables
such as, for example, the parser `expression` in the example above,
which is also assigned to `root__`, the first name sticks.
......@@ -944,12 +943,10 @@ class Grammar:
where a semi-colon ";" is expected) with more informative error
messages.
anonymous__: A regular expression to identify names of parsers that are
disposable__: A regular expression to identify names of parsers that are
assigned to class fields but shall never the less yield anonymous
nodes (i.e. nodes the tag name of which starts with a colon ":"
followed by the parser's class name). The default is to treat all
parsers starting with an underscore as anonymous in addition to those
parsers that are not directly assigned to a class field.
followed by the parser's class name).
parser_initialization__: Before the grammar class (!) has been initialized,
which happens upon the first time it is instantiated (see
......@@ -1112,10 +1109,10 @@ class Grammar:
root__ = PARSER_PLACEHOLDER # type: Parser
# root__ must be overwritten with the root-parser by grammar subclass
parser_initialization__ = ["pending"] # type: List[str]
resume_rules__ = dict() # type: Dict[str, ResumeList]
skip_rules__ = dict() # type: Dict[str, ResumeList]
error_messages__ = dict() # type: Dict[str, Tuple[PatternMatchType, str]]
anonymous__ = RX_NEVER_MATCH # type: RxPatternType
resume_rules__ = dict() # type: Dict[str, ResumeList]
skip_rules__ = dict() # type: Dict[str, ResumeList]
error_messages__ = dict() # type: Dict[str, Tuple[PatternMatchType, str]]
disposable__ = RX_NEVER_MATCH # type: RxPatternType
# some default values
COMMENT__ = r'' # type: str # r'#.*(?:\n|$)'
WHITESPACE__ = r'[\t ]*'
......@@ -1151,15 +1148,15 @@ class Grammar:
cdict = cls.__dict__
for entry, parser in cdict.items():
if isinstance(parser, Parser) and sane_parser_name(entry):
anonymous = True if cls.anonymous__.match(entry) else False
anonymous = True if cls.disposable__.match(entry) else False
assert anonymous or not parser.drop_content, entry
if isinstance(parser, Forward):
if not cast(Forward, parser).parser.pname:
cast(Forward, parser).parser.pname = entry
cast(Forward, parser).parser.anonymous = anonymous
cast(Forward, parser).parser.disposable = anonymous
else:
parser.pname = entry
parser.anonymous = anonymous
parser.disposable = anonymous
if cls != Grammar:
cls.parser_initialization__ = ["done"] # (over-)write subclass-variable
# cls.parser_initialization__[0] = "done"
......@@ -1328,8 +1325,8 @@ class Grammar:
% (parser.pname, str(self.__dict__[parser.pname])))
setattr(self, parser.pname, parser)
parser.tag_name = parser.pname
if parser.anonymous:
parser.tag_name += parser.ptype
if parser.disposable:
parser.tag_name = parser.ptype
self.all_parsers__.add(parser)
parser.grammar = self
......@@ -1777,7 +1774,7 @@ class Text(Parser):
if text.startswith(self.text):
if self.drop_content:
return EMPTY_NODE, text[self.len:]
elif self.text or not self.anonymous:
elif self.text or not self.disposable:
return Node(self.tag_name, self.text, True), text[self.len:]
return EMPTY_NODE, text
return None, text
......@@ -1825,7 +1822,7 @@ class RegExp(Parser):
match = text.match(self.regexp)
if match:
capture = match.group(0)
if capture or not self.anonymous:
if capture or not self.disposable:
end = text.index(match.end())
if self.drop_content:
return EMPTY_NODE, text[end:]
......@@ -1963,16 +1960,14 @@ class CombinedParser(Parser):
assert node is None or isinstance(node, Node)
if self._grammar.flatten_tree__:
if node is not None:
if self.anonymous:
if self.disposable:
if self.drop_content:
return EMPTY_NODE
# if node.anonymous and node.pname:
# node.tag_name = self.tag_name
return node
if node.anonymous:
return Node(self.tag_name, node._result)
return Node(self.tag_name, node)
elif self.anonymous:
elif self.disposable:
return EMPTY_NODE # avoid creation of a node object for anonymous empty nodes
return Node(self.tag_name, ())
if self.drop_content:
......@@ -2000,7 +1995,7 @@ class CombinedParser(Parser):
nr.extend(child.children)
elif child._result or not child.anonymous:
nr.append(child)
if nr or not self.anonymous:
if nr or not self.disposable:
return Node(self.tag_name, tuple(nr))
else:
return EMPTY_NODE
......@@ -2008,7 +2003,7 @@ class CombinedParser(Parser):
elif N == 1:
return self._return_value(results[0])
elif self._grammar.flatten_tree__:
if self.anonymous:
if self.disposable:
return EMPTY_NODE # avoid creation of a node object for anonymous empty nodes
return Node(self.tag_name, ())
return Node(self.tag_name, tuple(results)) # unoptimized code
......@@ -2905,7 +2900,7 @@ class Lookahead(FlowParser):
def _parse(self, text: StringView) -> Tuple[Optional[Node], StringView]:
node, _ = self.parser(text)
if self.sign(node is not None):
return (EMPTY_NODE if self.anonymous else Node(self.tag_name, '')), text
return (EMPTY_NODE if self.disposable else Node(self.tag_name, '')), text
else:
return None, text
......@@ -3175,7 +3170,7 @@ class Retrieve(ContextSensitive):
"""Returns a tag name for the retrieved node. If the Retrieve-parser
has a tag name, this overrides the tag name of the retrieved symbol's
parser."""
if self.anonymous or not self.tag_name:
if self.disposable or not self.tag_name:
if self.parser.pname:
return self.parser.tag_name
# self.parser is a Forward-Parser, so pick the name of its encapsulated parser
......@@ -3303,7 +3298,7 @@ class Synonym(UnaryParser):
if node is not None:
if self.drop_content:
return EMPTY_NODE, text
if not self.anonymous:
if not self.disposable:
if node is EMPTY_NODE:
return Node(self.tag_name, ''), text
if node.anonymous:
......@@ -3363,7 +3358,7 @@ class Forward(UnaryParser):
parser = copy.deepcopy(self.parser, memo)
duplicate.parser = parser
duplicate.pname = self.pname # Forward-Parsers should not have a name!
duplicate.anonymous = self.anonymous
duplicate.disposable = self.disposable
duplicate.tag_name = self.tag_name # Forward-Parser should not have a tag name!
duplicate.drop_content = parser.drop_content
return duplicate
......
......@@ -1005,7 +1005,7 @@ class Node: # (collections.abc.Sized): Base class omitted for cython-compatibil
practice to remove (or name) all anonymous nodes during the
AST-transformation.
"""
return not self.tag_name or self.tag_name.find(':') >= 0
return not self.tag_name or self.tag_name[0] == ':' # self.tag_name.find(':') >= 0
# node content ###
......
......@@ -10,7 +10,7 @@
@ literalws = right # literals have implicit whitespace on the right hand side
@ comment = /#.*/ # comments range from a '#'-character to the end of the line
@ ignorecase = False # literals and regular expressions are case-sensitive
# @ anonymous = EOF # EOF is considered to be an anonymous parser that can be reduced
# @ disposable = EOF # EOF is considered to be a disposable parser that can be reduced
# @ drop = EOR # ,whitespace, strings # EOF, whitespace and string literals will be dropped
......
......@@ -108,7 +108,7 @@ def trace_history(self: Parser, text: StringView) -> Tuple[Optional[Node], Strin
# Don't track returning parsers except in case an error has occurred!
if ((self.tag_name != WHITESPACE_PTYPE)
and (grammar.moving_forward__
or (not self.anonymous
or (not self.disposable
and (node
or grammar.history__ and grammar.history__[-1].node)))):
# record history
......
......@@ -496,7 +496,7 @@ def is_named(context: TreeContext) -> bool:
def is_anonymous(context: TreeContext) -> bool:
"""Returns ``True`` if the current node's parser is an anonymous parser."""
"""Returns ``True`` if the current node is anonymous."""
return context[-1].anonymous
......
......@@ -75,7 +75,7 @@ class ArithmeticGrammar(Grammar):
"""
expression = Forward()
source_hash__ = "2a01036cad49be914c8bb1cb13c532c7"
anonymous__ = re.compile('..(?<=^)')
disposable__ = re.compile('..(?<=^)')
static_analysis_pending__ = [] # type: List[bool]
parser_initialization__ = ["upon instantiation"]
COMMENT__ = r'#.*'
......
......@@ -62,7 +62,7 @@ class ArithmeticGrammar(Grammar):
"""
expression = Forward()
source_hash__ = "2a01036cad49be914c8bb1cb13c532c7"
anonymous__ = re.compile('..(?<=^)')
disposable__ = re.compile('..(?<=^)')
static_analysis_pending__ = [] # type: List[bool]
parser_initialization__ = ["upon instantiation"]
COMMENT__ = r'#.*'
......
......@@ -76,7 +76,7 @@ class LameArithmeticGrammar(Grammar):
expr = Forward()
term = Forward()
source_hash__ = "69ae2dadf5f31fee7d8ec0d09b3a8659"
anonymous__ = re.compile('..(?<=^)')
disposable__ = re.compile('..(?<=^)')
static_analysis_pending__ = [] # type: List[bool]
parser_initialization__ = ["upon instantiation"]
COMMENT__ = r''
......
......@@ -65,7 +65,7 @@ class ArithmeticRightRecursiveGrammar(Grammar):
tail = Forward()
term = Forward()
source_hash__ = "34a84d77ec6f6b92070f9e4fd0a5d3ca"
anonymous__ = re.compile('..(?<=^)')
disposable__ = re.compile('..(?<=^)')
static_analysis_pending__ = [] # type: List[bool]
parser_initialization__ = ["upon instantiation"]
COMMENT__ = r'#.*'
......
......@@ -64,7 +64,7 @@ class ArithmeticRightRecursiveGrammar(Grammar):
expression = Forward()
term = Forward()
source_hash__ = "c26bf1eb08a888559f192b19514bc772"
anonymous__ = re.compile('..(?<=^)')
disposable__ = re.compile('..(?<=^)')
static_analysis_pending__ = [] # type: List[bool]
parser_initialization__ = ["upon instantiation"]
COMMENT__ = r'#.*'
......
......@@ -62,7 +62,7 @@ class ArithmeticSimpleGrammar(Grammar):
"""
expression = Forward()
source_hash__ = "b71be6a6745f20dda18beebbda77902b"
anonymous__ = re.compile('..(?<=^)')
disposable__ = re.compile('..(?<=^)')
static_analysis_pending__ = [] # type: List[bool]
parser_initialization__ = ["upon instantiation"]
COMMENT__ = r'#.*'
......
......@@ -60,7 +60,7 @@ class BibTeXGrammar(Grammar):
"""
text = Forward()
source_hash__ = "f070f9a8eaff76cdd1669dcb63d8b8f3"
anonymous__ = re.compile('..(?<=^)')
disposable__ = re.compile('..(?<=^)')
static_analysis_pending__ = [] # type: List[bool]
parser_initialization__ = ["upon instantiation"]
COMMENT__ = r'(?i)%[^\n]*\n'
......
......@@ -25,7 +25,7 @@
# or python-style: # ... \n, excluding, however, character markers: #x20
@ whitespace = /\s*/ # whitespace includes linefeed
@ literalws = right # trailing whitespace of literals will be ignored tacitly
@ anonymous = component, pure_elem, countable, FOLLOW_UP, SYM_REGEX, ANY_SUFFIX, EOF
@ disposable = component, pure_elem, countable, FOLLOW_UP, SYM_REGEX, ANY_SUFFIX, EOF
@ drop = whitespace, EOF # do not include these even in the concrete syntax tree
@ RNG_BRACE_filter = matching_bracket() # filter or transform content of RNG_BRACE on retrieve
......
......@@ -76,7 +76,7 @@ class EBNFGrammar(Grammar):
element = Forward()
expression = Forward()
source_hash__ = "d72459c32970e09870946ca46fb612a8"
anonymous__ = re.compile('component$|pure_elem$|countable$|FOLLOW_UP$|SYM_REGEX$|ANY_SUFFIX$|EOF$')
disposable__ = re.compile('component$|pure_elem$|countable$|FOLLOW_UP$|SYM_REGEX$|ANY_SUFFIX$|EOF$')
static_analysis_pending__ = [] # type: List[bool]
parser_initialization__ = ["upon instantiation"]
error_messages__ = {'definition': [(re.compile(r','), 'Delimiter "," not expected in definition!\\nEither this was meant to be a directive and the directive symbol @ is missing\\nor the error is due to inconsistent use of the comma as a delimiter\\nfor the elements of a sequence.')]}
......
......@@ -208,7 +208,7 @@ class EBNFLanguageServerProtocol:
https://langserver.org/
"""
completion_fields = ['label', 'insertText', 'insertTextFormat', 'documentation']
completions = [['@ anonymous', '@ anonymous = /${1:_\\w+}/', 2,
completions = [['@ disposable', '@ disposable = /${1:_\\w+}/', 2,
'List of symbols or a regular expression to identify those definitions '
'that shall not yield named tags in the syntax tree.'],
['@ comment', '@ comment = /${1:#.*(?:\\n|$)}/', 2,
......
......@@ -14,7 +14,7 @@
# or python-style: # ... \n, excluding, however, character markers: #x20
@ whitespace = /\s*/ # whitespace includes linefeed
@ literalws = right # trailing whitespace of literals will be ignored tacitly
@ anonymous = component, pure_elem, countable, FOLLOW_UP, SYM_REGEX, ANY_SUFFIX, EOF
@ disposable = component, pure_elem, countable, FOLLOW_UP, SYM_REGEX, ANY_SUFFIX, EOF
@ drop = whitespace, EOF # do not include these even in the concrete syntax tree
@ RNG_BRACE_filter = matching_bracket() # filter or transform content of RNG_BRACE on retrieve
......
......@@ -77,7 +77,7 @@ class FixedEBNFGrammar(Grammar):
element = Forward()
expression = Forward()
source_hash__ = "3db954fa768f359924b256e32786fd0c"
anonymous__ = re.compile('component$|pure_elem$|countable$|FOLLOW_UP$|SYM_REGEX$|ANY_SUFFIX$|EOF$')
disposable__ = re.compile('component$|pure_elem$|countable$|FOLLOW_UP$|SYM_REGEX$|ANY_SUFFIX$|EOF$')
static_analysis_pending__ = [] # type: List[bool]
parser_initialization__ = ["upon instantiation"]
error_messages__ = {'definition': [(re.compile(r','), 'Delimiter "," not expected in definition!\\nEither this was meant to be a directive and the directive symbol @ is missing\\nor the error is due to inconsistent use of the comma as a delimiter\\nfor the elements of a sequence.')]}
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment