10.12., 9:00 - 11:00: Due to updates GitLab may be unavailable for some minutes between 09:00 and 11:00.

Commit 694ca243 authored by Eckhart Arnold

- potential infinite loops are now caught when running parser and an error is...

- potential infinite loops are now caught when running parser and an error is reported; bug fixes, tests
parent b5fd9558
#!/usr/bin/python3
"""DSLsupport.py - Support for domain specific notations for DHParser
"""dsl.py - Support for domain specific notations for DHParser
Copyright 2016 by Eckhart Arnold (arnold@badw.de)
Bavarian Academy of Sciences and Humanities (badw.de)
......@@ -23,15 +23,15 @@ compilation of domain specific languages based on an EBNF-grammar.
import collections
import os
try:
import regex as re
except ImportError:
import re
from .__init__ import __version__
from .EBNFcompiler import EBNFGrammar, EBNF_ASTPipeline, EBNFCompiler
from .toolkit import load_if_file, is_python_code, md5, compile_python_object
from .parsercombinators import GrammarBase, CompilerBase, full_compilation, nil_scanner
from .ebnf import EBNFGrammar, EBNF_ASTPipeline, EBNFCompiler
from .toolkit import load_if_file, is_python_code, compile_python_object
from .parsers import GrammarBase, CompilerBase, full_compilation, nil_scanner
from .syntaxtree import Node
......@@ -39,8 +39,7 @@ __all__ = ['GrammarError',
'CompilationError',
'load_compiler_suite',
'compileDSL',
'run_compiler',
'source_changed']
'run_compiler']
SECTION_MARKER = """\n
......@@ -94,7 +93,7 @@ try:
except ImportError:
import re
from DHParser.toolkit import load_if_file
from DHParser.parsercombinators import GrammarBase, CompilerBase, nil_scanner, \\
from DHParser.parsers import GrammarBase, CompilerBase, nil_scanner, \\
Lookbehind, Lookahead, Alternative, Pop, Required, Token, \\
Optional, NegativeLookbehind, OneOrMore, RegExp, Retrieve, Sequence, RE, Capture, \\
ZeroOrMore, Forward, NegativeLookahead, mixin_comment, full_compilation
......@@ -109,8 +108,7 @@ DHPARSER_COMPILER = '''
def compile_{NAME}(source):
"""Compiles ``source`` and returns (result, errors, ast).
"""
source_text = load_if_file(source)
return full_compilation({NAME}Scanner(source_text),
return full_compilation(source, {NAME}Scanner,
{NAME}Grammar(), {NAME}_ASTPipeline, {NAME}Compiler())
if __name__ == "__main__":
......@@ -139,10 +137,10 @@ def get_grammar_instance(grammar):
if is_python_code(grammar):
parser_py, errors, AST = grammar_src, '', None
else:
parser_py, errors, AST = full_compilation(grammar_src,
EBNFGrammar(), EBNF_ASTPipeline, EBNFCompiler())
parser_py, errors, AST = full_compilation(grammar_src, None,
EBNFGrammar(), EBNF_ASTPipeline, EBNFCompiler())
if errors:
raise GrammarError(errors, grammar_src)
raise GrammarError('\n\n'.join(errors), grammar_src)
parser_root = compile_python_object(DHPARSER_IMPORTS + parser_py, '\w*Grammar$')()
else:
# assume that dsl_grammar is a ParserHQ-object or Grammar class
......@@ -175,9 +173,9 @@ def load_compiler_suite(compiler_suite):
else:
# assume source is an ebnf grammar
parser_py, errors, AST = full_compilation(
source, EBNFGrammar(), EBNF_ASTPipeline, EBNFCompiler())
source, None, EBNFGrammar(), EBNF_ASTPipeline, EBNFCompiler())
if errors:
raise GrammarError(errors, source)
raise GrammarError('\n\n'.join(errors), source)
scanner = nil_scanner
ast = EBNF_ASTPipeline
compiler = EBNFCompiler()
......@@ -196,14 +194,20 @@ def compileDSL(text_or_file, dsl_grammar, ast_pipeline, compiler,
assert isinstance(compiler, CompilerBase)
assert isinstance(ast_pipeline, collections.abc.Sequence) or isinstance(ast_pipeline, dict)
parser_root, grammar_src = get_grammar_instance(dsl_grammar)
src = scanner(load_if_file(text_or_file))
result, errors, AST = full_compilation(src, parser_root, ast_pipeline, compiler)
if errors: raise CompilationError(errors, src, grammar_src, AST)
src = load_if_file(text_or_file)
result, errors, AST = full_compilation(src, scanner, parser_root, ast_pipeline, compiler)
if errors: raise CompilationError('\n\n'.join(errors), src, grammar_src, AST)
return result
def compileEBNF(ebnf_src, ebnf_grammar_obj=None, source_only=False):
"""Compiles an EBNF source file into a Grammar class
"""Compiles an EBNF source file into a Grammar class.
Please note: This functions returns a class which must be
instantiated before calling its parse()-method! Calling the method
directly from the class (which is technically possible in Python)
yields an error message complaining about a missing parameter,
the cause of which may not be obvious at first sight.
Args:
ebnf_src(str): Either the file name of an EBNF grammar or
......@@ -216,7 +220,7 @@ def compileEBNF(ebnf_src, ebnf_grammar_obj=None, source_only=False):
class is returned instead of the class itself.
Returns:
A Grammar class that can be instantiated for parsing a text
which conforms to the language defined by ``ebnf_src``
which conforms to the language defined by ``ebnf_src``.
"""
grammar = ebnf_grammar_obj or EBNFGrammar()
grammar_src = compileDSL(ebnf_src, grammar, EBNF_ASTPipeline, EBNFCompiler())
......@@ -251,8 +255,7 @@ def run_compiler(source_file, compiler_suite="", extension=".xml"):
parser = EBNFGrammar()
trans = EBNF_ASTPipeline
compiler = EBNFCompiler(compiler_name, source)
result, errors, ast = full_compilation(scanner(source), parser,
trans, compiler)
result, errors, ast = full_compilation(source, scanner, parser, trans, compiler)
if errors:
return errors
......@@ -266,7 +269,8 @@ def run_compiler(source_file, compiler_suite="", extension=".xml"):
source = f.read()
intro, imports, scanner, parser, ast, compiler, outro = RX_SECTION_MARKER.split(source)
except (PermissionError, FileNotFoundError, IOError) as error:
intro, outro = '', ''
intro = '#!/usr/bin/python'
outro = DHPARSER_COMPILER.format(NAME=compiler_name)
imports = DHPARSER_IMPORTS
scanner = compiler.gen_scanner_skeleton()
ast = compiler.gen_AST_skeleton()
......@@ -279,7 +283,6 @@ def run_compiler(source_file, compiler_suite="", extension=".xml"):
try:
f = open(rootname + '_compiler.py', 'w', encoding="utf-8")
f.write("#!/usr/bin/python")
f.write(intro)
f.write(SECTION_MARKER.format(marker=SYMBOLS_SECTION))
f.write(imports)
......@@ -293,7 +296,6 @@ def run_compiler(source_file, compiler_suite="", extension=".xml"):
f.write(compiler)
f.write(SECTION_MARKER.format(marker=END_SECTIONS_MARKER))
f.write(outro)
f.write(DHPARSER_COMPILER.format(NAME=compiler_name))
except (PermissionError, FileNotFoundError, IOError) as error:
print('# Could not write file "' + rootname + '_compiler.py" because of: '
+ "\n# ".join(str(error).split('\n)')))
......@@ -318,32 +320,3 @@ def run_compiler(source_file, compiler_suite="", extension=".xml"):
return []
def source_changed(grammar_source, grammar_class):
    """Returns `True` if `grammar_class` does not reflect the latest
    changes of `grammar_source`.

    Parameters:
        grammar_source: File name or string representation of the
            grammar source.
        grammar_class: The parser class representing the grammar,
            or the file name of a compiler suite containing the grammar.

    Returns (bool):
        True, if the source text of the grammar is different from the
        source from which the grammar class was generated.
    """
    grammar = load_if_file(grammar_source)
    # Hash of the current grammar text, salted with the DHParser version.
    chksum = md5(grammar, __version__)
    if isinstance(grammar_class, str):
        # `grammar_class` is the file name of a compiler suite: scan its
        # source code for the stored hash instead of importing it.
        with open(grammar_class, 'r', encoding='utf8') as f:
            pycode = f.read()
        # Raw strings for the regexes: '\w' and '\(' are invalid escape
        # sequences in plain string literals (deprecated since Python 3.6).
        m = re.search(r'class \w*\(GrammarBase\)', pycode)
        if m:
            # Search only after the class header so an unrelated
            # "source_hash__" earlier in the file cannot match.
            m = re.search(r' source_hash__ *= *"([a-z0-9]*)"',
                          pycode[m.span()[1]:])
            return not (m and m.groups() and m.groups()[-1] == chksum)
        else:
            # No grammar class found in the file => treat as changed.
            return True
    else:
        # Assume grammar_class is the class itself: compare stored hash.
        return chksum != grammar_class.source_hash__
#!/usr/bin/python3
"""EBNFcompiler.py - EBNF -> Python-Parser compilation for DHParser
"""ebnf.py - EBNF -> Python-Parser compilation for DHParser
Copyright 2016 by Eckhart Arnold (arnold@badw.de)
Bavarian Academy of Sciences and Humanities (badw.de)
......@@ -21,6 +21,7 @@ permissions and limitations under the License.
# import collections
import keyword
from functools import partial
try:
import regex as re
except ImportError:
......@@ -28,7 +29,7 @@ except ImportError:
from .__init__ import __version__
from .toolkit import load_if_file, escape_re, md5, sane_parser_name
from .parsercombinators import GrammarBase, mixin_comment, Forward, RE, NegativeLookahead, \
from .parsers import GrammarBase, mixin_comment, Forward, RE, NegativeLookahead, \
Alternative, Sequence, Optional, Required, OneOrMore, ZeroOrMore, Token, CompilerBase
from .syntaxtree import Node, remove_enclosing_delimiters, reduce_single_child, \
replace_by_single_child, TOKEN_KEYWORD, remove_expendables, remove_tokens, flatten, \
......@@ -163,25 +164,21 @@ class EBNFCompilerError(Exception):
pass
# Scanner = collections.namedtuple('Scanner',
# 'symbol instantiation_call cls_name cls')
class EBNFCompiler(CompilerBase):
"""Generates a Parser from an abstract syntax tree of a grammar specified
in EBNF-Notation.
"""
COMMENT_KEYWORD = "COMMENT__"
DEFAULT_WHITESPACE = r'[\t ]*'
RESERVED_SYMBOLS = {TOKEN_KEYWORD, WHITESPACE_KEYWORD, COMMENT_KEYWORD}
KNOWN_DIRECTIVES = {'comment', 'whitespace', 'tokens', 'literalws'}
VOWELS = {'A', 'E', 'I', 'O', 'U'} # what about cases like 'hour', 'universe' etc.?
AST_ERROR = "Badly structured syntax tree. " \
"Potentially due to erroneuos AST transformation."
PREFIX_TABLE = [('§', 'Required'), ('&', 'Lookahead'),
('!', 'NegativeLookahead'), ('-&', 'Lookbehind'),
('-!', 'NegativeLookbehind'), ('::', 'Pop'),
(':', 'Retrieve')]
WHITESPACE = {'horizontal': r'[\t ]*', # default: horizontal
'linefeed': r'[ \t]*\n?(?!\s*\n)[ \t]*',
'vertical': r'\s*'}
def __init__(self, grammar_name="", source_text=""):
super(EBNFCompiler, self).__init__()
......@@ -194,13 +191,13 @@ class EBNFCompiler(CompilerBase):
self.rules = set()
self.symbols = set()
self.variables = set()
self.scanner_tokens = set()
self.definition_names = []
self.recursive = set()
self.root = ""
self.directives = {'whitespace': self.DEFAULT_WHITESPACE,
self.directives = {'whitespace': self.WHITESPACE['horizontal'],
'comment': '',
'literalws': ['right']}
'literalws': ['right'],
'tokens': set()}
def gen_scanner_skeleton(self):
name = self.grammar_name + "Scanner"
......@@ -263,8 +260,7 @@ class EBNFCompiler(CompilerBase):
# prepare parser class header and docstring and
# add EBNF grammar to the doc string of the parser class
article = 'an ' if self.grammar_name[0:1].upper() \
in EBNFCompiler.VOWELS else 'a '
article = 'an ' if self.grammar_name[0:1] in "AaEeIiOoUu" else 'a ' # what about 'hour', 'universe' etc.?
declarations = ['class ' + self.grammar_name +
'Grammar(GrammarBase):',
'r"""Parser for ' + article + self.grammar_name +
......@@ -324,7 +320,7 @@ class EBNFCompiler(CompilerBase):
elif not sane_parser_name(rule):
node.add_error('Illegal symbol "%s". Symbols must not start or '
' end with a doube underscore "__".' % rule)
elif rule in self.scanner_tokens:
elif rule in self.directives['tokens']:
node.add_error('Symbol "%s" has already been defined as '
'a scanner token.' % rule)
elif keyword.iskeyword(rule):
......@@ -361,17 +357,17 @@ class EBNFCompiler(CompilerBase):
def directive(self, node):
key = node.result[0].result.lower()
assert key not in self.scanner_tokens
assert key not in self.directives['tokens']
if key in {'comment', 'whitespace'}:
if node.result[1].parser.name == "list_":
if len(node.result[1].result) != 1:
node.add_error('Directive "%s" must have one, but not %i values.' %
(key, len(node.result[1])))
value = self.compile__(node.result[1]).pop()
if value in {'linefeed', 'standard'} and key == 'whitespace':
value = '\s*' if value == "linefeed" else self.DEFAULT_WHITESPACE
if key == 'whitespace' and value in EBNFCompiler.WHITESPACE:
value = EBNFCompiler.WHITESPACE[value] # replace whitespace-name by regex
else:
node.add_error('Value "%" not allowed for directive "%s".' % (value, key))
node.add_error('Value "%s" not allowed for directive "%s".' % (value, key))
else:
value = node.result[1].result.strip("~")
if value != node.result[1].result:
......@@ -382,6 +378,7 @@ class EBNFCompiler(CompilerBase):
elif value[0] + value[-1] == '//':
value = self._check_rx(node, value[1:-1])
self.directives[key] = value
elif key == 'literalws':
value = {item.lower() for item in self.compile__(node.result[1])}
if (len(value - {'left', 'right', 'both', 'none'}) > 0
......@@ -394,11 +391,11 @@ class EBNFCompiler(CompilerBase):
self.directives[key] = list(ws)
elif key == 'tokens':
self.scanner_tokens |= self.compile__(node.result[1])
self.directives['tokens'] |= self.compile__(node.result[1])
else:
node.add_error('Unknown directive %s ! (Known ones are %s .)' %
(key,
', '.join(list(EBNFCompiler.KNOWN_DIRECTIVES))))
', '.join(list(self.directives.keys()))))
return ""
def non_terminal(self, node, parser_class):
......@@ -463,7 +460,7 @@ class EBNFCompiler(CompilerBase):
"AST transformation!")
def symbol(self, node):
if node.result in self.scanner_tokens:
if node.result in self.directives['tokens']:
return 'ScannerToken("' + node.result + '")'
else:
self.symbols.add(node)
......@@ -472,7 +469,7 @@ class EBNFCompiler(CompilerBase):
return node.result
def literal(self, node):
return 'Token(' + ', '.join([node.result]) + ')'
return 'Token(' + node.result.replace('\\', r'\\') + ')' # return 'Token(' + ', '.join([node.result]) + ')' ?
def regexp(self, node):
rx = node.result
......@@ -501,3 +498,34 @@ class EBNFCompiler(CompilerBase):
def list_(self, node):
    """Compile a list node into the set of its whitespace-stripped
    item strings."""
    assert node.children
    items = set()
    for child in node.result:
        items.add(child.result.strip())
    return items
def source_changed(grammar_source, grammar_class):
    """Returns `True` if `grammar_class` does not reflect the latest
    changes of `grammar_source`

    Parameters:
        grammar_source: File name or string representation of the
            grammar source
        grammar_class: the parser class representing the grammar
            or the file name of a compiler suite containing the grammar

    Returns (bool):
        True, if the source text of the grammar is different from the
        source from which the grammar class was generated
    """
    grammar = load_if_file(grammar_source)
    # Hash of the current grammar text, salted with the DHParser version.
    chksum = md5(grammar, __version__)
    if isinstance(grammar_class, str):
        # `grammar_class` is the file name of a compiler suite: look for
        # the hash stored in its source code rather than importing it.
        # grammar_class = load_compiler_suite(grammar_class)[1]
        with open(grammar_class, 'r', encoding='utf8') as f:
            pycode = f.read()
        # NOTE(review): non-raw string — '\w' and '\(' are invalid escape
        # sequences in plain string literals; consider an r'...' prefix.
        m = re.search('class \w*\(GrammarBase\)', pycode)
        if m:
            # Search only after the class header so an unrelated
            # "source_hash__" earlier in the file cannot match.
            m = re.search(' source_hash__ *= *"([a-z0-9]*)"',
                          pycode[m.span()[1]:])
            return not (m and m.groups() and m.groups()[-1] == chksum)
        else:
            # No grammar class found in the file => treat as changed.
            return True
    else:
        # Assume grammar_class is the class itself: compare stored hash.
        return chksum != grammar_class.source_hash__
#!/usr/bin/python3
"""parsercombinators.py - parser combinators for for DHParser
"""parsers.py - parser combinators for for DHParser
Copyright 2016 by Eckhart Arnold (arnold@badw.de)
Bavarian Academy of Sciences and Humanities (badw.de)
......@@ -62,7 +62,7 @@ except ImportError:
from .toolkit import IS_LOGGING, LOGS_DIR, escape_re, sane_parser_name, smart_list
from .syntaxtree import WHITESPACE_KEYWORD, TOKEN_KEYWORD, ZOMBIE_PARSER, Node, \
traverse
from DHParser.toolkit import error_messages
from DHParser.toolkit import load_if_file, error_messages
__all__ = ['HistoryRecord',
'Parser',
......@@ -168,6 +168,7 @@ def add_parser_guard(parser_func):
grammar.moving_forward = False
record = HistoryRecord(grammar.call_stack.copy(), node, len(rest))
grammar.history.append(record)
# print(record.stack, record.status, rest[:20].replace('\n', '|'))
grammar.call_stack.pop()
if node is not None:
......@@ -396,6 +397,27 @@ class GrammarBase:
write_log(errors_only, log_file_name + '_errors')
def dsl_error_msg(parser, error_str):
    """Returns an error message for errors in the parser configuration,
    e.g. errors that result in infinite loops.

    Args:
        parser (Parser): The parser where the error was noticed. Note
            that this is not necessarily the parser that caused the
            error but only where the error became apparent.
        error_str (str): A short string describing the error.

    Returns:
        str: An error message including the call stack if history
            tracking has been turned on in the grammar object.
    """
    msg = ["DSL parser specification error:", error_str, "caught by parser", str(parser)]
    if parser.grammar.history:
        # `stack` is a copy of the call stack (a list, see HistoryRecord);
        # convert to str so that " ".join() below cannot raise TypeError.
        msg.extend(["\nCall stack:", str(parser.grammar.history[-1].stack)])
    else:
        msg.append("\nEnable history tracking in Grammar object to display call stack.")
    return " ".join(msg)
########################################################################
#
# Token and Regular Expression parser classes (i.e. leaf classes)
......@@ -517,7 +539,6 @@ class RE(Parser):
name: The optional name of the parser.
"""
super(RE, self).__init__(name)
# assert wR or regexp == '.' or isinstance(self, Token)
self.wL = wL
self.wR = wR
self.wspLeft = RegExp(wL, WHITESPACE_KEYWORD) if wL else ZOMBIE_PARSER
......@@ -649,10 +670,14 @@ class Optional(UnaryOperator):
class ZeroOrMore(Optional):
def __call__(self, text):
results = ()
while text:
n = len(text) + 1
while text and len(text) < n:
n = len(text)
node, text = self.parser(text)
if not node:
break
if len(text) == n:
node.add_error(dsl_error_msg(self, 'Infinite Loop.'))
results += (node,)
return Node(self, results), text
......@@ -667,10 +692,14 @@ class OneOrMore(UnaryOperator):
def __call__(self, text):
results = ()
text_ = text
while text_:
n = len(text) + 1
while text_ and len(text_) < n:
n = len(text_)
node, text_ = self.parser(text_)
if not node:
break
if len(text_) == n:
node.add_error(dsl_error_msg(self, 'Infinite Loop.'))
results += (node,)
if results == ():
return None, text
......@@ -919,17 +948,21 @@ class CompilerBase:
return result
def full_compilation(source, grammar_base, AST_pipeline, compiler):
"""Compiles a source in three stages:
1. Parsing
2. AST-transformation
3. Compiling.
def full_compilation(source, scanner, parser, AST_pipeline, compiler):
"""Compiles a source in four stages:
1. Scanning (if needed)
2. Parsing
3. AST-transformation
4. Compiling.
The compilations stage is only invoked if no errors occurred in
either of the two previous stages.
Args:
source (str): The input text for compilation
grammar_base (GrammarBase): The GrammarBase object
source (str): The input text for compilation or a the name of a
file containing the input text.
scanner (function): text -> text. A scanner function or None,
if no scanner is needed.
parser (GrammarBase): The GrammarBase object
AST_pipeline (dict or list of dicts): A syntax-tree processing
table or a sequence of processing tables. The first of
these table usually contains the transformations for
......@@ -949,13 +982,16 @@ def full_compilation(source, grammar_base, AST_pipeline, compiler):
"""
assert isinstance(compiler, CompilerBase)
syntax_tree = grammar_base.parse(source)
cname = grammar_base.__class__.__name__
log_file_name = cname[:-7] if cname.endswith('Grammar') else cname
source_text = load_if_file(source)
log_file_name = os.path.basename(os.path.splitext(source)[0]) if source != source_text \
else compiler.__class__.__name__ + '_out'
if scanner is not None:
source_text = scanner(source_text)
syntax_tree = parser.parse(source_text)
syntax_tree.log(log_file_name, ext='.cst')
grammar_base.log_parsing_history()
parser.log_parsing_history(log_file_name)
assert syntax_tree.error_flag or str(syntax_tree) == source, str(syntax_tree)
assert syntax_tree.error_flag or str(syntax_tree) == source_text, str(syntax_tree)
# only compile if there were no syntax errors, for otherwise it is
# likely that error list gets littered with compile error messages
if syntax_tree.error_flag:
......@@ -969,7 +1005,6 @@ def full_compilation(source, grammar_base, AST_pipeline, compiler):
if not errors:
result = compiler.compile__(syntax_tree)
errors = syntax_tree.collect_errors()
messages = error_messages(source, errors)
messages = error_messages(source_text, errors)
return result, messages, syntax_tree
......@@ -182,6 +182,7 @@ class Node:
@pos.setter
def pos(self, pos):
assert isinstance(pos, int)
self._pos = pos
offset = 0
for child in self.children:
......
......@@ -119,16 +119,21 @@ def line_col(text, pos):
return line, column
def error_messages(text, errors):
"""
Converts the list of ``errors`` collected from the root node of the
parse tree of `text` into a human readable (and IDE or editor
parsable text) with line an column numbers. Error messages are
separated by an empty line.
def error_messages(source_text, errors):
"""Returns the sequence or iterator of error objects as an intertor
of error messages with line and column numbers at the beginning.
Args:
source_text (str): The source text on which the errors occurred.
(Needed in order to determine the line and column numbers.)
errors (list): The list of errors as returned by the method
``collect_errors()`` of a Node object
Returns:
a list that contains all error messages in string form. Each
string starts with "line: [Line-No], column: [Column-No]
"""
return "\n\n".join("line: %i, column: %i, error: %s" %
(*line_col(text, err.pos), err.msg)
for err in sorted(list(errors)))
return ["line: %i, column: %i, error: %s" % (*line_col(source_text, err.pos), err.msg)
for err in sorted(list(errors))]
def compact_sexpr(s):
......@@ -158,9 +163,14 @@ def load_if_file(text_or_file):
a multiline string) `text_or_file` is returned.
"""
if text_or_file and text_or_file.find('\n') < 0:
with open(text_or_file, encoding="utf-8") as f:
content = f.read()
return content
try:
with open(text_or_file, encoding="utf-8") as f:
content = f.read()
return content
except FileNotFoundError as error:
if not re.match(r'\w+', text_or_file):
raise FileNotFoundError('Not a valid file: ' + text_or_file +
'\nAdd "\\n" to distinguish source data from a file name!')
else:
return text_or_file
......
#!/usr/bin/python3
#!/usr/bin/python
"""dhparser.py - command line tool for DHParser
......@@ -24,9 +24,10 @@ import os
import sys
from functools import partial
from DHParser.DSLsupport import compileDSL, run_compiler
from DHParser.EBNFcompiler import EBNFGrammar, EBNF_ASTPipeline, EBNFCompiler
from DHParser.parsercombinators import full_compilation
from DHParser.dsl import compileDSL, run_compiler
from DHParser.ebnf import EBNFGrammar, EBNF_ASTPipeline, EBNFCompiler
from DHParser.parsers import full_compilation
def selftest(file_name):
print(file_name)
......@@ -35,10 +36,11 @@ def selftest(file_name):
compiler_name = os.path.basename(os.path.splitext(file_name)[0])
compiler = EBNFCompiler(compiler_name, grammar)
parser = EBNFGrammar()
result, errors, syntax_tree = full_compilation(grammar, parser, EBNF_ASTPipeline, compiler)
result, errors, syntax_tree = full_compilation(grammar, None, parser,
EBNF_ASTPipeline, compiler)
print(result)
if errors:
print(errors)
print('\n\n'.join(errors))
sys.exit(1)
else:
result = compileDSL(grammar, result, EBNF_ASTPipeline, compiler)
......@@ -70,7 +72,7 @@ if __name__ == "__main__":
_errors = run_compiler(sys.argv[1],
sys.argv[2] if len(sys.argv) > 2 else "")
if _errors:
print(_errors)
print('\n\n'.join(_errors))
sys.exit(1)
else:
# run self test
......
# latex Grammar
@ whitespace := /[ \t]*\n?(?!\s*\n)[ \t]*/
@ comment := /%.*(?:\n|$)/
@ whitespace = /[ \t]*\n?(?!\s*\n)[ \t]*/
@ comment = /%.*(?:\n|$)/
genericenv := beginenv sequence endenv
beginenv := "\begin" §( "{" name "}" )
endenv := "\end" §( "{" @name "}" )
genericenv = beginenv sequence endenv
beginenv = "\begin" §( "{" name "}" )
endenv = "\end" §( "{" ::name "}" )
name := ~/\w+/
name = ~/\w+/
genericcmd := command [ config ] block
command := /\\\w+/
config := "[" cfgtext §"]"
genericcmd = command [ config ] block
command = /\\\w+/
config = "[" cfgtext §"]"
sequence := { partext | parblock }
sequence = { partext | parblock }
parblock := "{" { partext | parblock } §"}"
block := "{" { text | block } §"}"
parblock = "{" { partext | parblock } §"}"
block = "{" { text | block } §"}"
partext := text | par
text := cfgtext | brackets