In January 2021 we will introduce a 10 GB quota for project repositories. Higher limits for individual projects will be available on request. Please see https://doku.lrz.de/display/PUBLIC/GitLab for more information.

Commit c74091ca authored by Eckhart Arnold's avatar Eckhart Arnold

- added type annotations for better documentation and mypy type checks

parent 4589c6b6
......@@ -20,17 +20,18 @@ compilation of domain specific languages based on an EBNF-grammar.
"""
import os
try:
import regex as re
except ImportError:
import re
from typing import Any, Tuple, cast
from .ebnf import EBNFTransformer, grammar_changed, \
get_ebnf_scanner, get_ebnf_grammar, get_ebnf_transformer, get_ebnf_compiler
from .toolkit import logging, load_if_file, is_python_code, compile_python_object
from .parsers import Grammar, CompilerBase, compile_source, nil_scanner
from .syntaxtree import Node
from DHParser.ebnf import EBNFTransformer, EBNFCompiler, grammar_changed, \
get_ebnf_scanner, get_ebnf_grammar, get_ebnf_transformer, get_ebnf_compiler, \
ScannerFactoryFunc, ParserFactoryFunc, TransformerFactoryFunc, CompilerFactoryFunc
from DHParser.toolkit import logging, load_if_file, is_python_code, compile_python_object
from DHParser.parsers import Grammar, Compiler, compile_source, nil_scanner, ScannerFunc
from DHParser.syntaxtree import Node, TransformerFunc
__all__ = ['GrammarError',
......@@ -71,7 +72,7 @@ try:
except ImportError:
import re
from DHParser.toolkit import logging, is_filename, load_if_file
from DHParser.parsers import Grammar, CompilerBase, nil_scanner, \\
from DHParser.parsers import Grammar, Compiler, nil_scanner, \\
Lookbehind, Lookahead, Alternative, Pop, Required, Token, \\
Optional, NegativeLookbehind, OneOrMore, RegExp, Retrieve, Sequence, RE, Capture, \\
ZeroOrMore, Forward, NegativeLookahead, mixin_comment, compile_source, \\
......@@ -137,7 +138,7 @@ class CompilationError(Exception):
return '\n'.join(self.error_messages)
def grammar_instance(grammar_representation):
def grammar_instance(grammar_representation) -> Tuple[Grammar, str]:
"""Returns a grammar object and the source code of the grammar, from
the given `grammar`-data which can be either a file name, ebnf-code,
python-code, a Grammar-derived grammar class or an instance of
......@@ -167,7 +168,11 @@ def grammar_instance(grammar_representation):
return parser_root, grammar_src
def compileDSL(text_or_file, scanner, dsl_grammar, ast_transformation, compiler):
def compileDSL(text_or_file: str,
scanner: ScannerFunc,
dsl_grammar: Grammar,
ast_transformation: TransformerFunc,
compiler: Compiler) -> Any:
"""Compiles a text in a domain specific language (DSL) with an
EBNF-specified grammar. Returns the compiled text or raises a
compilation error.
......@@ -176,10 +181,10 @@ def compileDSL(text_or_file, scanner, dsl_grammar, ast_transformation, compiler)
CompilationError if any errors occurred during compilation
"""
assert isinstance(text_or_file, str)
assert isinstance(compiler, CompilerBase)
assert isinstance(compiler, Compiler)
parser_root, grammar_src = grammar_instance(dsl_grammar)
result, errors, AST = compile_source(text_or_file, scanner, parser_root,
parser, grammar_src = grammar_instance(dsl_grammar)
result, errors, AST = compile_source(text_or_file, scanner, parser,
ast_transformation, compiler)
if errors:
src = load_if_file(text_or_file)
......@@ -187,7 +192,7 @@ def compileDSL(text_or_file, scanner, dsl_grammar, ast_transformation, compiler)
return result
def raw_compileEBNF(ebnf_src, branding="DSL"):
def raw_compileEBNF(ebnf_src: str, branding="DSL") -> EBNFCompiler:
"""Compiles an EBNF grammar file and returns the compiler object
that was used and which can now be queried for the result as well
as skeleton code for scanner, transformer and compiler objects.
......@@ -208,7 +213,7 @@ def raw_compileEBNF(ebnf_src, branding="DSL"):
return compiler
def compileEBNF(ebnf_src, branding="DSL"):
def compileEBNF(ebnf_src: str, branding="DSL") -> str:
"""Compiles an EBNF source file and returns the source code of a
compiler suite with skeletons for scanner, transformer and
compiler.
......@@ -234,7 +239,7 @@ def compileEBNF(ebnf_src, branding="DSL"):
return '\n'.join(src)
def parser_factory(ebnf_src, branding="DSL"):
def parser_factory(ebnf_src: str, branding="DSL") -> Grammar:
"""Compiles an EBNF grammar and returns a grammar-parser factory
function for that grammar.
......@@ -253,7 +258,8 @@ def parser_factory(ebnf_src, branding="DSL"):
return compile_python_object(DHPARSER_IMPORTS + grammar_src, 'get_(?:\w+_)?grammar$')
def load_compiler_suite(compiler_suite):
def load_compiler_suite(compiler_suite: str) -> \
Tuple[ScannerFactoryFunc, ParserFactoryFunc, TransformerFactoryFunc, CompilerFactoryFunc]:
"""Extracts a compiler suite from file or string ``compiler suite``
and returns it as a tuple (scanner, parser, ast, compiler).
......@@ -282,13 +288,14 @@ def load_compiler_suite(compiler_suite):
if errors:
raise GrammarError('\n\n'.join(errors), source)
scanner = get_ebnf_scanner
parser = get_ebnf_grammar
ast = get_ebnf_transformer
compiler = compile_python_object(imports + compiler_py, 'get_(?:\w+_)?compiler$')
return scanner, parser, ast, compiler
def is_outdated(compiler_suite, grammar_source):
def is_outdated(compiler_suite: str, grammar_source: str) -> bool:
"""Returns ``True`` if the ``compile_suite`` needs to be updated.
An update is needed, if either the grammar in the compiler suite
......@@ -313,7 +320,7 @@ def is_outdated(compiler_suite, grammar_source):
return True
def run_compiler(text_or_file, compiler_suite):
def run_compiler(text_or_file: str, compiler_suite: str) -> Any:
"""Compiles a source with a given compiler suite.
Args:
......@@ -336,7 +343,7 @@ def run_compiler(text_or_file, compiler_suite):
return compileDSL(text_or_file, scanner(), parser(), ast(), compiler())
def compile_on_disk(source_file, compiler_suite="", extension=".xml"):
def compile_on_disk(source_file: str, compiler_suite="", extension=".xml"):
"""Compiles the a source file with a given compiler and writes the
result to a file.
......@@ -373,18 +380,20 @@ def compile_on_disk(source_file, compiler_suite="", extension=".xml"):
rootname = os.path.splitext(filepath)[0]
compiler_name = os.path.basename(rootname)
if compiler_suite:
scanner, parser, trans, cfactory = load_compiler_suite(compiler_suite)
sfactory, pfactory, tfactory, cfactory = load_compiler_suite(compiler_suite)
else:
scanner = get_ebnf_scanner
parser = get_ebnf_grammar
trans = get_ebnf_transformer
sfactory = get_ebnf_scanner
pfactory = get_ebnf_grammar
tfactory = get_ebnf_transformer
cfactory = get_ebnf_compiler
compiler1 = cfactory(compiler_name, source_file)
result, errors, ast = compile_source(source_file, scanner(), parser(), trans(), compiler1)
compiler1 = cfactory()
compiler1.set_grammar_name(compiler_name, source_file)
result, errors, ast = compile_source(source_file, sfactory(), pfactory(), tfactory(), compiler1)
if errors:
return errors
elif cfactory == get_ebnf_compiler: # trans == get_ebnf_transformer or trans == EBNFTransformer: # either an EBNF- or no compiler suite given
ebnf_compiler = cast(EBNFCompiler, compiler1)
global SECTION_MARKER, RX_SECTION_MARKER, SCANNER_SECTION, PARSER_SECTION, \
AST_SECTION, COMPILER_SECTION, END_SECTIONS_MARKER, RX_WHITESPACE, \
DHPARSER_MAIN, DHPARSER_IMPORTS
......@@ -412,11 +421,11 @@ def compile_on_disk(source_file, compiler_suite="", extension=".xml"):
if RX_WHITESPACE.fullmatch(imports):
imports = DHPARSER_IMPORTS
if RX_WHITESPACE.fullmatch(scanner):
scanner = compiler1.gen_scanner_skeleton()
scanner = ebnf_compiler.gen_scanner_skeleton()
if RX_WHITESPACE.fullmatch(ast):
ast = compiler1.gen_transformer_skeleton()
ast = ebnf_compiler.gen_transformer_skeleton()
if RX_WHITESPACE.fullmatch(compiler):
compiler = compiler1.gen_compiler_skeleton()
compiler = ebnf_compiler.gen_compiler_skeleton()
try:
f = open(rootname + 'Compiler.py', 'w', encoding="utf-8")
......@@ -441,6 +450,7 @@ def compile_on_disk(source_file, compiler_suite="", extension=".xml"):
if f: f.close()
else:
f = None
try:
f = open(rootname + extension, 'w', encoding="utf-8")
if isinstance(result, Node):
......
This diff is collapsed.
This diff is collapsed.
......@@ -25,9 +25,9 @@ try:
import regex as re
except ImportError:
import re
from typing import NamedTuple
from typing import Any, Callable, cast, Iterator, NamedTuple, Union, Tuple, List
from .toolkit import log_dir, expand_table, line_col, smart_list
from DHParser.toolkit import log_dir, expand_table, line_col, smart_list
__all__ = ['WHITESPACE_PTYPE',
......@@ -35,6 +35,7 @@ __all__ = ['WHITESPACE_PTYPE',
'ZOMBIE_PARSER',
'Error',
'Node',
'TransformerFunc',
'key_parser_name',
'key_tag_name',
'traverse',
......@@ -117,6 +118,11 @@ ZOMBIE_PARSER = ZombieParser()
Error = NamedTuple('Error', [('pos', int), ('msg', str)])
ChildrenType = Tuple['Node', ...]
ResultType = Union[ChildrenType, str]
SloppyResultT = Union[ChildrenType, 'Node', str, None]
class Node:
"""
Represents a node in the concrete or abstract syntax tree.
......@@ -157,19 +163,21 @@ class Node:
parsing stage and never during or after the
AST-transformation.
"""
def __init__(self, parser, result):
def __init__(self, parser, result: SloppyResultT) -> None:
"""Initializes the ``Node``-object with the ``Parser``-Instance
that generated the node and the parser's result.
"""
self._result = '' # type: ResultType
self._errors = [] # type: List[str]
self._children = () # type: ChildrenType
self._len = len(self.result) if not self.children else \
sum(child._len for child in self.children) # type: int
# self.pos: int = 0 # continuous updating of pos values
self._pos = -1 # type: int
self.result = result
self.parser = parser or ZOMBIE_PARSER
self._errors = []
self.error_flag = any(r.error_flag for r in self.result) if self.children else False
self._len = len(self.result) if not self.children else \
sum(child._len for child in self.children)
# self.pos = 0 # continuous updating of pos values
self._pos = -1
self.error_flag = any(r.error_flag for r in self.children) \
if self.children else False # type: bool
def __str__(self):
if self.children:
......@@ -190,39 +198,41 @@ class Node:
return other
@property
def tag_name(self) -> str:
    """Return the name under which this node is addressed in AST
    transformation tables: the generating parser's name or, for
    anonymous parsers, the parser's type string (``ptype``)."""
    return self.parser.name or self.parser.ptype
    # ONLY FOR DEBUGGING: return self.parser.name + ':' + self.parser.ptype
@property
def result(self) -> 'ResultType':
    """Return the node's result: a tuple of child nodes for branch
    nodes, or a string for leaf nodes."""
    return self._result
@result.setter
def result(self, result: 'SloppyResultT'):
    """Set the node's result, normalizing sloppy values: a single Node
    is wrapped in a one-element tuple; ``None`` or any falsy value
    becomes the empty string.  ``_children`` is kept in sync and is
    non-empty only when the normalized result is a tuple of nodes.
    """
    # The former isinstance-assertions were made obsolete by static
    # type checking with mypy.
    self._result = (result,) if isinstance(result, Node) else result or ''
    self._children = cast('ChildrenType', self._result) \
        if isinstance(self._result, tuple) else cast('ChildrenType', ())
@property
def children(self) -> 'ChildrenType':
    """Return the tuple of child nodes (empty for leaf nodes)."""
    return self._children
@property
def len(self) -> int:
    """Return the length of this node's content in characters of the
    source text (cached in ``_len`` at construction time)."""
    # DEBUGGING: print(self.tag_name, str(self.pos), str(self._len), str(self)[:10].replace('\n','.'))
    return self._len
@property
def pos(self) -> int:
    """Return the node's position within the source text.

    Raises:
        AssertionError: if the position has never been initialized
            (``_pos`` is still at its sentinel value -1).
    """
    assert self._pos >= 0, "position value not initialized!"
    return self._pos
@pos.setter
def pos(self, pos):
assert isinstance(pos, int)
def pos(self, pos: int):
# assert isinstance(pos, int)
self._pos = pos
offset = 0
for child in self.children:
......@@ -230,10 +240,10 @@ class Node:
offset += child.len
@property
def errors(self) -> 'List[Error]':
    """Return this node's error messages wrapped as ``Error`` tuples
    that carry the node's position."""
    return [Error(self.pos, err) for err in self._errors]
def _tree_repr(self, tab, openF, closeF, dataF=lambda s: s):
def _tree_repr(self, tab, openF, closeF, dataF=lambda s: s) -> str:
"""
Generates a tree representation of this node and its children
in string form.
......@@ -266,19 +276,19 @@ class Node:
if self.children:
content = []
for child in self.result:
for child in self.children:
subtree = child._tree_repr(tab, openF, closeF, dataF).split('\n')
content.append('\n'.join((tab + s) for s in subtree))
return head + '\n'.join(content) + tail
if head[0] == "<" and self.result.find('\n') < 0:
res = cast(str, self.result) # safe, because if there are no children, result is a string
if head[0] == "<" and res.find('\n') < 0:
# for XML: place tags for leaf-nodes on one line if possible
return head[:-1] + self.result + tail[1:]
else:
return head + '\n'.join([tab + dataF(s)
for s in self.result.split('\n')]) + tail
return head + '\n'.join([tab + dataF(s) for s in res.split('\n')]) + tail
def as_sexpr(self, src=None):
def as_sexpr(self, src=None) -> str:
"""
Returns content as S-expression, i.e. in lisp-like form.
......@@ -290,7 +300,7 @@ class Node:
of leaf nodes shall be applied for better readability.
"""
def opening(node):
def opening(node) -> str:
s = '(' + node.tag_name
# s += " '(pos %i)" % node.pos
if src:
......@@ -307,7 +317,7 @@ class Node:
return self._tree_repr(' ', opening, lambda node: ')', pretty) # pretty if prettyprint else lambda s: s)
def as_xml(self, src=None):
def as_xml(self, src=None) -> str:
"""
Returns content as XML-tree.
......@@ -317,7 +327,7 @@ class Node:
column.
"""
def opening(node):
def opening(node) -> str:
s = '<' + node.tag_name
# s += ' pos="%i"' % node.pos
if src:
......@@ -333,7 +343,7 @@ class Node:
return self._tree_repr(' ', opening, closing)
def add_error(self, error_str) -> 'Node':
    """Attach an error message to this node, set its error flag, and
    return the node itself so that calls can be chained."""
    self._errors.append(error_str)
    self.error_flag = True
    return self
......@@ -347,7 +357,7 @@ class Node:
child.propagate_error_flags()
self.error_flag |= child.error_flag
def collect_errors(self, clear_errors=False):
def collect_errors(self, clear_errors=False) -> List[Error]:
"""
Returns all errors of this node or any child node in the form
of a set of tuples (position, error_message), where position
......@@ -358,7 +368,7 @@ class Node:
self._errors = []
self.error_flag = False
if self.children:
for child in self.result:
for child in self.children:
errors.extend(child.collect_errors(clear_errors))
return errors
......@@ -367,7 +377,7 @@ class Node:
with open(os.path.join(log_dir(), st_file_name), "w", encoding="utf-8") as f:
f.write(self.as_sexpr())
def find(self, match_function):
def find(self, match_function) -> Iterator['Node']:
"""Finds nodes in the tree that match a specific criterion.
``find`` is a generator that yields all nodes for which the
......@@ -436,15 +446,18 @@ class Node:
########################################################################
TransformerFunc = Union[Callable[[Node], Any], partial]
WHITESPACE_PTYPE = ':Whitespace'
TOKEN_PTYPE = ':Token'
def key_parser_name(node) -> str:
    """Key function for traversal tables: selects nodes by the name of
    the parser that generated them."""
    return node.parser.name
def key_tag_name(node) -> str:
    """Key function for traversal tables: selects nodes by their tag
    name (the parser's name or, for anonymous parsers, its ptype)."""
    return node.tag_name
......
......@@ -38,6 +38,7 @@ try:
import regex as re
except ImportError:
import re
from typing import List, Tuple
__all__ = ['logging',
......@@ -123,7 +124,7 @@ def is_logging():
return False
def line_col(text, pos):
def line_col(text: str, pos: int) -> Tuple[int, int]:
"""Returns the position within a text as (line, column)-tuple.
"""
assert pos < len(text), str(pos) + " >= " + str(len(text))
......@@ -132,7 +133,7 @@ def line_col(text, pos):
return line, column
def error_messages(source_text, errors):
def error_messages(source_text, errors) -> List[str]:
"""Returns the sequence or iterator of error objects as an intertor
of error messages with line and column numbers at the beginning.
......@@ -149,7 +150,7 @@ def error_messages(source_text, errors):
for err in sorted(list(errors))]
def compact_sexpr(s):
def compact_sexpr(s) -> str:
"""Returns S-expression ``s`` as a one liner without unnecessary
whitespace.
......@@ -160,7 +161,7 @@ def compact_sexpr(s):
return re.sub('\s(?=\))', '', re.sub('\s+', ' ', s)).strip()
def escape_re(s):
def escape_re(s) -> str:
"""Returns `s` with all regular expression special characters escaped.
"""
assert isinstance(s, str)
......@@ -170,13 +171,13 @@ def escape_re(s):
return s
def is_filename(s) -> bool:
    """Tries to guess whether string ``s`` is a file name.

    Heuristic: a file name is a single line that neither starts nor
    ends with a space and contains no glob wildcards ('*' or '?').
    """
    return s.find('\n') < 0 and s[:1] != " " and s[-1:] != " " \
        and s.find('*') < 0 and s.find('?') < 0
def logfile_basename(filename_or_text, function_or_class_or_instance):
def logfile_basename(filename_or_text, function_or_class_or_instance) -> str:
"""Generates a reasonable logfile-name (without extension) based on
the given information.
"""
......@@ -191,10 +192,11 @@ def logfile_basename(filename_or_text, function_or_class_or_instance):
return s[:i] + '_out' if i >= 0 else s
def load_if_file(text_or_file):
"""Reads and returns content of a file if parameter `text_or_file` is a
file name (i.e. a single line string), otherwise (i.e. if `text_or_file` is
a multiline string) `text_or_file` is returned.
def load_if_file(text_or_file) -> str:
"""Reads and returns content of a text-file if parameter
`text_or_file` is a file name (i.e. a single line string),
otherwise (i.e. if `text_or_file` is a multiline string)
`text_or_file` is returned.
"""
if is_filename(text_or_file):
try:
......@@ -211,7 +213,7 @@ def load_if_file(text_or_file):
return text_or_file
def is_python_code(text_or_file):
def is_python_code(text_or_file) -> bool:
"""Checks whether 'text_or_file' is python code or the name of a file that
contains python code.
"""
......@@ -295,7 +297,7 @@ def expand_table(compact_table):
return expanded_table
def sane_parser_name(name):
def sane_parser_name(name) -> bool:
"""Checks whether given name is an acceptable parser name. Parser names
must not be preceded or succeeded by a double underscore '__'!
"""
......
......@@ -1390,7 +1390,7 @@ def full_compilation(source, grammar_base, AST_transformations, compiler):
assigns AST transformation functions to parser names (see
function traverse)
compiler (object): An instance of a class derived from
``CompilerBase`` with a suitable method for every parser
``Compiler`` with a suitable method for every parser
name or class.
Returns (tuple):
......@@ -1422,7 +1422,7 @@ def full_compilation(source, grammar_base, AST_transformations, compiler):
return result, messages, syntax_tree
COMPILER_SYMBOLS = {'CompilerBase', 'Node', 're'}
COMPILER_SYMBOLS = {'Compiler', 'Node', 're'}
########################################################################
......@@ -1616,7 +1616,7 @@ class EBNFCompiler(CompilerBase):
if not self.definition_names:
raise EBNFCompilerError('Compiler has not been run before calling '
'"gen_Compiler_Skeleton()"!')
compiler = ['class ' + self.grammar_name + 'Compiler(CompilerBase):',
compiler = ['class ' + self.grammar_name + 'Compiler(Compiler):',
' """Compiler for the abstract-syntax-tree of a ' +
self.grammar_name + ' source file.',
' """', '',
......@@ -2086,7 +2086,7 @@ def run_compiler(source_file, compiler_suite="", extension=".xml"):
intro, syms, scanner, parser, ast, compiler, outro = RX_SECTION_MARKER.split(source)
except (PermissionError, FileNotFoundError, IOError) as error:
intro, outro = '', ''
syms = import_block("PyDSL", PARSER_SYMBOLS | AST_SYMBOLS | {'CompilerBase'})
syms = import_block("PyDSL", PARSER_SYMBOLS | AST_SYMBOLS | {'Compiler'})
scanner = compiler.gen_scanner_skeleton()
ast = compiler.gen_AST_skeleton()
compiler = compiler.gen_compiler_skeleton()
......
......@@ -14,7 +14,7 @@ try:
import regex as re
except ImportError:
import re
from DHParser.parsers import Grammar, CompilerBase, Alternative, Required, Token, \
from DHParser.parsers import Grammar, Compiler, Alternative, Required, Token, \
Optional, OneOrMore, Sequence, RE, ZeroOrMore, NegativeLookahead, mixin_comment, compile_source
from DHParser.syntaxtree import traverse, reduce_single_child, replace_by_single_child, no_operation, \
remove_expendables, remove_tokens, flatten, \
......@@ -290,7 +290,7 @@ MLWTransform = partial(traverse, processing_table=MLW_AST_transformation_table)
#
#######################################################################
class MLWCompiler(CompilerBase):
class MLWCompiler(Compiler):
"""Compiler for the abstract-syntax-tree of a MLW source file.
"""
......
......@@ -7,19 +7,23 @@
#######################################################################
from functools import partial
import os
import sys
from functools import partial
try:
import regex as re
except ImportError:
import re
from DHParser.toolkit import logging, is_filename
from DHParser.parsers import Grammar, CompilerBase, Required, Token, \
Optional, OneOrMore, ZeroOrMore, Sequence, RE, NegativeLookahead, \
mixin_comment, compile_source
from DHParser.syntaxtree import traverse, no_operation
from DHParser.toolkit import logging, is_filename, load_if_file
from DHParser.parsers import Grammar, Compiler, nil_scanner, \
Lookbehind, Lookahead, Alternative, Pop, Required, Token, \
Optional, NegativeLookbehind, OneOrMore, RegExp, Retrieve, Sequence, RE, Capture, \
ZeroOrMore, Forward, NegativeLookahead, mixin_comment, compile_source, \
nop_filter, counterpart_filter, accumulating_filter
from DHParser.syntaxtree import Node, traverse, remove_enclosing_delimiters, \
remove_children_if, reduce_single_child, replace_by_single_child, remove_whitespace, \
no_operation, remove_expendables, remove_tokens, flatten, is_whitespace, is_expendable, \
collapse, map_content, WHITESPACE_PTYPE, TOKEN_PTYPE
#######################################################################
......@@ -31,7 +35,6 @@ from DHParser.syntaxtree import traverse, no_operation
def LyrikScanner(text):
    """Scanner for Lyrik sources: a no-op that returns the source text
    unchanged."""
    return text
def get_scanner():
    """Returns the scanner function for Lyrik sources."""
    return LyrikScanner
......@@ -76,7 +79,7 @@ class LyrikGrammar(Grammar):
JAHRESZAHL = /\d\d\d\d/~
ENDE = !/./
"""
source_hash__ = "7a99fa77a7d2b81976293d54696eb4f3"
source_hash__ = "3e9ec28cf58667fc259569326f76cf90"
parser_initialization__ = "upon instatiation"
COMMENT__ = r''
WSP__ = mixin_comment(whitespace=r'[\t ]*', comment=r'')
......@@ -133,20 +136,19 @@ Lyrik_AST_transformation_table = {
"untertitel": no_operation,
"ort": no_operation,
"jahr": no_operation,
"wortfolge": no_operation,
"namenfolge": no_operation,
"verknüpfung": no_operation,
"ziel": no_operation,
"serie": no_operation,
"titel": no_operation,
"zeile": no_operation,
"text": no_operation,
"strophe": no_operation,
"vers": no_operation,
"wortfolge": no_operation,
"namenfolge": no_operation,
"verknüpfung": no_operation,
"ziel": no_operation,
"WORT": no_operation,
"NAME": no_operation,
"ZEICHENFOLGE": no_operation,
"LEER": no_operation,
"NZ": no_operation,
"LEERZEILE": no_operation,
"JAHRESZAHL": no_operation,
......@@ -167,7 +169,7 @@ def get_transformer():
#
#######################################################################
class LyrikCompiler(CompilerBase):
class LyrikCompiler(Compiler):
"""Compiler for the abstract-syntax-tree of a Lyrik source file.
"""
......@@ -175,79 +177,76 @@ class LyrikCompiler(CompilerBase):
super(LyrikCompiler, self).__init__(grammar_name, grammar_source)
assert re.match('\w+\Z', grammar_name)