# ebnf.py - EBNF -> Python-Parser compilation for DHParser
#
# Copyright 2016  by Eckhart Arnold (arnold@badw.de)
#                 Bavarian Academy of Sciences and Humanities (badw.de)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.  See the License for the specific language governing
# permissions and limitations under the License.


"""
Module ``ebnf`` provides a self-hosting parser for EBNF-Grammars as
well as an EBNF-compiler that compiles an EBNF-Grammar into a
DHParser based Grammar class that can be executed to parse source text
conforming to this grammar into concrete syntax trees.
"""


import keyword
from collections import OrderedDict
from functools import partial

from DHParser.compile import CompilerError, Compiler
from DHParser.error import Error
from DHParser.parse import Grammar, mixin_comment, Forward, RegExp, Whitespace, _RE, \
    NegativeLookahead, Alternative, Series, Option, OneOrMore, ZeroOrMore, _Token
from DHParser.preprocess import nil_preprocessor, PreprocessorFunc
from DHParser.syntaxtree import Node, WHITESPACE_PTYPE, TOKEN_PTYPE
from DHParser.toolkit import load_if_file, escape_re, md5, sane_parser_name, re, expand_table, \
    typing
from DHParser.transform import TransformationFunc, traverse, remove_brackets, \
    reduce_single_child, replace_by_single_child, remove_expendables, \
    remove_tokens, flatten, forbid, assert_content, remove_infix_operator
from DHParser.versionnumber import __version__

from typing import Callable, Dict, List, Set, Tuple


__all__ = ('get_ebnf_preprocessor',
           'get_ebnf_grammar',
           'get_ebnf_transformer',
           'get_ebnf_compiler',
           'EBNFGrammar',
           'EBNFTransform',
           'EBNFCompilerError',
           'EBNFCompiler',
           'grammar_changed',
           'PreprocessorFactoryFunc',
           'ParserFactoryFunc',
           'TransformerFactoryFunc',
           'CompilerFactoryFunc')


########################################################################
#
# EBNF scanning
#
########################################################################


def get_ebnf_preprocessor() -> PreprocessorFunc:
    return nil_preprocessor


########################################################################
#
# EBNF parsing
#
########################################################################


class EBNFGrammar(Grammar):
    r"""
    Parser for an EBNF source file, with this grammar::

        # EBNF-Grammar in EBNF

        @ comment    = /#.*(?:\n|$)/                    # comments start with '#' and eat all chars up to and including '\n'
        @ whitespace = /\s*/                            # whitespace includes linefeed
        @ literalws  = right                            # trailing whitespace of literals will be ignored tacitly

        syntax     = [~//] { definition | directive } §EOF
        definition = symbol §"=" expression
        directive  = "@" §symbol "=" ( regexp | literal | list_ )

        expression = term { "|" term }
        term       = { ["§"] factor }+                       # "§" means all following factors mandatory
        factor     = [flowmarker] [retrieveop] symbol !"="   # negative lookahead to be sure it's not a definition
                   | [flowmarker] literal
                   | [flowmarker] plaintext
                   | [flowmarker] regexp
                   | [flowmarker] whitespace
                   | [flowmarker] oneormore
                   | [flowmarker] group
                   | [flowmarker] unordered
                   | repetition
                   | option

        flowmarker = "!"  | "&"                         # '!' negative lookahead, '&' positive lookahead
                   | "-!" | "-&"                        # '-' negative lookbehind, '-&' positive lookbehind
        retrieveop = "::" | ":"                         # '::' pop, ':' retrieve

        group      = "(" §expression ")"
        unordered  = "<" §expression ">"                # elements of expression in arbitrary order
        oneormore  = "{" expression "}+"
        repetition = "{" §expression "}"
        option     = "[" §expression "]"

        symbol     = /(?!\d)\w+/~                       # e.g. expression, factor, parameter_list
        literal    = /"(?:[^"]|\\")*?"/~                # e.g. "(", '+', 'while'
                   | /'(?:[^']|\\')*?'/~                # whitespace following literals will be ignored tacitly.
        plaintext  = /`(?:[^"]|\\")*?`/~                # like literal but does not eat whitespace
        regexp     = /\/(?:\\\/|[^\/])*?\//~            # e.g. /\w+/, ~/#.*(?:\n|$)/~
                                                        # '~' is a whitespace-marker, if present leading or trailing
                                                        # whitespace of a regular expression will be ignored tacitly.
        whitespace = /~/~                               # implicit or default whitespace
        list_      = /\w+/~ { "," /\w+/~ }              # comma separated list of symbols, e.g. BEGIN_LIST, END_LIST,
                                                        # BEGIN_QUOTE, END_QUOTE ; see CommonMark/markdown.py for an example
        EOF = !/./
    """
    expression = Forward()
    parser_initialization__ = "upon instantiation"
    COMMENT__ = r'#.*(?:\n|$)'
    WHITESPACE__ = r'\s*'
    WSP__ = mixin_comment(whitespace=WHITESPACE__, comment=COMMENT__)
    wspL__ = ''
    wspR__ = WSP__
    whitespace__ = Whitespace(WSP__)
    EOF = NegativeLookahead(RegExp('.'))
    list_ = Series(RegExp('\\w+'), whitespace__, ZeroOrMore(Series(_Token(","), RegExp('\\w+'), whitespace__)))
    whitespace = Series(RegExp('~'), whitespace__)
    regexp = Series(RegExp('/(?:\\\\/|[^/])*?/'), whitespace__)
    plaintext = Series(RegExp('`(?:[^"]|\\\\")*?`'), whitespace__)
    literal = Alternative(Series(RegExp('"(?:[^"]|\\\\")*?"'), whitespace__), Series(RegExp("'(?:[^']|\\\\')*?'"), whitespace__))
    symbol = Series(RegExp('(?!\\d)\\w+'), whitespace__)
    option = Series(_Token("["), expression, _Token("]"), mandatory=1)
    repetition = Series(_Token("{"), expression, _Token("}"), mandatory=1)
    oneormore = Series(_Token("{"), expression, _Token("}+"))
    unordered = Series(_Token("<"), expression, _Token(">"), mandatory=1)
    group = Series(_Token("("), expression, _Token(")"), mandatory=1)
    retrieveop = Alternative(_Token("::"), _Token(":"))
    flowmarker = Alternative(_Token("!"), _Token("&"), _Token("-!"), _Token("-&"))
    factor = Alternative(Series(Option(flowmarker), Option(retrieveop), symbol, NegativeLookahead(_Token("="))),
                         Series(Option(flowmarker), literal), Series(Option(flowmarker), plaintext),
                         Series(Option(flowmarker), regexp), Series(Option(flowmarker), whitespace),
                         Series(Option(flowmarker), oneormore),
                         Series(Option(flowmarker), group),
                         Series(Option(flowmarker), unordered), repetition, option)
    term = OneOrMore(Series(Option(_Token("§")), factor))
    expression.set(Series(term, ZeroOrMore(Series(_Token("|"), term))))
    directive = Series(_Token("@"), symbol, _Token("="), Alternative(regexp, literal, list_), mandatory=1)
    definition = Series(symbol, _Token("="), expression, mandatory=1)
    syntax = Series(Option(Series(whitespace__, RegExp(''))), ZeroOrMore(Alternative(definition, directive)), EOF, mandatory=2)
    root__ = syntax


def grammar_changed(grammar_class, grammar_source: str) -> bool:
    """
    Returns ``True`` if ``grammar_class`` does not reflect the latest
    changes of ``grammar_source``

    Parameters:
        grammar_class:  the parser class representing the grammar
            or the file name of a compiler suite containing the grammar
        grammar_source:  File name or string representation of the
            EBNF code of the grammar

    Returns (bool):
        True, if the source text of the grammar is different from the
        source from which the grammar class was generated
    """
    grammar = load_if_file(grammar_source)
    chksum = md5(grammar, __version__)
    if isinstance(grammar_class, str):
        # grammar_class = load_compiler_suite(grammar_class)[1]
        with open(grammar_class, 'r', encoding='utf8') as f:
            pycode = f.read()
        m = re.search(r'class \w*\(Grammar\)', pycode)
        if m:
            m = re.search('    source_hash__ *= *"([a-z0-9]*)"',
                          pycode[m.span()[1]:])
            return not (m and m.groups() and m.groups()[-1] == chksum)
        else:
            return True
    else:
        return chksum != grammar_class.source_hash__
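
# Example (editor's sketch; the file path is hypothetical): regenerate a
# compiled parser module only if its EBNF source has changed since the last
# compilation:
#
#     if grammar_changed(EBNFGrammar, 'examples/EBNF/EBNF.ebnf'):
#         ...  # recompile the grammar, e.g. with EBNFCompiler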


def get_ebnf_grammar() -> EBNFGrammar:
    global thread_local_ebnf_grammar_singleton
    try:
        grammar = thread_local_ebnf_grammar_singleton
        return grammar
    except NameError:
        thread_local_ebnf_grammar_singleton = EBNFGrammar()
        return thread_local_ebnf_grammar_singleton
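
# Note (editor's sketch): within one thread the factory always returns the
# same EBNFGrammar instance, so repeated calls are cheap:
#
#     assert get_ebnf_grammar() is get_ebnf_grammar()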


########################################################################
#
# EBNF concrete to abstract syntax tree transformation and validation
#
########################################################################


EBNF_AST_transformation_table = {
    # AST Transformations for EBNF-grammar
    "+":
        remove_expendables,
    "syntax":
        [],  # otherwise '"*": replace_by_single_child' would be applied
    "directive, definition":
        remove_tokens('@', '='),
    "expression":
        [replace_by_single_child, flatten, remove_tokens('|')],  # remove_infix_operator],
    "term":
        [replace_by_single_child, flatten],  # supports both idioms:
                                             # "{ factor }+" and "factor { factor }"
    "factor, flowmarker, retrieveop":
        replace_by_single_child,
    "group":
        [remove_brackets, replace_by_single_child],
    "unordered":
        remove_brackets,
    "oneormore, repetition, option":
        [reduce_single_child, remove_brackets,
         forbid('repetition', 'option', 'oneormore'), assert_content(r'(?!§)(?:.|\n)*')],
    "symbol, literal, regexp":
        reduce_single_child,
    (TOKEN_PTYPE, WHITESPACE_PTYPE):
        reduce_single_child,
    "list_":
        [flatten, remove_infix_operator],
    "*":
        replace_by_single_child
}


def EBNFTransform() -> TransformationFunc:
    return partial(traverse, processing_table=EBNF_AST_transformation_table.copy())

def get_ebnf_transformer() -> TransformationFunc:
    global thread_local_EBNF_transformer_singleton
    try:
        transformer = thread_local_EBNF_transformer_singleton
    except NameError:
        thread_local_EBNF_transformer_singleton = EBNFTransform()
        transformer = thread_local_EBNF_transformer_singleton
    return transformer


########################################################################
#
# EBNF abstract syntax tree to Python parser compilation
#
########################################################################


PreprocessorFactoryFunc = Callable[[], PreprocessorFunc]
ParserFactoryFunc = Callable[[], Grammar]
TransformerFactoryFunc = Callable[[], TransformationFunc]
CompilerFactoryFunc = Callable[[], Compiler]

PREPROCESSOR_FACTORY = '''
def get_preprocessor() -> PreprocessorFunc:
    return {NAME}Preprocessor
'''


GRAMMAR_FACTORY = '''
def get_grammar() -> {NAME}Grammar:
    global thread_local_{NAME}_grammar_singleton
    try:
        grammar = thread_local_{NAME}_grammar_singleton
    except NameError:
        thread_local_{NAME}_grammar_singleton = {NAME}Grammar()
        grammar = thread_local_{NAME}_grammar_singleton
    return grammar
'''


TRANSFORMER_FACTORY = '''
def {NAME}Transform() -> TransformationDict:
    return partial(traverse, processing_table={NAME}_AST_transformation_table.copy())

def get_transformer() -> TransformationFunc:
    global thread_local_{NAME}_transformer_singleton
    try:
        transformer = thread_local_{NAME}_transformer_singleton
    except NameError:
        thread_local_{NAME}_transformer_singleton = {NAME}Transform()
        transformer = thread_local_{NAME}_transformer_singleton
    return transformer
'''


COMPILER_FACTORY = '''
def get_compiler(grammar_name="{NAME}", grammar_source="") -> {NAME}Compiler:
    global thread_local_{NAME}_compiler_singleton
    try:
        compiler = thread_local_{NAME}_compiler_singleton
        compiler.set_grammar_name(grammar_name, grammar_source)
    except NameError:
        thread_local_{NAME}_compiler_singleton = \\
            {NAME}Compiler(grammar_name, grammar_source)
        compiler = thread_local_{NAME}_compiler_singleton
    return compiler
'''
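
# Illustration (editor's sketch, 'Arithmetic' is a made-up grammar name): the
# factory templates above are instantiated by EBNFCompiler via str.format(),
# e.g.
#
#     GRAMMAR_FACTORY.format(NAME='Arithmetic')
#
# yields the source of a get_grammar() function that manages a thread-local
# singleton of the generated ArithmeticGrammar class.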


class EBNFCompilerError(CompilerError):
    """Error raised by `EBNFCompiler` class. (Not compilation errors
    in the strict sense, see `CompilationError` in module ``dsl.py``)"""
    pass


class EBNFCompiler(Compiler):
    """
    Generates a Parser from an abstract syntax tree of a grammar specified
    in EBNF-Notation.

    Instances of this class must be called with the root-node of the
    abstract syntax tree from an EBNF-specification of a formal language.
    The returned value is the Python-source-code of a Grammar class for
    this language that can be used to parse texts in this language.
    See classes `parser.Compiler` and `parser.Grammar` for more information.

    Additionally, class EBNFCompiler provides helper methods to generate
    code-skeletons for a preprocessor, AST-transformation and full
    compilation of the formal language. These methods' names start with
    the prefix `gen_`.

    Attributes:
        current_symbols:  During compilation, a list containing the root
                node of the currently compiled definition as first element
                and then the nodes of the symbols that are referred to in
                the currently compiled definition.

        rules:  Dictionary that maps rule names to a list of Nodes that
                contain symbol-references in the definition of the rule.
                The first item in the list is the node of the rule-
                definition itself. Example:

                           `alternative = a | b`

                Now `[node.content for node in self.rules['alternative']]`
                yields `['alternative = a | b', 'a', 'b']`

        symbols:  A mapping of symbol names to their first usage (not
                their definition!) in the EBNF source.

        variables:  A set of symbol names that are used with the
                Pop or Retrieve operator. Because the values of these
                symbols need to be captured they are called variables.
                See `test_parser.TestPopRetrieve` for an example.

        recursive:  A set of symbols that are used recursively and
                therefore require a `Forward`-operator.

        definitions:  A dictionary of definitions. Unlike `rules`, this
                maps the symbols to their compiled definientia.

        deferred_tasks:  A list of callables that is filled during
                compilation, but that will be executed only after
                compilation has finished. Typically, it contains
                semantic checks that require information that
                is only available upon completion of compilation.

        root_symbol:  The name of the root symbol.

        directives:  A dictionary of all directives and their default
                values.

        re_flags:  A set of regular expression flags to be added to all
                regular expressions found in the current parsing process
    """
    COMMENT_KEYWORD = "COMMENT__"
    WHITESPACE_KEYWORD = "WSP__"
    RAW_WS_KEYWORD = "WHITESPACE__"
    WHITESPACE_PARSER_KEYWORD = "whitespace__"
    RESERVED_SYMBOLS = {WHITESPACE_KEYWORD, RAW_WS_KEYWORD, COMMENT_KEYWORD}
    AST_ERROR = "Badly structured syntax tree. " \
                "Potentially due to erroneous AST transformation."
    PREFIX_TABLE = {'§': 'Required',
                    '&': 'Lookahead', '!': 'NegativeLookahead',
                    '-&': 'Lookbehind', '-!': 'NegativeLookbehind',
                    '::': 'Pop', ':': 'Retrieve'}
    WHITESPACE = {'horizontal': r'[\t ]*',  # default: horizontal
                  'linefeed': r'[ \t]*\n?(?!\s*\n)[ \t]*',
                  'vertical': r'\s*'}
    REPEATABLE_DIRECTIVES = {'tokens'}


    def __init__(self, grammar_name="", grammar_source=""):
        super(EBNFCompiler, self).__init__(grammar_name, grammar_source)
        self._reset()


    def _reset(self):
        super(EBNFCompiler, self)._reset()
        self._result = ''           # type: str
        self.re_flags = set()       # type: Set[str]
        self.rules = OrderedDict()  # type: OrderedDict[str, List[Node]]
        self.current_symbols = []   # type: List[Node]
        self.symbols = {}           # type: Dict[str, Node]
        self.variables = set()      # type: Set[str]
        self.recursive = set()      # type: Set[str]
        self.definitions = {}       # type: Dict[str, str]
        self.deferred_tasks = []    # type: List[Callable]
        self.root_symbol = ""       # type: str
        self.directives = {'whitespace': self.WHITESPACE['vertical'],
                           'comment': '',
                           'literalws': {'right'},
                           'tokens': set(),  # alt. 'preprocessor_tokens'
                           'filter': dict()}  # alt. 'filter'
        # self.directives['ignorecase']: False
        self.defined_directives = set()  # type: Set[str]

    @property
    def result(self) -> str:
        return self._result

    # methods for generating skeleton code for preprocessor, transformer, and compiler

    def gen_preprocessor_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for a preprocessor-function for
        the previously compiled formal language.
        """
        name = self.grammar_name + "Preprocessor"
        return "def %s(text):\n    return text, lambda i: i\n" % name \
               + PREPROCESSOR_FACTORY.format(NAME=self.grammar_name)


    def gen_transformer_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for the AST-transformation for the
        previously compiled formal language.
        """
        if not self.rules:
            raise EBNFCompilerError('Compiler must be run before calling '
                                    '"gen_transformer_skeleton()"!')
        tt_name = self.grammar_name + '_AST_transformation_table'
        transtable = [tt_name + ' = {',
                      '    # AST Transformations for the ' + self.grammar_name + '-grammar']
        transtable.append('    "+": remove_empty,')
        for name in self.rules:
            transformations = '[]'
            rule = self.definitions[name]
            if rule.startswith('Alternative'):
                transformations = '[replace_or_reduce]'
            elif rule.startswith('Synonym'):
                transformations = '[reduce_single_child]'
            transtable.append('    "' + name + '": %s,' % transformations)
        transtable.append('    ":_Token, :_RE": reduce_single_child,')
        transtable += ['    "*": replace_by_single_child', '}', '']
        transtable += [TRANSFORMER_FACTORY.format(NAME=self.grammar_name)]
        return '\n'.join(transtable)


    def gen_compiler_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for a Compiler-class for the
        previously compiled formal language.
        """
        if not self.rules:
            raise EBNFCompilerError('Compiler has not been run before calling '
                                    '"gen_compiler_skeleton()"!')
        compiler = ['class ' + self.grammar_name + 'Compiler(Compiler):',
                    '    """Compiler for the abstract-syntax-tree of a ' +
                    self.grammar_name + ' source file.',
                    '    """', '',
                    '    def __init__(self, grammar_name="' +
                    self.grammar_name + '", grammar_source=""):',
                    '        super(' + self.grammar_name +
                    'Compiler, self).__init__(grammar_name, grammar_source)',
                    r"        assert re.match('\w+\Z', grammar_name)", '',
                    '    def _reset(self):',
                    '        super()._reset()',
                    '        # initialize your variables here, not in the constructor!']
        for name in self.rules:
            method_name = Compiler.method_name(name)
            if name == self.root_symbol:
                compiler += ['    def ' + method_name + '(self, node):',
                             '        return self.fallback_compiler(node)', '']
            else:
                compiler += ['    # def ' + method_name + '(self, node):',
                             '    #     return node', '']
        compiler += [COMPILER_FACTORY.format(NAME=self.grammar_name)]
        return '\n'.join(compiler)
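
    # Editor's sketch of how the gen_*_skeleton() helpers are typically used
    # (names are illustrative): after the compiler has been run on the AST of
    # an EBNF grammar, they return boilerplate code for the other compilation
    # stages, e.g.
    #
    #     compiler = get_ebnf_compiler('Arithmetic', ebnf_source)
    #     parser_code = compiler(ebnf_ast)              # must run before gen_*_skeleton()
    #     transformer_code = compiler.gen_transformer_skeleton()
    #     compiler_code = compiler.gen_compiler_skeleton()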

    def verify_transformation_table(self, transtable):
        """
        Checks for symbols that occur in the transformation-table but have
        never been defined in the grammar. Usually, this kind of
        inconsistency results from an error like a typo in the transformation
        table.
        """
        assert self._dirty_flag
        table_entries = set(expand_table(transtable).keys()) - {'*', '+', '~'}
        symbols = self.rules.keys()
        messages = []
        for entry in table_entries:
            if entry not in symbols and not entry.startswith(":"):
                messages.append(Error(('Symbol "%s" is not defined in grammar %s but appears in '
                                       'the transformation table!') % (entry, self.grammar_name),
                                      0, Error.UNDEFINED_SYMBOL_IN_TRANSFORMATION_TABLE))
        return messages


    def assemble_parser(self, definitions: List[Tuple[str, str]], root_node: Node) -> str:
        """
        Creates the Python code for the parser after compilation of
        the EBNF-Grammar
        """

        # execute deferred tasks, for example semantic checks that cannot
        # be done before the symbol table is complete

        for task in self.deferred_tasks:
            task()

        # provide for capturing of symbols that are variables, i.e. the
        # value of which will be retrieved at some point during the parsing process

        if self.variables:
            for i in range(len(definitions)):
                if definitions[i][0] in self.variables:
                    definitions[i] = (definitions[i][0], 'Capture(%s)' % definitions[i][1])

        # add special fields for Grammar class

        definitions.append((self.WHITESPACE_PARSER_KEYWORD,
                            'Whitespace(%s)' % self.WHITESPACE_KEYWORD))
        definitions.append(('wspR__', self.WHITESPACE_KEYWORD
                            if 'right' in self.directives['literalws'] else "''"))
        definitions.append(('wspL__', self.WHITESPACE_KEYWORD
                            if 'left' in self.directives['literalws'] else "''"))
        definitions.append((self.WHITESPACE_KEYWORD,
                            ("mixin_comment(whitespace=" + self.RAW_WS_KEYWORD +
                             ", comment=" + self.COMMENT_KEYWORD + ")")))
        definitions.append((self.RAW_WS_KEYWORD, "r'{whitespace}'".format(**self.directives)))
        definitions.append((self.COMMENT_KEYWORD, "r'{comment}'".format(**self.directives)))

        # prepare parser class header and docstring and
        # add EBNF grammar to the doc string of the parser class

        article = 'an ' if self.grammar_name[0:1] in "AaEeIiOoUu" else 'a '  # what about 'hour', 'universe' etc.?
        declarations = ['class ' + self.grammar_name +
                        'Grammar(Grammar):',
                        'r"""Parser for ' + article + self.grammar_name +
                        ' source file' +
                        (', with this grammar:' if self.grammar_source else '.')]
        definitions.append(('parser_initialization__', '"upon instantiation"'))
        if self.grammar_source:
            definitions.append(('source_hash__',
                                '"%s"' % md5(self.grammar_source, __version__)))
            declarations.append('')
            declarations += [line for line in self.grammar_source.split('\n')]
            while declarations[-1].strip() == '':
                declarations = declarations[:-1]
        declarations.append('"""')

        # turn definitions into declarations in reverse order

        self.root_symbol = definitions[0][0] if definitions else ""
        definitions.reverse()
        declarations += [symbol + ' = Forward()'
                         for symbol in sorted(list(self.recursive))]
        for symbol, statement in definitions:
            if symbol in self.recursive:
                declarations += [symbol + '.set(' + statement + ')']
            else:
                declarations += [symbol + ' = ' + statement]

        # check for symbols used but never defined

        defined_symbols = set(self.rules.keys()) | self.RESERVED_SYMBOLS
        for symbol in self.symbols:
            if symbol not in defined_symbols:
                self.tree.new_error(self.symbols[symbol],
                               "Missing definition for symbol '%s'" % symbol)
                # root_node.error_flag = True

        # check for unconnected rules

        defined_symbols.difference_update(self.RESERVED_SYMBOLS)

        def remove_connections(symbol):
            """Recursively removes all symbols which appear in the
            definiens of a particular symbol."""
            if symbol in defined_symbols:
                defined_symbols.remove(symbol)
                for related in self.rules[symbol][1:]:
                    remove_connections(str(related))

        remove_connections(self.root_symbol)
        for leftover in defined_symbols:
            self.tree.new_error(self.rules[leftover][0],
                                ('Rule "%s" is not connected to parser root "%s"!') %
                                (leftover, self.root_symbol), Error.WARNING)

        # set root_symbol parser and assemble python grammar definition

        if self.root_symbol and 'root__' not in self.rules:
            declarations.append('root__ = ' + self.root_symbol)
        declarations.append('')
        self._result = '\n    '.join(declarations) \
                       + GRAMMAR_FACTORY.format(NAME=self.grammar_name)
        return self._result


    ## compilation methods

    def on_syntax(self, node: Node) -> str:
        definitions = []  # type: List[Tuple[str, str]]

        # drop the wrapping sequence node
        if len(node.children) == 1 and not node.children[0].parser.name:
            node = node.children[0]

        # compile definitions and directives and collect definitions
        for nd in node.children:
            if nd.parser.name == "definition":
                definitions.append(self.compile(nd))
            else:
                assert nd.parser.name == "directive", nd.as_sxpr()
                self.compile(nd)
            # node.error_flag = max(node.error_flag, nd.error_flag)
        self.definitions.update(definitions)

        return self.assemble_parser(definitions, node)


    def on_definition(self, node: Node) -> Tuple[str, str]:
        rule = node.children[0].content
        if rule in self.rules:
            first = self.rules[rule][0]
            if not first.errors:
                self.tree.new_error(first, 'First definition of rule "%s" '
                               'followed by illegal redefinitions.' % rule)
            self.tree.new_error(node, 'A rule "%s" has already been defined earlier.' % rule)
        elif rule in EBNFCompiler.RESERVED_SYMBOLS:
            self.tree.new_error(node, 'Symbol "%s" is a reserved symbol.' % rule)
        elif not sane_parser_name(rule):
            self.tree.new_error(node, 'Illegal symbol "%s". Symbols must not start or '
                                ' end with a double underscore "__".' % rule)
        elif rule in self.directives['tokens']:
            self.tree.new_error(node, 'Symbol "%s" has already been defined as '
                                'a preprocessor token.' % rule)
        elif keyword.iskeyword(rule):
            self.tree.new_error(node, 'Python keyword "%s" may not be used as a symbol. '
                                % rule + '(This may change in the future.)')
        try:
            self.current_symbols = [node]
            self.rules[rule] = self.current_symbols
            defn = self.compile(node.children[1])
            if rule in self.variables:
                defn = 'Capture(%s)' % defn
                self.variables.remove(rule)
            elif defn.find("(") < 0:
                # assume it's a synonym, like 'page = REGEX_PAGE_NR'
                defn = 'Synonym(%s)' % defn
        except TypeError as error:
            from traceback import extract_tb
            trace = str(extract_tb(error.__traceback__)[-1])
            errmsg = "%s (TypeError: %s; %s)\n%s" \
                     % (EBNFCompiler.AST_ERROR, str(error), trace, node.as_sxpr())
            self.tree.new_error(node, errmsg)
            rule, defn = rule + ':error', '"' + errmsg + '"'
        return rule, defn


    def _check_rx(self, node: Node, rx: str) -> str:
        """
        Checks whether the string `rx` represents a valid regular
        expression. Makes sure that multiline regular expressions are
        prepended by the multiline-flag. Returns the regular expression string.
        """
        flags = self.re_flags | {'x'} if rx.find('\n') >= 0 else self.re_flags
        if flags:  rx = "(?%s)%s" % ("".join(flags), rx)
        try:
            re.compile(rx)
        except Exception as re_error:
            self.tree.new_error(node, "malformed regular expression %s: %s" %
                                (repr(rx), str(re_error)))
        return rx


    def on_directive(self, node: Node) -> str:
        key = node.children[0].content.lower()
        assert key not in self.directives['tokens']

        if key not in self.REPEATABLE_DIRECTIVES:
            if key in self.defined_directives:
                self.tree.new_error(node, 'Directive "%s" has already been defined earlier. '
                                    % key + 'Later definition will be ignored!',
                                    code=Error.REDEFINED_DIRECTIVE_WARNING)
                return ""
            self.defined_directives.add(key)

        if key in {'comment', 'whitespace'}:
            if node.children[1].parser.name == "list_":
                if len(node.children[1].result) != 1:
                    self.tree.new_error(node, 'Directive "%s" must have exactly one value, not %i.'
                                        % (key, len(node.children[1].result)))
                value = self.compile(node.children[1]).pop()
                if key == 'whitespace' and value in EBNFCompiler.WHITESPACE:
                    value = EBNFCompiler.WHITESPACE[value]  # replace whitespace-name by regex
                else:
                    self.tree.new_error(node, 'Value "%s" not allowed for directive "%s".'
                                        % (value, key))
            else:
                value = node.children[1].content.strip("~")  # cast(str, node.children[
                # 1].result).strip("~")
                if value != node.children[1].content:  # cast(str, node.children[1].result):
                    self.tree.new_error(node, "Whitespace marker '~' not allowed in definition "
                                        "of %s regular expression." % key)
                if value[0] + value[-1] in {'""', "''"}:
                    value = escape_re(value[1:-1])
                elif value[0] + value[-1] == '//':
                    value = self._check_rx(node, value[1:-1])
                if key == 'whitespace' and not re.match(value, ''):
                    self.tree.new_error(node, "Implicit whitespace should always "
                                        "match the empty string, /%s/ does not." % value)
            self.directives[key] = value

        elif key == 'ignorecase':
            if node.children[1].content.lower() not in {"off", "false", "no"}:
                self.re_flags.add('i')

        # elif key == 'testing':
        #     value = node.children[1].content
        #     self.directives['testing'] = value.lower() not in {"off", "false", "no"}

        elif key == 'literalws':
            value = {item.lower() for item in self.compile(node.children[1])}
            if ((value - {'left', 'right', 'both', 'none'})
                    or ('none' in value and len(value) > 1)):
                self.tree.new_error(node, 'Directive "literalws" allows only `left`, `right`, '
                                    '`both` or `none`, not `%s`' % ", ".join(value))
            wsp = {'left', 'right'} if 'both' in value \
                else {} if 'none' in value else value
            self.directives[key] = list(wsp)

        elif key in {'tokens', 'preprocessor_tokens'}:
            tokens = self.compile(node.children[1])
            redeclared = self.directives['tokens'] & tokens
            if redeclared:
                self.tree.new_error(node, 'Tokens %s have already been declared earlier. '
                                    % str(redeclared) + 'Later declaration will be ignored',
                                    code=Error.REDECLARED_TOKEN_WARNING)
            self.directives['tokens'] |= tokens - redeclared

        elif key.endswith('_filter'):
            filter_set = self.compile(node.children[1])
            if not isinstance(filter_set, set) or len(filter_set) != 1:
                self.tree.new_error(node, 'Directive "%s" accepts exactly one symbol, not %s'
                                    % (key, str(filter_set)))
            self.directives['filter'][key[:-7]] = filter_set.pop()

        else:
            self.tree.new_error(node, 'Unknown directive %s! (Known ones are %s.)' %
                                (key, ', '.join(list(self.directives.keys()))))

        return ""


    def non_terminal(self, node: Node, parser_class: str, custom_args: List[str]=[]) -> str:
        """
        Compiles any non-terminal, where `parser_class` indicates the Parser class
        name for the particular non-terminal.
        """
        # print(node.as_sxpr())
        arguments = [self.compile(r) for r in node.children] + custom_args
        # node.error_flag = max(node.error_flag, max(t.error_flag for t in node.children))
        return parser_class + '(' + ', '.join(arguments) + ')'


    def on_expression(self, node) -> str:
        # TODO: Add check for errors like "a" | "ab" (which will always yield a, even for ab)
        return self.non_terminal(node, 'Alternative')


    def on_term(self, node) -> str:
        # Basically, the following code does only this:
        #       return self.non_terminal(node, 'Series')
        # What makes it (look) more complicated is the handling of the
        # mandatory §-operator
        mandatory_marker = []
        filtered_children = []  # type: List[Node]
        for nd in node.children:
            if nd.parser.ptype == TOKEN_PTYPE and nd.content == "§":
                mandatory_marker.append(len(filtered_children))
                # if len(filtered_children) == 0:
                #     self.tree.new_error(nd.pos, 'First item of a series should not be mandatory.',
                #                         Error.WARNING)
                if len(mandatory_marker) > 1:
                    self.tree.new_error(nd, 'One mandatory marker (§) is sufficient to declare '
                                        'the rest of the series as mandatory.', Error.WARNING)
            else:
                filtered_children.append(nd)
        saved_result = node.result
        node.result = tuple(filtered_children)
        if len(filtered_children) == 1:
            compiled = self.non_terminal(node, 'Required')
        else:
            custom_args = ['mandatory=%i' % mandatory_marker[0]] if mandatory_marker else []
            compiled = self.non_terminal(node, 'Series', custom_args)
        node.result = saved_result
        return compiled


    def on_factor(self, node: Node) -> str:
        assert node.children
        assert len(node.children) >= 2, node.as_sxpr()
        prefix = node.children[0].content
        custom_args = []  # type: List[str]

        if prefix in {'::', ':'}:
            assert len(node.children) == 2
            arg = node.children[-1]
            if arg.parser.name != 'symbol':
                self.tree.new_error(node, ('Retrieve Operator "%s" requires a symbol, '
                                    'and not a %s.') % (prefix, str(arg.parser)))
                return str(arg.result)
            if str(arg) in self.directives['filter']:
                custom_args = ['rfilter=%s' % self.directives['filter'][str(arg)]]
            self.variables.add(str(arg))  # cast(str, arg.result)

        elif len(node.children) > 2:
            # shift = (Node(node.parser, node.result[1].result),)
            # node.result[1].result = shift + node.result[2:]
            node.children[1].result = (Node(node.children[1].parser, node.children[1].result),) \
                                    + node.children[2:]
            node.children[1].parser = node.parser
            node.result = (node.children[0], node.children[1])

        node.result = node.children[1:]
        try:
            parser_class = self.PREFIX_TABLE[prefix]
            result = self.non_terminal(node, parser_class, custom_args)
            if prefix[:1] == '-':
                def check(node):
                    nd = node
                    if len(nd.children) >= 1:
                        nd = nd.children[0]
                    while nd.parser.name == "symbol":
                        symlist = self.rules.get(nd.content, [])
                        if len(symlist) == 2:
                            nd = symlist[1]
                        else:
                            if len(symlist) == 1:
                                nd = symlist[0].children[1]
                            break
                    if (nd.parser.name != "regexp" or nd.content[:1] != '/'
                            or nd.content[-1:] != '/'):