ebnf.py 41.5 KB
Newer Older
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
# ebnf.py - EBNF -> Python-Parser compilation for DHParser
#
# Copyright 2016  by Eckhart Arnold (arnold@badw.de)
#                 Bavarian Academy of Sciences and Humanities (badw.de)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.  See the License for the specific language governing
# permissions and limitations under the License.
17
18


19
20
21
22
23
"""
Module ``ebnf`` provides a self-hosting parser for EBNF-Grammars as
well as an EBNF-compiler that compiles an EBNF-Grammar into a
DHParser based Grammar class that can be executed to parse source text
conforming to this grammar into contrete syntax trees.
24
25
"""

26

27
import keyword
28
from collections import OrderedDict
29
from functools import partial
30

eckhart's avatar
eckhart committed
31
from DHParser.compile import CompilerError, Compiler
32
from DHParser.error import Error
33
from DHParser.parse import Grammar, mixin_comment, Forward, RegExp, Whitespace, RE, \
34
    NegativeLookahead, Alternative, Series, Option, OneOrMore, ZeroOrMore, Token
35
from DHParser.preprocess import nil_preprocessor, PreprocessorFunc
36
from DHParser.syntaxtree import Node, WHITESPACE_PTYPE, TOKEN_PTYPE
eckhart's avatar
eckhart committed
37
38
39
from DHParser.toolkit import load_if_file, escape_re, md5, sane_parser_name, re, expand_table, \
    typing
from DHParser.transform import TransformationFunc, traverse, remove_brackets, \
Eckhart Arnold's avatar
Eckhart Arnold committed
40
    reduce_single_child, replace_by_single_child, remove_expendables, \
41
    remove_tokens, flatten, forbid, assert_content, remove_infix_operator
42
from DHParser.versionnumber import __version__
eckhart's avatar
eckhart committed
43
44
from typing import Callable, Dict, List, Set, Tuple

45

46
__all__ = ('get_ebnf_preprocessor',
47
48
49
50
           'get_ebnf_grammar',
           'get_ebnf_transformer',
           'get_ebnf_compiler',
           'EBNFGrammar',
51
           'EBNFTransform',
Eckhart Arnold's avatar
Eckhart Arnold committed
52
           'EBNFCompilerError',
53
           'EBNFCompiler',
54
           'grammar_changed',
55
           'PreprocessorFactoryFunc',
56
57
           'ParserFactoryFunc',
           'TransformerFactoryFunc',
58
           'CompilerFactoryFunc')
59
60


Eckhart Arnold's avatar
Eckhart Arnold committed
61
62
63
64
65
66
67
########################################################################
#
# EBNF scanning
#
########################################################################


68
69
def get_ebnf_preprocessor() -> PreprocessorFunc:
    """Returns the preprocessor for EBNF sources. EBNF needs no
    preprocessing, so this is simply the do-nothing preprocessor."""
    return nil_preprocessor
Eckhart Arnold's avatar
Eckhart Arnold committed
70
71
72
73
74
75
76
77


########################################################################
#
# EBNF parsing
#
########################################################################

78

di68kap's avatar
di68kap committed
79
class EBNFGrammar(Grammar):
    r"""Parser for an EBNF source file, with this grammar:

    # EBNF-Grammar in EBNF

    @ comment    = /#.*(?:\n|$)/                    # comments start with '#' and eat all chars up to and including '\n'
    @ whitespace = /\s*/                            # whitespace includes linefeed
    @ literalws  = right                            # trailing whitespace of literals will be ignored tacitly

    syntax     = [~//] { definition | directive } §EOF
    definition = symbol §"=" expression
    directive  = "@" §symbol "=" ( regexp | literal | list_ )

    expression = term { "|" term }
    term       = { ["§"] factor }+                       # "§" means all following factors mandatory
    factor     = [flowmarker] [retrieveop] symbol !"="   # negative lookahead to be sure it's not a definition
               | [flowmarker] literal
               | [flowmarker] plaintext
               | [flowmarker] regexp
               | [flowmarker] whitespace
               | [flowmarker] oneormore
               | [flowmarker] group
               | [flowmarker] unordered
               | repetition
               | option

    flowmarker = "!"  | "&"                         # '!' negative lookahead, '&' positive lookahead
               | "-!" | "-&"                        # '-' negative lookbehind, '-&' positive lookbehind
    retrieveop = "::" | ":"                         # '::' pop, ':' retrieve

    group      = "(" §expression ")"
    unordered  = "<" §expression ">"                # elements of expression in arbitrary order
    oneormore  = "{" expression "}+"
    repetition = "{" §expression "}"
    option     = "[" §expression "]"

    symbol     = /(?!\d)\w+/~                       # e.g. expression, factor, parameter_list
    literal    = /"(?:[^"]|\\")*?"/~                # e.g. "(", '+', 'while'
               | /'(?:[^']|\\')*?'/~                # whitespace following literals will be ignored tacitly.
    plaintext  = /`(?:[^"]|\\")*?`/~                # like literal but does not eat whitespace
    regexp     = /~?\/(?:\\\/|[^\/])*?\/~?/~        # e.g. /\w+/, ~/#.*(?:\n|$)/~
                                                    # '~' is a whitespace-marker, if present leading or trailing
                                                    # whitespace of a regular expression will be ignored tacitly.
    whitespace = /~/~                               # implicit or default whitespace
    list_      = /\w+/~ { "," /\w+/~ }              # comma separated list of symbols, e.g. BEGIN_LIST, END_LIST,
                                                    # BEGIN_QUOTE, END_QUOTE ; see CommonMark/markdown.py for an exmaple
    EOF = !/./
    """
    # NOTE(review): the parser attributes below appear to be auto-generated
    # from the grammar in the docstring (source_hash__ ties the code to the
    # EBNF source) — prefer regenerating over hand-editing; confirm with the
    # project's build process.
    expression = Forward()
    source_hash__ = "3fc9f5a340f560e847d9af0b61a68743"
    parser_initialization__ = "upon instantiation"
    COMMENT__ = r'#.*(?:\n|$)'
    WHITESPACE__ = r'\s*'
    # whitespace pattern combined with the comment pattern
    WSP__ = mixin_comment(whitespace=WHITESPACE__, comment=COMMENT__)
    wspL__ = ''          # no implicit whitespace to the left of literals
    wspR__ = WSP__       # implicit whitespace to the right of literals
    whitespace__ = Whitespace(WSP__)
    EOF = NegativeLookahead(RegExp('.'))
    list_ = Series(RE('\\w+'), ZeroOrMore(Series(Token(","), RE('\\w+'))))
    whitespace = RE('~')
    regexp = RE('~?/(?:\\\\/|[^/])*?/~?')
    plaintext = RE('`(?:[^"]|\\\\")*?`')
    literal = Alternative(RE('"(?:[^"]|\\\\")*?"'), RE("'(?:[^']|\\\\')*?'"))
    symbol = RE('(?!\\d)\\w+')
    option = Series(Token("["), expression, Token("]"), mandatory=1)
    repetition = Series(Token("{"), expression, Token("}"), mandatory=1)
    oneormore = Series(Token("{"), expression, Token("}+"))
    unordered = Series(Token("<"), expression, Token(">"), mandatory=1)
    group = Series(Token("("), expression, Token(")"), mandatory=1)
    retrieveop = Alternative(Token("::"), Token(":"))
    flowmarker = Alternative(Token("!"), Token("&"), Token("-!"), Token("-&"))
    factor = Alternative(Series(Option(flowmarker), Option(retrieveop), symbol, NegativeLookahead(Token("="))),
                         Series(Option(flowmarker), literal), Series(Option(flowmarker), plaintext),
                         Series(Option(flowmarker), regexp), Series(Option(flowmarker), whitespace),
                         Series(Option(flowmarker), oneormore), Series(Option(flowmarker), group),
                         Series(Option(flowmarker), unordered), repetition, option)
    term = OneOrMore(Series(Option(Token("§")), factor))
    # close the recursion: `expression` was declared above as a Forward()
    expression.set(Series(term, ZeroOrMore(Series(Token("|"), term))))
    directive = Series(Token("@"), symbol, Token("="), Alternative(regexp, literal, list_), mandatory=1)
    definition = Series(symbol, Token("="), expression, mandatory=1)
    syntax = Series(Option(RE('', wR='', wL=WSP__)), ZeroOrMore(Alternative(definition, directive)), EOF, mandatory=2)
    root__ = syntax


163
def grammar_changed(grammar_class, grammar_source: str) -> bool:
    """
    Returns ``True`` if ``grammar_class`` does not reflect the latest
    changes of ``grammar_source``

    Parameters:
        grammar_class:  the parser class representing the grammar
            or the file name of a compiler suite containing the grammar
        grammar_source:  File name or string representation of the
            EBNF code of the grammar

    Returns (bool):
        True, if the source text of the grammar is different from the
        source from which the grammar class was generated
    """
    grammar = load_if_file(grammar_source)
    chksum = md5(grammar, __version__)
    if isinstance(grammar_class, str):
        # grammar_class = load_compiler_suite(grammar_class)[1]
        with open(grammar_class, 'r', encoding='utf8') as f:
            pycode = f.read()
        # raw strings: '\w' and '\(' are invalid escape sequences in
        # ordinary string literals (DeprecationWarning since Python 3.6)
        m = re.search(r'class \w*\(Grammar\)', pycode)
        if m:
            # compare the stored hash against the hash of the current source
            m = re.search(r'    source_hash__ *= *"([a-z0-9]*)"',
                          pycode[m.span()[1]:])
            return not (m and m.groups() and m.groups()[-1] == chksum)
        else:
            # no Grammar class found at all => treat as changed
            return True
    else:
        return chksum != grammar_class.source_hash__


195
def get_ebnf_grammar() -> EBNFGrammar:
    """Lazily creates and returns the module-level ``EBNFGrammar``
    singleton instance."""
    global thread_local_ebnf_grammar_singleton
    # instantiate on first use; the global stays undefined until then
    if 'thread_local_ebnf_grammar_singleton' not in globals():
        thread_local_ebnf_grammar_singleton = EBNFGrammar()
    return thread_local_ebnf_grammar_singleton


########################################################################
#
# EBNF concrete to abstract syntax tree transformation and validation
#
########################################################################


212
# Table mapping node (tag) names of the EBNF concrete syntax tree to the
# transformations that turn it into an abstract syntax tree; consumed via
# `traverse` in `EBNFTransform` below.
EBNF_AST_transformation_table = {
    # AST Transformations for EBNF-grammar
    "+":
        remove_expendables,
    "syntax":
        [],  # otherwise '"*": replace_by_single_child' would be applied
    "directive, definition":
        remove_tokens('@', '='),
    "expression":
        [replace_by_single_child, flatten, remove_tokens('|')],  # remove_infix_operator],
    "term":
        [replace_by_single_child, flatten],  # supports both idioms:
                                             # "{ factor }+" and "factor { factor }"
    "factor, flowmarker, retrieveop":
        replace_by_single_child,
    "group":
        [remove_brackets, replace_by_single_child],
    "unordered":
        remove_brackets,
    "oneormore, repetition, option":
        [reduce_single_child, remove_brackets,
         forbid('repetition', 'option', 'oneormore'), assert_content(r'(?!§)(?:.|\n)*')],
    "symbol, literal, regexp":
        reduce_single_child,
    (TOKEN_PTYPE, WHITESPACE_PTYPE):
        reduce_single_child,
    "list_":
        [flatten, remove_infix_operator],
    "*":
        replace_by_single_child
}

244

Eckhart Arnold's avatar
Eckhart Arnold committed
245
def EBNFTransform() -> TransformationFunc:
    """Creates a fresh CST-to-AST transformation function for EBNF,
    based on a copy of the module-level transformation table."""
    table = EBNF_AST_transformation_table.copy()
    return partial(traverse, processing_table=table)
di68kap's avatar
di68kap committed
247

248
def get_ebnf_transformer() -> TransformationFunc:
    """Lazily creates and returns the module-level EBNF
    transformation-function singleton."""
    global thread_local_EBNF_transformer_singleton
    # create on first use; the global stays undefined until then
    if 'thread_local_EBNF_transformer_singleton' not in globals():
        thread_local_EBNF_transformer_singleton = EBNFTransform()
    return thread_local_EBNF_transformer_singleton
Eckhart Arnold's avatar
Eckhart Arnold committed
256
257
258
259
260
261
262
263


########################################################################
#
# EBNF abstract syntax tree to Python parser compilation
#
########################################################################

264

265
PreprocessorFactoryFunc = Callable[[], PreprocessorFunc]
266
ParserFactoryFunc = Callable[[], Grammar]
267
TransformerFactoryFunc = Callable[[], TransformationFunc]
268
269
CompilerFactoryFunc = Callable[[], Compiler]

270
271
272
PREPROCESSOR_FACTORY = '''
def get_preprocessor() -> PreprocessorFunc:
    return {NAME}Preprocessor
273
274
275
276
'''


GRAMMAR_FACTORY = '''
277
def get_grammar() -> {NAME}Grammar:
278
279
280
281
282
    global thread_local_{NAME}_grammar_singleton
    try:
        grammar = thread_local_{NAME}_grammar_singleton
    except NameError:
        thread_local_{NAME}_grammar_singleton = {NAME}Grammar()
283
284
        grammar = thread_local_{NAME}_grammar_singleton
    return grammar
285
286
287
288
'''


TRANSFORMER_FACTORY = '''
289
290
291
def {NAME}Transform() -> TransformationDict:
    return partial(traverse, processing_table={NAME}_AST_transformation_table.copy())

292
def get_transformer() -> TransformationFunc:
293
294
295
296
297
298
299
    global thread_local_{NAME}_transformer_singleton
    try:
        transformer = thread_local_{NAME}_transformer_singleton
    except NameError:
        thread_local_{NAME}_transformer_singleton = {NAME}Transform()
        transformer = thread_local_{NAME}_transformer_singleton
    return transformer
300
301
302
303
'''


COMPILER_FACTORY = '''
304
def get_compiler(grammar_name="{NAME}", grammar_source="") -> {NAME}Compiler:
305
306
307
308
309
310
311
    global thread_local_{NAME}_compiler_singleton
    try:
        compiler = thread_local_{NAME}_compiler_singleton
        compiler.set_grammar_name(grammar_name, grammar_source)
    except NameError:
        thread_local_{NAME}_compiler_singleton = \\
            {NAME}Compiler(grammar_name, grammar_source)
312
313
        compiler = thread_local_{NAME}_compiler_singleton
    return compiler
314
315
'''

Eckhart Arnold's avatar
Eckhart Arnold committed
316

eckhart's avatar
eckhart committed
317
class EBNFCompilerError(CompilerError):
    """Error raised by `EBNFCompiler` class. (Not compilation errors
    in the strict sense, see `CompilationError` in module ``dsl.py``)"""


323
class EBNFCompiler(Compiler):
    """
    Generates a Parser from an abstract syntax tree of a grammar specified
    in EBNF-Notation.

    Instances of this class must be called with the root-node of the
    abstract syntax tree from an EBNF-specification of a formal language.
    The returned value is the Python-source-code of a Grammar class for
    this language that can be used to parse texts in this language.
    See classes `parser.Compiler` and `parser.Grammar` for more information.

    Additionally, class EBNFCompiler provides helper methods to generate
    code-skeletons for a preprocessor, AST-transformation and full
    compilation of the formal language. These methods' names start with
    the prefix `gen_`.

    Attributes:
        current_symbols:  During compilation, a list containing the root
                node of the currently compiled definition as first element
                and then the nodes of the symbols that are referred to in
                the currently compiled definition.

        rules:  Dictionary that maps rule names to a list of Nodes that
                contain symbol-references in the definition of the rule.
                The first item in the list is the node of the rule-
                definition itself. Example:

                           `alternative = a | b`

                Now `[node.content for node in self.rules['alternative']]`
                yields `['alternative = a | b', 'a', 'b']`

        symbols:  A mapping of symbol names to their first usage (not
                their definition!) in the EBNF source.

        variables:  A set of symbols names that are used with the
                Pop or Retrieve operator. Because the values of these
                symbols need to be captured they are called variables.
                See `test_parser.TestPopRetrieve` for an example.

        recursive:  A set of symbols that are used recursively and
                therefore require a `Forward`-operator.

        definitions:  A dictionary of definitions. Other than `rules`
                this maps the symbols to their compiled definienda.

        deferred_tasks:  A list of callables that is filled during
                compilation, but that will be executed only after
                compilation has finished. Typically, it contains
                semantic checks that require information that
                is only available upon completion of compilation.

        root:   The name of the root symbol.

        directives:  A dictionary of all directives and their default
                values.

        re_flags:  A set of regular expression flags to be added to all
                regular expressions found in the current parsing process
    """
    # names of the special symbols emitted into every generated Grammar class
    COMMENT_KEYWORD = "COMMENT__"
    WHITESPACE_KEYWORD = "WSP__"
    RAW_WS_KEYWORD = "WHITESPACE__"
    WHITESPACE_PARSER_KEYWORD = "whitespace__"
    # symbols that user grammars must not (re)define
    RESERVED_SYMBOLS = {WHITESPACE_KEYWORD, RAW_WS_KEYWORD, COMMENT_KEYWORD}
    AST_ERROR = "Badly structured syntax tree. " \
                "Potentially due to erroneous AST transformation."
    # maps EBNF prefix operators to the parser classes that implement them
    PREFIX_TABLE = {'§': 'Required',
                    '&': 'Lookahead', '!': 'NegativeLookahead',
                    '-&': 'Lookbehind', '-!': 'NegativeLookbehind',
                    '::': 'Pop', ':': 'Retrieve'}
    # named whitespace regexes selectable via the `@ whitespace` directive
    WHITESPACE = {'horizontal': r'[\t ]*',  # default: horizontal
                  'linefeed': r'[ \t]*\n?(?!\s*\n)[ \t]*',
                  'vertical': r'\s*'}
    # directives that may legally appear more than once in a grammar
    REPEATABLE_DIRECTIVES = {'tokens'}
398

399

400
    def __init__(self, grammar_name="", grammar_source=""):
Eckhart Arnold's avatar
Eckhart Arnold committed
401
        super(EBNFCompiler, self).__init__(grammar_name, grammar_source)
402
403
        self._reset()

404

405
    def _reset(self):
        """Resets all of the compiler's mutable state so that the same
        instance can be reused to compile another grammar."""
        super(EBNFCompiler, self)._reset()
        self._result = ''           # type: str
        self.re_flags = set()       # type: Set[str]
        self.rules = OrderedDict()  # type: OrderedDict[str, List[Node]]
        self.current_symbols = []   # type: List[Node]
        self.symbols = {}           # type: Dict[str, Node]
        self.variables = set()      # type: Set[str]
        self.recursive = set()      # type: Set[str]
        self.definitions = {}       # type: Dict[str, str]
        self.deferred_tasks = []    # type: List[Callable]
        self.root_symbol = ""       # type: str
        # directive defaults; overridden by `@ ...` directives in the grammar
        self.directives = {'whitespace': self.WHITESPACE['vertical'],
                           'comment': '',
                           'literalws': {'right'},
                           'tokens': set(),  # alt. 'preprocessor_tokens'
                           'filter': dict()}  # alt. 'filter'
        # self.directives['ignorecase']: False
        self.defined_directives = set()  # type: Set[str]
424

Eckhart Arnold's avatar
Eckhart Arnold committed
425
    @property
    def result(self) -> str:
        """The generated Python source code of the Grammar class, i.e. the
        result of the last compilation run (empty string before any run)."""
        return self._result

429
    # methods for generating skeleton code for preprocessor, transformer, and compiler
430

431
    def gen_preprocessor_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for a preprocessor-function for
        the previously compiled formal language.
        """
        # identity preprocessor stub followed by the factory boilerplate
        stub = "def {0}Preprocessor(text):\n    return text, lambda i: i\n".format(
            self.grammar_name)
        return stub + PREPROCESSOR_FACTORY.format(NAME=self.grammar_name)
439

440

441
    def gen_transformer_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for the AST-transformation for the
        previously compiled formal language.
        """
        if not self.rules:
            raise EBNFCompilerError('Compiler must be run before calling '
                                    '"gen_transformer_Skeleton()"!')
        # seed the transformation table with its header and catch-all entry
        lines = [self.grammar_name + '_AST_transformation_table = {',
                 '    # AST Transformations for the ' + self.grammar_name + '-grammar',
                 '    "+": remove_empty,']
        # one entry per grammar rule; suggest a transformation matching the
        # compiled form of the rule's definiens
        for name in self.rules:
            rule = self.definitions[name]
            if rule.startswith('Alternative'):
                transformations = '[replace_or_reduce]'
            elif rule.startswith('Synonym'):
                transformations = '[reduce_single_child]'
            else:
                transformations = '[]'
            lines.append('    "' + name + '": %s,' % transformations)
        lines.append('    ":Token, :RE": reduce_single_child,')
        lines.extend(['    "*": replace_by_single_child', '}', '',
                      TRANSFORMER_FACTORY.format(NAME=self.grammar_name)])
        return '\n'.join(lines)

466

467
    def gen_compiler_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for a Compiler-class for the
        previously compiled formal language.
        """
        if not self.rules:
            raise EBNFCompilerError('Compiler has not been run before calling '
                                    '"gen_Compiler_Skeleton()"!')
        # class header and constructor of the generated Compiler subclass
        compiler = ['class ' + self.grammar_name + 'Compiler(Compiler):',
                    '    """Compiler for the abstract-syntax-tree of a ' +
                    self.grammar_name + ' source file.',
                    '    """', '',
                    '    def __init__(self, grammar_name="' +
                    self.grammar_name + '", grammar_source=""):',
                    '        super(' + self.grammar_name +
                    'Compiler, self).__init__(grammar_name, grammar_source)',
                    r"        assert re.match('\w+\Z', grammar_name)", '']
        # one (commented-out) visitor method stub per rule; only the root
        # symbol's method is emitted active
        for name in self.rules:
            method_name = Compiler.method_name(name)
            if name == self.root_symbol:
                compiler += ['    def ' + method_name + '(self, node):',
                             '        return self.fallback_compiler(node)', '']
            else:
                compiler += ['    # def ' + method_name + '(self, node):',
                             '    #     return node', '']
        compiler += [COMPILER_FACTORY.format(NAME=self.grammar_name)]
        return '\n'.join(compiler)
494

495
    def verify_transformation_table(self, transtable):
        """
        Checks for symbols that occur in the transformation-table but have
        never been defined in the grammar. Usually, this kind of
        inconsistency results from an error like a typo in the transformation
        table.
        """
        assert self._dirty_flag
        known = self.rules.keys()
        # wildcard entries and parser-type keys (":...") are always legal
        suspects = set(expand_table(transtable).keys()) - {'*', '+', '~'}
        return [Error(('Symbol "%s" is not defined in grammar %s but appears in '
                       'the transformation table!') % (entry, self.grammar_name),
                      Error.UNDEFINED_SYMBOL_IN_TRANSFORMATION_TABLE)
                for entry in suspects
                if entry not in known and not entry.startswith(":")]

513

514
515
516
517
518
    def assemble_parser(self, definitions: List[Tuple[str, str]], root_node: Node) -> str:
        """
        Creates the Python code for the parser after compilation of
        the EBNF-Grammar.

        `definitions` is a list of (symbol, compiled-parser-expression)
        pairs in definition order; the first entry is taken as the root
        symbol. Returns the complete Python source of the Grammar class
        (also stored in `self._result`).
        """

        # execute deferred tasks, for example semantic checks that cannot
        # be done before the symbol table is complete

        for task in self.deferred_tasks:
            task()

        # provide for capturing of symbols that are variables, i.e. the
        # value of will be retrieved at some point during the parsing process

        if self.variables:
            for i in range(len(definitions)):
                if definitions[i][0] in self.variables:
                    definitions[i] = (definitions[i][0], 'Capture(%s)' % definitions[i][1])

        # add special fields for Grammar class (appended here, emitted first
        # after the later reversal of `definitions`)

        definitions.append((self.WHITESPACE_PARSER_KEYWORD,
                            'Whitespace(%s)' % self.WHITESPACE_KEYWORD))
        definitions.append(('wspR__', self.WHITESPACE_KEYWORD
                            if 'right' in self.directives['literalws'] else "''"))
        definitions.append(('wspL__', self.WHITESPACE_KEYWORD
                            if 'left' in self.directives['literalws'] else "''"))
        definitions.append((self.WHITESPACE_KEYWORD,
                            ("mixin_comment(whitespace=" + self.RAW_WS_KEYWORD +
                             ", comment=" + self.COMMENT_KEYWORD + ")")))
        definitions.append((self.RAW_WS_KEYWORD, "r'{whitespace}'".format(**self.directives)))
        definitions.append((self.COMMENT_KEYWORD, "r'{comment}'".format(**self.directives)))

        # prepare parser class header and docstring and
        # add EBNF grammar to the doc string of the parser class

        article = 'an ' if self.grammar_name[0:1] in "AaEeIiOoUu" else 'a '  # what about 'hour', 'universe' etc.?
        declarations = ['class ' + self.grammar_name +
                        'Grammar(Grammar):',
                        'r"""Parser for ' + article + self.grammar_name +
                        ' source file' +
                        (', with this grammar:' if self.grammar_source else '.')]
        definitions.append(('parser_initialization__', '"upon instantiation"'))
        if self.grammar_source:
            definitions.append(('source_hash__',
                                '"%s"' % md5(self.grammar_source, __version__)))
            declarations.append('')
            declarations += [line for line in self.grammar_source.split('\n')]
            # strip trailing blank lines from the embedded grammar source
            while declarations[-1].strip() == '':
                declarations = declarations[:-1]
        declarations.append('"""')

        # turn definitions into declarations in reverse order

        self.root_symbol = definitions[0][0] if definitions else ""
        definitions.reverse()
        # recursive symbols need a Forward() declaration before first use
        declarations += [symbol + ' = Forward()'
                         for symbol in sorted(list(self.recursive))]
        for symbol, statement in definitions:
            if symbol in self.recursive:
                declarations += [symbol + '.set(' + statement + ')']
            else:
                declarations += [symbol + ' = ' + statement]

        # check for symbols used but never defined

        defined_symbols = set(self.rules.keys()) | self.RESERVED_SYMBOLS
        for symbol in self.symbols:
            if symbol not in defined_symbols:
                self.symbols[symbol].add_error("Missing definition for symbol '%s'" % symbol)
                # root_node.error_flag = True

        # check for unconnected rules

        defined_symbols.difference_update(self.RESERVED_SYMBOLS)

        def remove_connections(symbol):
            """Recursively removes all symbols which appear in the
            definiens of a particular symbol."""
            if symbol in defined_symbols:
                defined_symbols.remove(symbol)
                for related in self.rules[symbol][1:]:
                    remove_connections(str(related))

        # whatever survives the traversal from the root is unreachable
        remove_connections(self.root_symbol)
        for leftover in defined_symbols:
            self.rules[leftover][0].add_error(
                ('Rule "%s" is not connected to parser root "%s" !') %
                (leftover, self.root_symbol), Error.WARNING)

        # set root_symbol parser and assemble python grammar definition

        if self.root_symbol and 'root__' not in self.rules:
            declarations.append('root__ = ' + self.root_symbol)
        declarations.append('')
        self._result = '\n    '.join(declarations) \
                       + GRAMMAR_FACTORY.format(NAME=self.grammar_name)
        return self._result
613

614
615
616

    ## compilation methods

617
    def on_syntax(self, node: Node) -> str:
        """Compiles the root node of the EBNF abstract syntax tree: compiles
        every definition and directive, then assembles and returns the
        Python source of the Grammar class."""
        definitions = []  # type: List[Tuple[str, str]]

        # drop the wrapping sequence node
        if len(node.children) == 1 and not node.children[0].parser.name:
            node = node.children[0]

        # compile definitions and directives and collect definitions
        for nd in node.children:
            if nd.parser.name == "definition":
                definitions.append(self.compile(nd))
            else:
                # directives are compiled for their side effect on
                # self.directives only
                assert nd.parser.name == "directive", nd.as_sxpr()
                self.compile(nd)
            node.error_flag = max(node.error_flag, nd.error_flag)
        self.definitions.update(definitions)

        return self.assemble_parser(definitions, node)
635

636

637
    def on_definition(self, node: Node) -> Tuple[str, str]:
        """
        Compiles a single grammar rule (``symbol = expression``) and returns
        the pair (rule name, Python source of the parser expression).

        Emits errors on ``node`` for redefined rules, reserved or ill-formed
        symbol names, clashes with preprocessor tokens and Python keywords.
        """
        rule = node.children[0].content
        if rule in self.rules:
            # duplicate definition: flag the first occurrence once, and
            # every later occurrence individually
            first = self.rules[rule][0]
            if not first._errors:
                first.add_error('First definition of rule "%s" '
                                'followed by illegal redefinitions.' % rule)
            node.add_error('A rule with name "%s" has already been defined earlier.' % rule)
        elif rule in EBNFCompiler.RESERVED_SYMBOLS:
            node.add_error('Symbol "%s" is a reserved symbol.' % rule)
        elif not sane_parser_name(rule):
            # fixed message: typo "doube" and the doubled space that the
            # old string concatenation ("... or " + " end ...") produced
            node.add_error('Illegal symbol "%s". Symbols must not start or '
                           'end with a double underscore "__".' % rule)
        elif rule in self.directives['tokens']:
            node.add_error('Symbol "%s" has already been defined as '
                           'a preprocessor token.' % rule)
        elif keyword.iskeyword(rule):
            node.add_error('Python keyword "%s" may not be used as a symbol. '
                           % rule + '(This may change in the future.)')
        try:
            # register the rule before compiling its definiens, so that
            # (recursive) references can already resolve to it
            self.current_symbols = [node]
            self.rules[rule] = self.current_symbols
            defn = self.compile(node.children[1])
            if rule in self.variables:
                # the symbol is retrieved somewhere: capture its value
                defn = 'Capture(%s)' % defn
                self.variables.remove(rule)
            elif defn.find("(") < 0:
                # assume it's a synonym, like 'page = REGEX_PAGE_NR'
                defn = 'Synonym(%s)' % defn
        except TypeError as error:
            # almost certainly a malformed AST; report it together with
            # traceback information and the node's S-expression
            from traceback import extract_tb
            trace = str(extract_tb(error.__traceback__)[-1])
            errmsg = "%s (TypeError: %s; %s)\n%s" \
                     % (EBNFCompiler.AST_ERROR, str(error), trace, node.as_sxpr())
            node.add_error(errmsg)
            rule, defn = rule + ':error', '"' + errmsg + '"'
        return rule, defn
674

675

676
    def _check_rx(self, node: Node, rx: str) -> str:
        """
        Checks whether the string ``rx`` is a valid regular expression and
        returns it (possibly with inline flags prepended).

        If the expression spans several lines, the verbose-flag ``x`` is
        added so that the multiline layout does not change its meaning.
        Malformed expressions are reported as errors on ``node`` instead
        of raising.
        """
        active_flags = set(self.re_flags)
        if '\n' in rx:
            active_flags.add('x')
        if active_flags:
            rx = "(?{}){}".format("".join(active_flags), rx)
        try:
            re.compile(rx)
        except Exception as re_error:
            node.add_error("malformed regular expression %s: %s" %
                           (repr(rx), str(re_error)))
        return rx

691

692
    def on_directive(self, node: Node) -> str:
        """
        Processes a directive node (``@key = value``) by updating the
        compiler's state (``self.directives``, ``self.re_flags``, ...).
        Always returns the empty string, since directives do not produce
        parser code themselves.
        """
        key = node.children[0].content.lower()
        assert key not in self.directives['tokens']

        # non-repeatable directives may be given only once per grammar
        if key not in self.REPEATABLE_DIRECTIVES:
            if key in self.defined_directives:
                node.add_error('Directive "%s" has already been defined earlier. ' % key + \
                               'Later definition will be ignored!',
                               code=Error.REDEFINED_DIRECTIVE_WARNING)
                return ""
            self.defined_directives.add(key)

        if key in {'comment', 'whitespace'}:
            # value is either a predefined whitespace-name (list_) or a
            # literal string / regular expression
            if node.children[1].parser.name == "list_":
                if len(node.children[1].result) != 1:
                    node.add_error('Directive "%s" must have one, but not %i values.' %
                                   (key, len(node.children[1].result)))
                value = self.compile(node.children[1]).pop()
                if key == 'whitespace' and value in EBNFCompiler.WHITESPACE:
                    value = EBNFCompiler.WHITESPACE[value]  # replace whitespace-name by regex
                else:
                    node.add_error('Value "%s" not allowed for directive "%s".' % (value, key))
            else:
                value = node.children[1].content.strip("~")  # cast(str, node.children[
                # 1].result).strip("~")
                if value != node.children[1].content:  # cast(str, node.children[1].result):
                    node.add_error("Whitespace marker '~' not allowed in definition of "
                                   "%s regular expression." % key)
                # unwrap quotes (literal) or slashes (regular expression)
                if value[0] + value[-1] in {'""', "''"}:
                    value = escape_re(value[1:-1])
                elif value[0] + value[-1] == '//':
                    value = self._check_rx(node, value[1:-1])
                if key == 'whitespace' and not re.match(value, ''):
                    node.add_error("Implicit whitespace should always match the empty string, "
                                   "/%s/ does not." % value)
            self.directives[key] = value

        elif key == 'ignorecase':
            # anything other than an explicit "off"/"false"/"no" enables it
            if node.children[1].content.lower() not in {"off", "false", "no"}:
                self.re_flags.add('i')

        # elif key == 'testing':
        #     value = node.children[1].content
        #     self.directives['testing'] = value.lower() not in {"off", "false", "no"}

        elif key == 'literalws':
            value = {item.lower() for item in self.compile(node.children[1])}
            if ((value - {'left', 'right', 'both', 'none'})
                    or ('none' in value and len(value) > 1)):
                node.add_error('Directive "literalws" allows the values '
                               '`left`, `right`, `both` or `none`, '
                               'but not `%s`' % ", ".join(value))
            wsp = {'left', 'right'} if 'both' in value \
                else {} if 'none' in value else value
            self.directives[key] = list(wsp)

        elif key in {'tokens', 'preprocessor_tokens'}:
            tokens = self.compile(node.children[1])
            redeclared = self.directives['tokens'] & tokens
            if redeclared:
                node.add_error('Tokens %s have already been declared earlier. '
                               % str(redeclared) + 'Later declaration will be ignored',
                               code=Error.REDECLARED_TOKEN_WARNING)
            self.directives['tokens'] |= tokens - redeclared

        elif key.endswith('_filter'):
            filter_set = self.compile(node.children[1])
            if not isinstance(filter_set, set) or len(filter_set) != 1:
                # fixed message typo: "exactly on symbol" -> "exactly one symbol"
                node.add_error('Directive "%s" accepts exactly one symbol, not %s'
                               % (key, str(filter_set)))
            self.directives['filter'][key[:-7]] = filter_set.pop()

        else:
            node.add_error('Unknown directive %s ! (Known ones are %s .)' %
                           (key, ', '.join(list(self.directives.keys()))))
        return ""

769

770
    def non_terminal(self, node: Node, parser_class: str, custom_args: List[str] = None) -> str:
        """
        Compiles any non-terminal, where `parser_class` indicates the Parser class
        name for the particular non-terminal.

        :param node: node whose children are compiled into the arguments of
            the parser class' constructor call
        :param parser_class: name of the Parser class to instantiate
        :param custom_args: additional argument strings appended verbatim
            to the argument list (default: none)
        :return: Python source text of the parser constructor call
        """
        # default changed from a shared mutable `[]` to the None sentinel
        # to avoid the mutable-default-argument pitfall; behavior unchanged
        arguments = [self.compile(r) for r in node.children] + (custom_args or [])
        # propagate the highest error level among the children to the node
        node.error_flag = max(node.error_flag, max(t.error_flag for t in node.children))
        return parser_class + '(' + ', '.join(arguments) + ')'

779

780
    def on_expression(self, node) -> str:
        """Compiles an alternative ("|"-separated expression) into an
        ``Alternative``-parser call."""
        # TODO: Add check for errors like "a" | "ab" (which will always yield a, even for ab)
        parser_class = 'Alternative'
        return self.non_terminal(node, parser_class)

784

785
    def on_term(self, node) -> str:
        """
        Compiles a sequence of items into a ``Series``-parser call.

        This is essentially ``self.non_terminal(node, 'Series')``; the extra
        work handles the mandatory-operator "§": all "§"-tokens are stripped
        from the children, and the position of the first one is passed on as
        the ``mandatory``-argument of the ``Series``-parser.
        """
        markers = []   # positions of "§" relative to the filtered children
        items = []     # children with every "§"-token removed
        for child in node.children:
            if child.parser.ptype == TOKEN_PTYPE and child.content == "§":
                position = len(items)
                markers.append(position)
                if position == 0:
                    child.add_error('First item of a series should not be mandatory.',
                                    Error.WARNING)
                elif len(markers) > 1:
                    child.add_error('One mandatory marker (§) sufficient to declare the '
                                    'rest of the series as mandatory.', Error.WARNING)
            else:
                items.append(child)
        extra_args = ['mandatory=%i' % markers[0]] if markers else []
        # temporarily swap in the filtered children, so that non_terminal()
        # does not see the stripped "§"-tokens, then restore the original
        original_result = node.result
        node.result = tuple(items)
        compiled = self.non_terminal(node, 'Series', extra_args)
        node.result = original_result
        return compiled
811

812

813
    def on_factor(self, node: Node) -> str:
        """
        Compiles a factor, i.e. an element preceded by a prefix-operator.
        The prefix (first child) is looked up in ``self.PREFIX_TABLE`` and
        the remaining children become the arguments of the resulting
        parser-class call, whose Python source is returned.
        """
        assert node.children
        assert len(node.children) >= 2, node.as_sxpr()
        prefix = node.children[0].content
        custom_args = []  # type: List[str]

        if prefix in {'::', ':'}:
            # retrieve/pop operator: the argument must be a single symbol,
            # which is registered as a variable
            assert len(node.children) == 2
            arg = node.children[-1]
            if arg.parser.name != 'symbol':
                node.add_error(('Retrieve Operator "%s" requires a symbol, '
                                'and not a %s.') % (prefix, str(arg.parser)))
                return str(arg.result)
            if str(arg) in self.directives['filter']:
                # pass the symbol's registered retrieve-filter along
                custom_args = ['rfilter=%s' % self.directives['filter'][str(arg)]]
            self.variables.add(str(arg))  # cast(str, arg.result)

        elif len(node.children) > 2:
            # more than one child after the prefix: fold the trailing
            # children into child[1], so the prefix applies to the whole
            # remainder as a single unit
            # shift = (Node(node.parser, node.result[1].result),)
            # node.result[1].result = shift + node.result[2:]
            node.children[1].result = (Node(node.children[1].parser, node.children[1].result),) \
                                    + node.children[2:]
            node.children[1].parser = node.parser
            node.result = (node.children[0], node.children[1])

        # drop the prefix-child before compiling the argument(s)
        node.result = node.children[1:]
        try:
            parser_class = self.PREFIX_TABLE[prefix]
            result = self.non_terminal(node, parser_class, custom_args)
            if prefix[:1] == '-':
                # lookbehind-operator: its argument must resolve to a plain
                # regexp; the check is deferred until all rules are known
                def check(node):
                    """Reports an error unless node's argument (possibly
                    reached through a chain of symbol definitions) is a
                    plain /.../ regexp."""
                    nd = node
                    if len(nd.children) >= 1:
                        nd = nd.children[0]
                    while nd.parser.name == "symbol":
                        # follow the symbol to its defining node, if known
                        symlist = self.rules.get(nd.content, [])
                        if len(symlist) == 2:
                            nd = symlist[1]
                        else:
                            if len(symlist) == 1:
                                nd = symlist[0].children[1]
                            break
                    if (nd.parser.name != "regexp" or nd.content[:1] != '/'
                            or nd.content[-1:] != '/'):
                        node.add_error("Lookbehind-parser can only be used with plain RegExp-"
                                       "parsers, not with: " + nd.parser.name + nd.parser.ptype)

                if not result.startswith('RegExp('):
                    self.deferred_tasks.append(lambda: check(node))
            return result
        except KeyError:
            node.add_error('Unknown prefix "%s".' % prefix)
        return ""
866

867

868
    def on_option(self, node) -> str:
        """Compiles an optional item into an ``Option``-parser call."""
        return self.non_terminal(node=node, parser_class='Option')
870

871