ebnf.py 41.5 KB
Newer Older
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
# ebnf.py - EBNF -> Python-Parser compilation for DHParser
#
# Copyright 2016  by Eckhart Arnold (arnold@badw.de)
#                 Bavarian Academy of Sciences an Humanities (badw.de)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.  See the License for the specific language governing
# permissions and limitations under the License.
17
18


19
20
21
22
23
"""
Module ``ebnf`` provides a self-hosting parser for EBNF-Grammars as
well as an EBNF-compiler that compiles an EBNF-Grammar into a
DHParser based Grammar class that can be executed to parse source text
conforming to this grammar into contrete syntax trees.
24
25
"""

26

27
import keyword
28
from collections import OrderedDict
29
from functools import partial
30

eckhart's avatar
eckhart committed
31
from DHParser.compile import CompilerError, Compiler
32
from DHParser.error import Error
33
from DHParser.parse import Grammar, mixin_comment, Forward, RegExp, RE, \
34
    NegativeLookahead, Alternative, Series, Option, OneOrMore, ZeroOrMore, Token
35
from DHParser.preprocess import nil_preprocessor, PreprocessorFunc
36
from DHParser.syntaxtree import Node, WHITESPACE_PTYPE, TOKEN_PTYPE
eckhart's avatar
eckhart committed
37
38
39
from DHParser.toolkit import load_if_file, escape_re, md5, sane_parser_name, re, expand_table, \
    typing
from DHParser.transform import TransformationFunc, traverse, remove_brackets, \
Eckhart Arnold's avatar
Eckhart Arnold committed
40
    reduce_single_child, replace_by_single_child, remove_expendables, \
41
    remove_tokens, flatten, forbid, assert_content, remove_infix_operator
42
from DHParser.versionnumber import __version__
eckhart's avatar
eckhart committed
43
44
from typing import Callable, Dict, List, Set, Tuple

45

46
# Public API of this module.  NOTE: declared as a tuple (immutable), not the
# more conventional list — both are accepted by `from ... import *`.
__all__ = ('get_ebnf_preprocessor',
           'get_ebnf_grammar',
           'get_ebnf_transformer',
           'get_ebnf_compiler',
           'EBNFGrammar',
           'EBNFTransform',
           'EBNFCompilerError',
           'EBNFCompiler',
           'grammar_changed',
           'PreprocessorFactoryFunc',
           'ParserFactoryFunc',
           'TransformerFactoryFunc',
           'CompilerFactoryFunc')
59
60


Eckhart Arnold's avatar
Eckhart Arnold committed
61
62
63
64
65
66
67
########################################################################
#
# EBNF scanning
#
########################################################################


68
69
def get_ebnf_preprocessor() -> PreprocessorFunc:
    """Returns the preprocessor for EBNF source.  EBNF needs no
    preprocessing, so this is the no-op ``nil_preprocessor``."""
    return nil_preprocessor
Eckhart Arnold's avatar
Eckhart Arnold committed
70
71
72
73
74
75
76
77


########################################################################
#
# EBNF parsing
#
########################################################################

78

di68kap's avatar
di68kap committed
79
class EBNFGrammar(Grammar):
    # NOTE(review): this class appears to be auto-generated from the EBNF
    # grammar quoted in the docstring (see source_hash__ below) — prefer
    # regenerating over hand-editing; confirm with the project's build flow.
    r"""Parser for an EBNF source file, with this grammar:

    # EBNF-Grammar in EBNF

    @ comment    = /#.*(?:\n|$)/                    # comments start with '#' and eat all chars up to and including '\n'
    @ whitespace = /\s*/                            # whitespace includes linefeed
    @ literalws  = right                            # trailing whitespace of literals will be ignored tacitly

    syntax     = [~//] { definition | directive } §EOF
    definition = symbol §"=" expression
    directive  = "@" §symbol "=" ( regexp | literal | list_ )

    expression = term { "|" term }
    term       = { ["§"] factor }+                       # "§" means all following factors mandatory
    factor     = [flowmarker] [retrieveop] symbol !"="   # negative lookahead to be sure it's not a definition
               | [flowmarker] literal
               | [flowmarker] plaintext
               | [flowmarker] regexp
               | [flowmarker] whitespace
               | [flowmarker] oneormore
               | [flowmarker] group
               | [flowmarker] unordered
               | repetition
               | option

    flowmarker = "!"  | "&"                         # '!' negative lookahead, '&' positive lookahead
               | "-!" | "-&"                        # '-' negative lookbehind, '-&' positive lookbehind
    retrieveop = "::" | ":"                         # '::' pop, ':' retrieve

    group      = "(" §expression ")"
    unordered  = "<" §expression ">"                # elements of expression in arbitrary order
    oneormore  = "{" expression "}+"
    repetition = "{" §expression "}"
    option     = "[" §expression "]"

    symbol     = /(?!\d)\w+/~                       # e.g. expression, factor, parameter_list
    literal    = /"(?:[^"]|\\")*?"/~                # e.g. "(", '+', 'while'
               | /'(?:[^']|\\')*?'/~                # whitespace following literals will be ignored tacitly.
    plaintext  = /`(?:[^"]|\\")*?`/~                # like literal but does not eat whitespace
    regexp     = /~?\/(?:\\\/|[^\/])*?\/~?/~        # e.g. /\w+/, ~/#.*(?:\n|$)/~
                                                    # '~' is a whitespace-marker, if present leading or trailing
                                                    # whitespace of a regular expression will be ignored tacitly.
    whitespace = /~/~                               # implicit or default whitespace
    list_      = /\w+/~ { "," /\w+/~ }              # comma separated list of symbols, e.g. BEGIN_LIST, END_LIST,
                                                    # BEGIN_QUOTE, END_QUOTE ; see CommonMark/markdown.py for an example
    EOF = !/./
    """
    # 'expression' is used before it is defined (recursion) -> Forward-declared.
    expression = Forward()
    # md5 checksum of the EBNF source this class was generated from;
    # compared by grammar_changed() to detect stale generated code.
    source_hash__ = "3fc9f5a340f560e847d9af0b61a68743"
    parser_initialization__ = "upon instantiation"
    COMMENT__ = r'#.*(?:\n|$)'
    WHITESPACE__ = r'\s*'
    # whitespace parser that also swallows comments
    WSP__ = mixin_comment(whitespace=WHITESPACE__, comment=COMMENT__)
    wspL__ = ''
    wspR__ = WSP__
    EOF = NegativeLookahead(RegExp('.'))
    list_ = Series(RE('\\w+'), ZeroOrMore(Series(Token(","), RE('\\w+'))))
    whitespace = RE('~')
    regexp = RE('~?/(?:\\\\/|[^/])*?/~?')
    plaintext = RE('`(?:[^"]|\\\\")*?`')
    literal = Alternative(RE('"(?:[^"]|\\\\")*?"'), RE("'(?:[^']|\\\\')*?'"))
    symbol = RE('(?!\\d)\\w+')
    option = Series(Token("["), expression, Token("]"), mandatory=1)
    repetition = Series(Token("{"), expression, Token("}"), mandatory=1)
    oneormore = Series(Token("{"), expression, Token("}+"))
    unordered = Series(Token("<"), expression, Token(">"), mandatory=1)
    group = Series(Token("("), expression, Token(")"), mandatory=1)
    retrieveop = Alternative(Token("::"), Token(":"))
    flowmarker = Alternative(Token("!"), Token("&"), Token("-!"), Token("-&"))
    factor = Alternative(Series(Option(flowmarker), Option(retrieveop), symbol, NegativeLookahead(Token("="))),
                         Series(Option(flowmarker), literal), Series(Option(flowmarker), plaintext),
                         Series(Option(flowmarker), regexp), Series(Option(flowmarker), whitespace),
                         Series(Option(flowmarker), oneormore), Series(Option(flowmarker), group),
                         Series(Option(flowmarker), unordered), repetition, option)
    term = OneOrMore(Series(Option(Token("§")), factor))
    # close the recursion declared by the Forward above
    expression.set(Series(term, ZeroOrMore(Series(Token("|"), term))))
    directive = Series(Token("@"), symbol, Token("="), Alternative(regexp, literal, list_), mandatory=1)
    definition = Series(symbol, Token("="), expression, mandatory=1)
    syntax = Series(Option(RE('', wR='', wL=WSP__)), ZeroOrMore(Alternative(definition, directive)), EOF, mandatory=2)
    root__ = syntax


162
def grammar_changed(grammar_class, grammar_source: str) -> bool:
    """
    Returns ``True`` if ``grammar_class`` does not reflect the latest
    changes of ``grammar_source``

    Parameters:
        grammar_class:  the parser class representing the grammar
            or the file name of a compiler suite containing the grammar
        grammar_source:  File name or string representation of the
            EBNF code of the grammar

    Returns (bool):
        True, if the source text of the grammar is different from the
        source from which the grammar class was generated
    """
    grammar = load_if_file(grammar_source)
    chksum = md5(grammar, __version__)
    if isinstance(grammar_class, str):
        # grammar_class is the file name of a generated compiler suite:
        # scan its text for the embedded source hash instead of importing it.
        # grammar_class = load_compiler_suite(grammar_class)[1]
        with open(grammar_class, 'r', encoding='utf8') as f:
            pycode = f.read()
        # FIX: the patterns must be raw strings; '\w' and '\(' are invalid
        # escape sequences in ordinary string literals (DeprecationWarning,
        # a SyntaxWarning/SyntaxError in newer Python versions).
        m = re.search(r'class \w*\(Grammar\)', pycode)
        if m:
            m = re.search(r'    source_hash__ *= *"([a-z0-9]*)"',
                          pycode[m.span()[1]:])
            return not (m and m.groups() and m.groups()[-1] == chksum)
        else:
            # no Grammar class found at all -> treat as changed
            return True
    else:
        return chksum != grammar_class.source_hash__


194
def get_ebnf_grammar() -> EBNFGrammar:
    """Returns a singleton ``EBNFGrammar`` instance, creating it lazily on
    first use.

    NOTE(review): despite the ``thread_local_`` naming, the singleton is a
    plain module-level global here — confirm thread-safety expectations.
    """
    global thread_local_ebnf_grammar_singleton
    try:
        return thread_local_ebnf_grammar_singleton
    except NameError:
        thread_local_ebnf_grammar_singleton = EBNFGrammar()
        return thread_local_ebnf_grammar_singleton


########################################################################
#
# EBNF concrete to abstract syntax tree transformation and validation
#
########################################################################


211
# Table mapping node-type names (or comma-separated groups of names) of the
# EBNF concrete syntax tree to the transformations that turn it into an AST.
# Consumed via traverse() in EBNFTransform() below.
EBNF_AST_transformation_table = {
    # AST Transformations for EBNF-grammar
    "+":
        remove_expendables,
    "syntax":
        [],  # otherwise '"*": replace_by_single_child' would be applied
    "directive, definition":
        remove_tokens('@', '='),
    "expression":
        [replace_by_single_child, flatten, remove_tokens('|')],  # remove_infix_operator],
    "term":
        [replace_by_single_child, flatten],  # supports both idioms:
                                             # "{ factor }+" and "factor { factor }"
    "factor, flowmarker, retrieveop":
        replace_by_single_child,
    "group":
        [remove_brackets, replace_by_single_child],
    "unordered":
        remove_brackets,
    "oneormore, repetition, option":
        [reduce_single_child, remove_brackets,
         forbid('repetition', 'option', 'oneormore'), assert_content(r'(?!§)(?:.|\n)*')],
    "symbol, literal, regexp":
        reduce_single_child,
    (TOKEN_PTYPE, WHITESPACE_PTYPE):
        reduce_single_child,
    "list_":
        [flatten, remove_infix_operator],
    "*":
        replace_by_single_child
}

243

Eckhart Arnold's avatar
Eckhart Arnold committed
244
def EBNFTransform() -> TransformationFunc:
    """Creates a fresh transformation function that turns a concrete
    EBNF syntax tree into an AST, using a copy of the table above so the
    shared module-level table is never mutated."""
    return partial(traverse, processing_table=EBNF_AST_transformation_table.copy())
di68kap's avatar
di68kap committed
246

247
def get_ebnf_transformer() -> TransformationFunc:
    """Returns a singleton EBNF AST-transformation function, creating it
    lazily on first use (same pattern as ``get_ebnf_grammar``).

    NOTE(review): the ``thread_local_`` name notwithstanding, this is a
    plain module-level global — confirm thread-safety expectations.
    """
    global thread_local_EBNF_transformer_singleton
    try:
        return thread_local_EBNF_transformer_singleton
    except NameError:
        thread_local_EBNF_transformer_singleton = EBNFTransform()
        return thread_local_EBNF_transformer_singleton
Eckhart Arnold's avatar
Eckhart Arnold committed
255
256
257
258
259
260
261
262


########################################################################
#
# EBNF abstract syntax tree to Python parser compilation
#
########################################################################

263

264
PreprocessorFactoryFunc = Callable[[], PreprocessorFunc]
265
ParserFactoryFunc = Callable[[], Grammar]
266
TransformerFactoryFunc = Callable[[], TransformationFunc]
267
268
CompilerFactoryFunc = Callable[[], Compiler]

269
270
271
PREPROCESSOR_FACTORY = '''
def get_preprocessor() -> PreprocessorFunc:
    return {NAME}Preprocessor
272
273
274
275
'''


GRAMMAR_FACTORY = '''
276
def get_grammar() -> {NAME}Grammar:
277
278
279
280
281
    global thread_local_{NAME}_grammar_singleton
    try:
        grammar = thread_local_{NAME}_grammar_singleton
    except NameError:
        thread_local_{NAME}_grammar_singleton = {NAME}Grammar()
282
283
        grammar = thread_local_{NAME}_grammar_singleton
    return grammar
284
285
286
287
'''


TRANSFORMER_FACTORY = '''
288
289
290
def {NAME}Transform() -> TransformationDict:
    return partial(traverse, processing_table={NAME}_AST_transformation_table.copy())

291
def get_transformer() -> TransformationFunc:
292
293
294
295
296
297
298
    global thread_local_{NAME}_transformer_singleton
    try:
        transformer = thread_local_{NAME}_transformer_singleton
    except NameError:
        thread_local_{NAME}_transformer_singleton = {NAME}Transform()
        transformer = thread_local_{NAME}_transformer_singleton
    return transformer
299
300
301
302
'''


COMPILER_FACTORY = '''
303
def get_compiler(grammar_name="{NAME}", grammar_source="") -> {NAME}Compiler:
304
305
306
307
308
309
310
    global thread_local_{NAME}_compiler_singleton
    try:
        compiler = thread_local_{NAME}_compiler_singleton
        compiler.set_grammar_name(grammar_name, grammar_source)
    except NameError:
        thread_local_{NAME}_compiler_singleton = \\
            {NAME}Compiler(grammar_name, grammar_source)
311
312
        compiler = thread_local_{NAME}_compiler_singleton
    return compiler
313
314
'''

Eckhart Arnold's avatar
Eckhart Arnold committed
315

eckhart's avatar
eckhart committed
316
class EBNFCompilerError(CompilerError):
    """Error raised by `EBNFCompiler` class. (Not compilation errors
    in the strict sense, see `CompilationError` in module ``dsl.py``)"""
    pass


322
class EBNFCompiler(Compiler):
    """
    Generates a Parser from an abstract syntax tree of a grammar specified
    in EBNF-Notation.

    Instances of this class must be called with the root-node of the
    abstract syntax tree from an EBNF-specification of a formal language.
    The returned value is the Python-source-code of a Grammar class for
    this language that can be used to parse texts in this language.
    See classes `parser.Compiler` and `parser.Grammar` for more information.

    Additionally, class EBNFCompiler provides helper methods to generate
    code-skeletons for a preprocessor, AST-transformation and full
    compilation of the formal language. These method's names start with
    the prefix `gen_`.

    Attributes:
        current_symbols:  During compilation, a list containing the root
                node of the currently compiled definition as first element
                and then the nodes of the symbols that are referred to in
                the currently compiled definition.

        rules:  Dictionary that maps rule names to a list of Nodes that
                contain symbol-references in the definition of the rule.
                The first item in the list is the node of the rule-
                definition itself. Example:

                           `alternative = a | b`

                Now `[node.content for node in self.rules['alternative']]`
                yields `['alternative = a | b', 'a', 'b']`

        symbols:  A mapping of symbol names to their first usage (not
                their definition!) in the EBNF source.

        variables:  A set of symbol names that are used with the
                Pop or Retrieve operator. Because the values of these
                symbols need to be captured they are called variables.
                See `test_parser.TestPopRetrieve` for an example.

        recursive:  A set of symbols that are used recursively and
                therefore require a `Forward`-operator.

        definitions:  A dictionary of definitions. Other than `rules`
                this maps the symbols to their compiled definienda.

        deferred_tasks:  A list of callables that is filled during
                compilation, but that will be executed only after
                compilation has finished. Typically, it contains
                semantic checks that require information that
                is only available upon completion of compilation.

        root_symbol:  The name of the root symbol.

        directives:  A dictionary of all directives and their default
                values.

        re_flags:  A set of regular expression flags to be added to all
                regular expressions found in the current parsing process
    """
    # Names of the special class-level fields emitted into generated
    # Grammar classes; these may never be used as grammar symbols.
    COMMENT_KEYWORD = "COMMENT__"
    WHITESPACE_KEYWORD = "WSP__"
    RAW_WS_KEYWORD = "WHITESPACE__"
    WHITESPACE_PARSER_KEYWORD = "whitespace__"
    RESERVED_SYMBOLS = {WHITESPACE_KEYWORD, RAW_WS_KEYWORD, COMMENT_KEYWORD}
    AST_ERROR = "Badly structured syntax tree. " \
                "Potentially due to erroneous AST transformation."
    # Maps EBNF prefix operators to the names of the DHParser parser
    # classes that implement them in generated code.
    PREFIX_TABLE = {'§': 'Required',
                    '&': 'Lookahead', '!': 'NegativeLookahead',
                    '-&': 'Lookbehind', '-!': 'NegativeLookbehind',
                    '::': 'Pop', ':': 'Retrieve'}
    # Regular expressions selectable via the `@ whitespace = ...` directive.
    WHITESPACE = {'horizontal': r'[\t ]*',  # default: horizontal
                  'linefeed': r'[ \t]*\n?(?!\s*\n)[ \t]*',
                  'vertical': r'\s*'}
    # Directives that may legally appear more than once in a grammar.
    REPEATABLE_DIRECTIVES = {'tokens'}
397

398

399
    def __init__(self, grammar_name="", grammar_source=""):
Eckhart Arnold's avatar
Eckhart Arnold committed
400
        super(EBNFCompiler, self).__init__(grammar_name, grammar_source)
401
402
        self._reset()

403

404
    def _reset(self):
        """Resets all state accumulated during a compilation run so the
        compiler instance can be reused.  See the class docstring for the
        meaning of the individual attributes."""
        super(EBNFCompiler, self)._reset()
        self._result = ''           # type: str
        self.re_flags = set()       # type: Set[str]
        self.rules = OrderedDict()  # type: OrderedDict[str, List[Node]]
        self.current_symbols = []   # type: List[Node]
        self.symbols = {}           # type: Dict[str, Node]
        self.variables = set()      # type: Set[str]
        self.recursive = set()      # type: Set[str]
        self.definitions = {}       # type: Dict[str, str]
        self.deferred_tasks = []    # type: List[Callable]
        self.root_symbol = ""       # type: str
        # directive defaults; overwritten by `@ ...` directives in the grammar
        self.directives = {'whitespace': self.WHITESPACE['horizontal'],
                           'comment': '',
                           'literalws': {'right'},
                           'tokens': set(),  # alt. 'preprocessor_tokens'
                           'filter': dict()}  # alt. 'filter'
        # self.directives['ignorecase']: False
        self.defined_directives = set()  # type: Set[str]
423

Eckhart Arnold's avatar
Eckhart Arnold committed
424
    @property
    def result(self) -> str:
        """The Python source code of the Grammar class produced by the last
        compilation run; the empty string before the first run (see
        ``_reset`` and ``assemble_parser``)."""
        return self._result

428
    # methods for generating skeleton code for preprocessor, transformer, and compiler
429

430
    def gen_preprocessor_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for a preprocessor-function for
        the previously compiled formal language.
        """
        name = self.grammar_name + "Preprocessor"
        # identity preprocessor stub followed by the factory boilerplate
        stub = "def %s(text):\n    return text, lambda i: i\n" % name
        return stub + PREPROCESSOR_FACTORY.format(NAME=self.grammar_name)
438

439

440
    def gen_transformer_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for the AST-transformation for the
        previously compiled formal language.

        Raises:
            EBNFCompilerError: if called before any grammar has been
                compiled (i.e. ``self.rules`` is still empty).
        """
        if not self.rules:
            raise EBNFCompilerError('Compiler must be run before calling '
                                    '"gen_transformer_Skeleton()"!')
        tt_name = self.grammar_name + '_AST_transformation_table'
        transtable = [tt_name + ' = {',
                      '    # AST Transformations for the ' + self.grammar_name + '-grammar']
        transtable.append('    "+": remove_empty,')
        for name in self.rules:
            # pre-fill a plausible transformation based on the shape of the
            # compiled rule: alternatives and synonyms get standard handlers
            transformations = '[]'
            rule = self.definitions[name]
            if rule.startswith('Alternative'):
                transformations = '[replace_or_reduce]'
            elif rule.startswith('Synonym'):
                transformations = '[reduce_single_child]'
            transtable.append('    "' + name + '": %s,' % transformations)
        transtable.append('    ":Token, :RE": reduce_single_child,')
        transtable += ['    "*": replace_by_single_child', '}', '']
        transtable += [TRANSFORMER_FACTORY.format(NAME=self.grammar_name)]
        return '\n'.join(transtable)

465

466
    def gen_compiler_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for a Compiler-class for the
        previously compiled formal language.

        Raises:
            EBNFCompilerError: if called before any grammar has been
                compiled (i.e. ``self.rules`` is still empty).
        """
        if not self.rules:
            raise EBNFCompilerError('Compiler has not been run before calling '
                                    '"gen_Compiler_Skeleton()"!')
        compiler = ['class ' + self.grammar_name + 'Compiler(Compiler):',
                    '    """Compiler for the abstract-syntax-tree of a ' +
                    self.grammar_name + ' source file.',
                    '    """', '',
                    '    def __init__(self, grammar_name="' +
                    self.grammar_name + '", grammar_source=""):',
                    '        super(' + self.grammar_name +
                    'Compiler, self).__init__(grammar_name, grammar_source)',
                    r"        assert re.match('\w+\Z', grammar_name)", '']
        for name in self.rules:
            method_name = Compiler.method_name(name)
            if name == self.root_symbol:
                # only the root symbol gets an active (uncommented) method
                compiler += ['    def ' + method_name + '(self, node):',
                             '        return self.fallback_compiler(node)', '']
            else:
                # all other methods are emitted commented-out as templates
                compiler += ['    # def ' + method_name + '(self, node):',
                             '    #     return node', '']
        compiler += [COMPILER_FACTORY.format(NAME=self.grammar_name)]
        return '\n'.join(compiler)
493

494
    def verify_transformation_table(self, transtable):
        """
        Checks for symbols that occur in the transformation-table but have
        never been defined in the grammar. Usually, this kind of
        inconsistency results from an error like a typo in the transformation
        table.  Returns a list of Error objects, one per unknown symbol.
        """
        assert self._dirty_flag
        known_symbols = self.rules.keys()
        # wildcard keys and anonymous-node keys (":...") are always legal
        candidates = set(expand_table(transtable).keys()) - {'*', '+', '~'}
        return [
            Error(('Symbol "%s" is not defined in grammar %s but appears in '
                   'the transformation table!') % (entry, self.grammar_name),
                  Error.UNDEFINED_SYMBOL_IN_TRANSFORMATION_TABLE)
            for entry in candidates
            if entry not in known_symbols and not entry.startswith(":")
        ]

512

513
514
515
516
517
    def assemble_parser(self, definitions: List[Tuple[str, str]], root_node: Node) -> str:
        """
        Creates the Python code for the parser after compilation of
        the EBNF-Grammar.

        Parameters:
            definitions:  (symbol, compiled-parser-expression) pairs in
                the order the rules appeared in the grammar; mutated in
                place (special fields are appended, then reversed).
            root_node:  root of the compiled syntax tree; errors found
                during assembly are attached to nodes reachable from it.

        Returns:
            The complete Python source of the generated Grammar class,
            also stored in ``self._result``.
        """

        # execute deferred tasks, for example semantic checks that cannot
        # be done before the symbol table is complete

        for task in self.deferred_tasks:
            task()

        # provide for capturing of symbols that are variables, i.e. the
        # value of will be retrieved at some point during the parsing process

        if self.variables:
            for i in range(len(definitions)):
                if definitions[i][0] in self.variables:
                    definitions[i] = (definitions[i][0], 'Capture(%s)' % definitions[i][1])

        # add special fields for Grammar class

        definitions.append((self.WHITESPACE_PARSER_KEYWORD,
                            'Whitespace(%s)' % self.WHITESPACE_KEYWORD))
        definitions.append(('wspR__', self.WHITESPACE_KEYWORD
                            if 'right' in self.directives['literalws'] else "''"))
        definitions.append(('wspL__', self.WHITESPACE_KEYWORD
                            if 'left' in self.directives['literalws'] else "''"))
        definitions.append((self.WHITESPACE_KEYWORD,
                            ("mixin_comment(whitespace=" + self.RAW_WS_KEYWORD +
                             ", comment=" + self.COMMENT_KEYWORD + ")")))
        definitions.append((self.RAW_WS_KEYWORD, "r'{whitespace}'".format(**self.directives)))
        definitions.append((self.COMMENT_KEYWORD, "r'{comment}'".format(**self.directives)))

        # prepare parser class header and docstring and
        # add EBNF grammar to the doc string of the parser class

        article = 'an ' if self.grammar_name[0:1] in "AaEeIiOoUu" else 'a '  # what about 'hour', 'universe' etc.?
        declarations = ['class ' + self.grammar_name +
                        'Grammar(Grammar):',
                        'r"""Parser for ' + article + self.grammar_name +
                        ' source file' +
                        (', with this grammar:' if self.grammar_source else '.')]
        definitions.append(('parser_initialization__', '"upon instantiation"'))
        if self.grammar_source:
            # embed the source hash so grammar_changed() can detect staleness
            definitions.append(('source_hash__',
                                '"%s"' % md5(self.grammar_source, __version__)))
            declarations.append('')
            declarations += [line for line in self.grammar_source.split('\n')]
            # strip trailing blank lines from the embedded grammar text
            while declarations[-1].strip() == '':
                declarations = declarations[:-1]
        declarations.append('"""')

        # turn definitions into declarations in reverse order
        # (reversed so that each parser is defined before it is referenced;
        #  the first definition, i.e. the root, comes last)

        self.root_symbol = definitions[0][0] if definitions else ""
        definitions.reverse()
        declarations += [symbol + ' = Forward()'
                         for symbol in sorted(list(self.recursive))]
        for symbol, statement in definitions:
            if symbol in self.recursive:
                declarations += [symbol + '.set(' + statement + ')']
            else:
                declarations += [symbol + ' = ' + statement]

        # check for symbols used but never defined

        defined_symbols = set(self.rules.keys()) | self.RESERVED_SYMBOLS
        for symbol in self.symbols:
            if symbol not in defined_symbols:
                self.symbols[symbol].add_error("Missing definition for symbol '%s'" % symbol)
                # root_node.error_flag = True

        # check for unconnected rules

        defined_symbols.difference_update(self.RESERVED_SYMBOLS)

        def remove_connections(symbol):
            """Recursively removes all symbols which appear in the
            definiens of a particular symbol."""
            if symbol in defined_symbols:
                defined_symbols.remove(symbol)
                for related in self.rules[symbol][1:]:
                    remove_connections(str(related))

        # whatever remains in defined_symbols after the sweep from the root
        # is unreachable and gets a warning attached
        remove_connections(self.root_symbol)
        for leftover in defined_symbols:
            self.rules[leftover][0].add_error(
                ('Rule "%s" is not connected to parser root "%s" !') %
                (leftover, self.root_symbol), Error.WARNING)

        # set root_symbol parser and assemble python grammar definition

        if self.root_symbol and 'root__' not in self.rules:
            declarations.append('root__ = ' + self.root_symbol)
        declarations.append('')
        self._result = '\n    '.join(declarations) \
                       + GRAMMAR_FACTORY.format(NAME=self.grammar_name)
        return self._result
612

613
614
615

    ## compilation methods

616
    def on_syntax(self, node: Node) -> str:
        """Compiles the root node of an EBNF syntax tree: compiles every
        definition and directive it contains and returns the assembled
        Python source of the Grammar class."""
        definitions = []  # type: List[Tuple[str, str]]

        # drop the wrapping sequence node
        if len(node.children) == 1 and not node.children[0].parser.name:
            node = node.children[0]

        # compile definitions and directives and collect definitions
        for nd in node.children:
            if nd.parser.name == "definition":
                definitions.append(self.compile(nd))
            else:
                assert nd.parser.name == "directive", nd.as_sxpr()
                # directives only update compiler state, they yield no code
                self.compile(nd)
            # propagate child error state up to the root node
            node.error_flag = max(node.error_flag, nd.error_flag)
        self.definitions.update(definitions)

        return self.assemble_parser(definitions, node)
634

635

636
    def on_definition(self, node: Node) -> Tuple[str, str]:
        """Compiles a single grammar rule (e.g. ``symbol = expression``)
        into a ``(rule_name, python_parser_expression)`` pair.

        Emits errors on the node for redefinitions, reserved or ill-formed
        symbol names, clashes with preprocessor tokens and Python keywords.
        """
        rule = node.children[0].content
        if rule in self.rules:
            # duplicate definition: flag the first occurrence once, then
            # flag every later redefinition
            first = self.rules[rule][0]
            if not first._errors:
                first.add_error('First definition of rule "%s" '
                                'followed by illegal redefinitions.' % rule)
            node.add_error('A rule with name "%s" has already been defined earlier.' % rule)
        elif rule in EBNFCompiler.RESERVED_SYMBOLS:
            node.add_error('Symbol "%s" is a reserved symbol.' % rule)
        elif not sane_parser_name(rule):
            # fixed message: was 'doube underscore' with a stray double space
            node.add_error('Illegal symbol "%s". Symbols must not start or '
                           'end with a double underscore "__".' % rule)
        elif rule in self.directives['tokens']:
            node.add_error('Symbol "%s" has already been defined as '
                           'a preprocessor token.' % rule)
        elif keyword.iskeyword(rule):
            # rule names become Python attribute names in the generated
            # grammar class, so Python keywords cannot be used
            node.add_error('Python keyword "%s" may not be used as a symbol. '
                           % rule + '(This may change in the future.)')
        try:
            # register the rule before compiling its definiens, so that
            # recursive references resolve to this symbol
            self.current_symbols = [node]
            self.rules[rule] = self.current_symbols
            defn = self.compile(node.children[1])
            if rule in self.variables:
                defn = 'Capture(%s)' % defn
                self.variables.remove(rule)
            elif defn.find("(") < 0:
                # no parser call in the compiled expression:
                # assume it's a synonym, like 'page = REGEX_PAGE_NR'
                defn = 'Synonym(%s)' % defn
        except TypeError as error:
            # a malformed AST surfaces as a TypeError during compilation;
            # report it with a traceback excerpt and a degenerate rule
            from traceback import extract_tb
            trace = str(extract_tb(error.__traceback__)[-1])
            errmsg = "%s (TypeError: %s; %s)\n%s" \
                     % (EBNFCompiler.AST_ERROR, str(error), trace, node.as_sxpr())
            node.add_error(errmsg)
            rule, defn = rule + ':error', '"' + errmsg + '"'
        return rule, defn
673

674

675
    def _check_rx(self, node: Node, rx: str) -> str:
        """
        Validates the regular expression string ``rx`` by compiling it.
        Regular expressions that span several lines get the verbose-flag
        ``x`` prepended (in addition to any globally configured flags).
        Reports a malformed expression as an error on ``node``.
        Returns the (possibly flag-prefixed) regular expression string.
        """
        flags = self.re_flags | {'x'} if '\n' in rx else self.re_flags
        if flags:
            rx = "(?%s)%s" % ("".join(flags), rx)
        try:
            re.compile(rx)
        except Exception as re_error:
            node.add_error("malformed regular expression %s: %s" %
                           (repr(rx), str(re_error)))
        return rx

690

691
    def on_directive(self, node: Node) -> str:
        """Processes a grammar directive (e.g. ``@ whitespace = horizontal``)
        by updating the compiler's state (``self.directives``, ``self.re_flags``
        etc.). Always returns the empty string, since directives do not
        contribute parser code of their own.
        """
        key = node.children[0].content.lower()
        assert key not in self.directives['tokens']

        # non-repeatable directives may only be defined once
        if key not in self.REPEATABLE_DIRECTIVES:
            if key in self.defined_directives:
                node.add_error('Directive "%s" has already been defined earlier. ' % key + \
                               'Later definition will be ignored!',
                               code=Error.REDEFINED_DIRECTIVE_WARNING)
                return ""
            self.defined_directives.add(key)

        if key in {'comment', 'whitespace'}:
            if node.children[1].parser.name == "list_":
                # a symbolic value, e.g. @ whitespace = horizontal
                if len(node.children[1].result) != 1:
                    node.add_error('Directive "%s" must have one, but not %i values.' %
                                   (key, len(node.children[1].result)))
                value = self.compile(node.children[1]).pop()
                if key == 'whitespace' and value in EBNFCompiler.WHITESPACE:
                    value = EBNFCompiler.WHITESPACE[value]  # replace whitespace-name by regex
                else:
                    node.add_error('Value "%s" not allowed for directive "%s".' % (value, key))
            else:
                # a literal or regular-expression value
                value = node.children[1].content.strip("~")
                if value != node.children[1].content:
                    node.add_error("Whitespace marker '~' not allowed in definition of "
                                   "%s regular expression." % key)
                if value[0] + value[-1] in {'""', "''"}:
                    value = escape_re(value[1:-1])      # string literal -> escaped regex
                elif value[0] + value[-1] == '//':
                    value = self._check_rx(node, value[1:-1])
                if key == 'whitespace' and not re.match(value, ''):
                    node.add_error("Implicit whitespace should always match the empty string, "
                                   "/%s/ does not." % value)
            self.directives[key] = value

        elif key == 'ignorecase':
            if node.children[1].content.lower() not in {"off", "false", "no"}:
                self.re_flags.add('i')

        elif key == 'literalws':
            value = {item.lower() for item in self.compile(node.children[1])}
            if ((value - {'left', 'right', 'both', 'none'})
                    or ('none' in value and len(value) > 1)):
                node.add_error('Directive "literalws" allows the values '
                               '`left`, `right`, `both` or `none`, '
                               'but not `%s`' % ", ".join(value))
            # fixed: use an empty set (was `{}`, an empty dict) for 'none'
            wsp = {'left', 'right'} if 'both' in value \
                else set() if 'none' in value else value
            self.directives[key] = list(wsp)

        elif key in {'tokens', 'preprocessor_tokens'}:
            tokens = self.compile(node.children[1])
            redeclared = self.directives['tokens'] & tokens
            if redeclared:
                node.add_error('Tokens %s have already been declared earlier. '
                               % str(redeclared) + 'Later declaration will be ignored',
                               code=Error.REDECLARED_TOKEN_WARNING)
            self.directives['tokens'] |= tokens - redeclared

        elif key.endswith('_filter'):
            filter_set = self.compile(node.children[1])
            if not isinstance(filter_set, set) or len(filter_set) != 1:
                # fixed message: was 'exactly on symbol'
                node.add_error('Directive "%s" accepts exactly one symbol, not %s'
                               % (key, str(filter_set)))
            self.directives['filter'][key[:-7]] = filter_set.pop()

        else:
            node.add_error('Unknown directive %s ! (Known ones are %s .)' %
                           (key, ', '.join(list(self.directives.keys()))))
        return ""

768

769
    def non_terminal(self, node: Node, parser_class: str, custom_args: List[str] = None) -> str:
        """
        Compiles any non-terminal, where `parser_class` indicates the Parser class
        name for the particular non-terminal.

        :param node: the AST node whose children are compiled into arguments.
        :param parser_class: name of the Parser class to instantiate.
        :param custom_args: optional extra argument strings appended after the
            compiled children (fixed: was a mutable default argument ``[]``).
        :return: Python source of the parser constructor call.
        """
        if custom_args is None:
            custom_args = []
        arguments = [self.compile(r) for r in node.children] + custom_args
        node.error_flag = max(node.error_flag, max(t.error_flag for t in node.children))
        return parser_class + '(' + ', '.join(arguments) + ')'

778

779
    def on_expression(self, node) -> str:
        """Compiles an alternative ("|"-separated expression) into an
        ``Alternative``-parser call."""
        # TODO: warn about orderings like "a" | "ab", where the first
        #       alternative shadows the second (yields "a" even for "ab")
        return self.non_terminal(node, 'Alternative')

783

784
    def on_term(self, node) -> str:
        """Compiles a sequence of factors into a ``Series``-parser call,
        honoring the mandatory-marker ``§``: everything after the first
        ``§`` is compiled as mandatory via ``Series(..., mandatory=i)``.
        """
        # Basically, the following code does only this:
        #       return self.non_terminal(node, 'Series')
        # What makes it (look) more complicated is the handling of the
        # mandatory §-operator
        mandatory_marker = []   # positions (in the filtered children) of each §
        filtered_children = []  # children without the § marker tokens
        i = 0                   # index of the next non-marker child
        for nd in node.children:
            if nd.parser.ptype == TOKEN_PTYPE and nd.content == "§":
                mandatory_marker.append(i)
                if i == 0:
                    # § before the first item is pointless: a Series can
                    # only fail on its first element anyway
                    nd.add_error('First item of a series should not be mandatory.',
                                 Error.WARNING)
                elif len(mandatory_marker) > 1:
                    # only the first § matters; later ones are redundant
                    nd.add_error('One mandatory marker (§) sufficient to declare the '
                                 'rest of the series as mandatory.', Error.WARNING)
            else:
                filtered_children.append(nd)
                i += 1
        # temporarily replace the children with the §-free list so that
        # non_terminal() compiles only the real factors; restore afterwards
        saved_result = node.result
        node.result = tuple(filtered_children)
        custom_args = ['mandatory=%i' % mandatory_marker[0]] if mandatory_marker else []
        compiled = self.non_terminal(node, 'Series', custom_args)
        node.result = saved_result
        return compiled
810

811

812
    def on_factor(self, node: Node) -> str:
        """Compiles a prefixed factor (e.g. ``!a``, ``-!a``, ``::sym``) into
        the parser-call named by ``self.PREFIX_TABLE[prefix]``.

        Retrieve operators (``:``/``::``) additionally register the symbol as
        a variable; lookbehind prefixes (starting with ``-``) schedule a
        deferred sanity check that their argument is a plain RegExp parser.
        """
        assert node.children
        assert len(node.children) >= 2, node.as_sxpr()
        prefix = node.children[0].content
        custom_args = []  # type: List[str]

        if prefix in {'::', ':'}:
            # retrieve/pop operators: argument must be a single symbol
            assert len(node.children) == 2
            arg = node.children[-1]
            if arg.parser.name != 'symbol':
                node.add_error(('Retrieve Operator "%s" requires a symbol, '
                                'and not a %s.') % (prefix, str(arg.parser)))
                return str(arg.result)
            if str(arg) in self.directives['filter']:
                # a retrieval filter has been declared for this symbol
                custom_args = ['rfilter=%s' % self.directives['filter'][str(arg)]]
            self.variables.add(str(arg))  # cast(str, arg.result)

        elif len(node.children) > 2:
            # more than one child after the prefix: fold children[1:] into a
            # single node so the prefix parser receives exactly one argument
            # shift = (Node(node.parser, node.result[1].result),)
            # node.result[1].result = shift + node.result[2:]
            node.children[1].result = (Node(node.children[1].parser, node.children[1].result),) \
                                    + node.children[2:]
            node.children[1].parser = node.parser
            node.result = (node.children[0], node.children[1])

        # drop the prefix token itself before compiling the argument(s)
        node.result = node.children[1:]
        try:
            parser_class = self.PREFIX_TABLE[prefix]
            result = self.non_terminal(node, parser_class, custom_args)
            if prefix[:1] == '-':
                # lookbehind operator: its argument must (eventually) resolve
                # to a plain regular expression; since symbols may be defined
                # later, defer the check until all rules are known
                def check(node):
                    nd = node
                    if len(nd.children) >= 1:
                        nd = nd.children[0]
                    # follow chains of symbol references to their definition
                    while nd.parser.name == "symbol":
                        symlist = self.rules.get(nd.content, [])
                        if len(symlist) == 2:
                            nd = symlist[1]
                        else:
                            if len(symlist) == 1:
                                nd = symlist[0].children[1]
                            break
                    if (nd.parser.name != "regexp" or nd.content[:1] != '/'
                            or nd.content[-1:] != '/'):
                        node.add_error("Lookbehind-parser can only be used with plain RegExp-"
                                       "parsers, not with: " + nd.parser.name + nd.parser.ptype)

                if not result.startswith('RegExp('):
                    self.deferred_tasks.append(lambda: check(node))
            return result
        except KeyError:
            node.add_error('Unknown prefix "%s".' % prefix)
        return ""
865

866

867
    def on_option(self, node) -> str:
        """Compiles an optional factor ("[...]") into an ``Option``-parser call."""
        return self.non_terminal(node, 'Option')
869

870

871
    def on_repetition(self, node) -> str:
872
873
        return self.non_terminal(node, 'ZeroOrMore'