# ebnf.py - EBNF -> Python-Parser compilation for DHParser
#
# Copyright 2016  by Eckhart Arnold (arnold@badw.de)
#                 Bavarian Academy of Sciences and Humanities (badw.de)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.  See the License for the specific language governing
# permissions and limitations under the License.


"""
Module ``ebnf`` provides a self-hosting parser for EBNF-Grammars as
well as an EBNF-compiler that compiles an EBNF-Grammar into a
DHParser based Grammar class that can be executed to parse source text
conforming to this grammar into contrete syntax trees.
24
25
"""

import keyword
from collections import OrderedDict
from functools import partial

from DHParser.compile import CompilerError, Compiler
from DHParser.error import Error
from DHParser.parse import Grammar, mixin_comment, Forward, RegExp, Whitespace, \
    NegativeLookahead, Alternative, Series, Option, OneOrMore, ZeroOrMore, Token
from DHParser.preprocess import nil_preprocessor, PreprocessorFunc
from DHParser.syntaxtree import Node, WHITESPACE_PTYPE, TOKEN_PTYPE
from DHParser.toolkit import load_if_file, escape_re, md5, sane_parser_name, re, expand_table, \
    typing
from DHParser.transform import TransformationFunc, traverse, remove_brackets, \
    reduce_single_child, replace_by_single_child, remove_expendables, \
    remove_tokens, flatten, forbid, assert_content, remove_infix_operator
from DHParser.versionnumber import __version__
from typing import Callable, Dict, List, Set, Tuple


__all__ = ('get_ebnf_preprocessor',
           'get_ebnf_grammar',
           'get_ebnf_transformer',
           'get_ebnf_compiler',
           'EBNFGrammar',
           'EBNFTransform',
           'EBNFCompilerError',
           'EBNFCompiler',
           'grammar_changed',
           'PreprocessorFactoryFunc',
           'ParserFactoryFunc',
           'TransformerFactoryFunc',
           'CompilerFactoryFunc')
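
# The pieces of this module are typically combined as follows (a sketch, not a
# definitive recipe; the grammar string is a made-up example -- cf. module
# ``dsl`` for convenience functions that wrap this pipeline):
#
#     ebnf_source = r'key = /\w+/ ~'
#     syntax_tree = get_ebnf_grammar()(ebnf_source)    # parse the EBNF source
#     get_ebnf_transformer()(syntax_tree)              # AST-transformation in place
#     python_code = get_ebnf_compiler()(syntax_tree)   # code of a Grammar class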


########################################################################
#
# EBNF scanning
#
########################################################################


def get_ebnf_preprocessor() -> PreprocessorFunc:
    return nil_preprocessor


########################################################################
#
# EBNF parsing
#
########################################################################

class EBNFGrammar(Grammar):
    r"""
    Parser for an EBNF source file, with this grammar::

        # EBNF-Grammar in EBNF

        @ comment    = /#.*(?:\n|$)/                    # comments start with '#' and eat all chars up to and including '\n'
        @ whitespace = /\s*/                            # whitespace includes linefeed
        @ literalws  = right                            # trailing whitespace of literals will be ignored tacitly

        syntax     = [~//] { definition | directive } §EOF
        definition = symbol §"=" expression
        directive  = "@" §symbol "=" ( regexp | literal | list_ )

        expression = term { "|" term }
        term       = { ["§"] factor }+                       # "§" means all following factors mandatory
        factor     = [flowmarker] [retrieveop] symbol !"="   # negative lookahead to be sure it's not a definition
                   | [flowmarker] literal
                   | [flowmarker] plaintext
                   | [flowmarker] regexp
                   | [flowmarker] whitespace
                   | [flowmarker] oneormore
                   | [flowmarker] group
                   | [flowmarker] unordered
                   | repetition
                   | option

        flowmarker = "!"  | "&"                         # '!' negative lookahead, '&' positive lookahead
                   | "-!" | "-&"                        # '-' negative lookbehind, '-&' positive lookbehind
        retrieveop = "::" | ":"                         # '::' pop, ':' retrieve

        group      = "(" §expression ")"
        unordered  = "<" §expression ">"                # elements of expression in arbitrary order
        oneormore  = "{" expression "}+"
        repetition = "{" §expression "}"
        option     = "[" §expression "]"

        symbol     = /(?!\d)\w+/~                       # e.g. expression, factor, parameter_list
        literal    = /"(?:[^"]|\\")*?"/~                # e.g. "(", '+', 'while'
                   | /'(?:[^']|\\')*?'/~                # whitespace following literals will be ignored tacitly.
        plaintext  = /`(?:[^"]|\\")*?`/~                # like literal but does not eat whitespace
        regexp     = /\/(?:\\\/|[^\/])*?\//~            # e.g. /\w+/, ~/#.*(?:\n|$)/~
                                                        # '~' is a whitespace-marker, if present leading or trailing
                                                        # whitespace of a regular expression will be ignored tacitly.
        whitespace = /~/~                               # implicit or default whitespace
        list_      = /\w+/~ { "," /\w+/~ }              # comma separated list of symbols, e.g. BEGIN_LIST, END_LIST,
                                                        # BEGIN_QUOTE, END_QUOTE ; see CommonMark/markdown.py for an example
        EOF = !/./
    """
    expression = Forward()
    parser_initialization__ = "upon instantiation"
    COMMENT__ = r'#.*(?:\n|$)'
    WHITESPACE__ = r'\s*'
    WSP_RE__ = mixin_comment(whitespace=WHITESPACE__, comment=COMMENT__)
    wsp__ = Whitespace(WSP_RE__)
    EOF = NegativeLookahead(RegExp('.'))
    list_ = Series(RegExp('\\w+'), wsp__, ZeroOrMore(Series(Series(Token(","), wsp__), RegExp('\\w+'), wsp__)))
    whitespace = Series(RegExp('~'), wsp__)
    regexp = Series(RegExp('/(?:\\\\/|[^/])*?/'), wsp__)
    plaintext = Series(RegExp('`(?:[^"]|\\\\")*?`'), wsp__)
    literal = Alternative(Series(RegExp('"(?:[^"]|\\\\")*?"'), wsp__), Series(RegExp("'(?:[^']|\\\\')*?'"), wsp__))
    symbol = Series(RegExp('(?!\\d)\\w+'), wsp__)
    option = Series(Series(Token("["), wsp__), expression, Series(Token("]"), wsp__), mandatory=1)
    repetition = Series(Series(Token("{"), wsp__), expression, Series(Token("}"), wsp__), mandatory=1)
    oneormore = Series(Series(Token("{"), wsp__), expression, Series(Token("}+"), wsp__))
    unordered = Series(Series(Token("<"), wsp__), expression, Series(Token(">"), wsp__), mandatory=1)
    group = Series(Series(Token("("), wsp__), expression, Series(Token(")"), wsp__), mandatory=1)
    retrieveop = Alternative(Series(Token("::"), wsp__), Series(Token(":"), wsp__))
    flowmarker = Alternative(Series(Token("!"), wsp__), Series(Token("&"), wsp__),
                             Series(Token("-!"), wsp__), Series(Token("-&"), wsp__))
    factor = Alternative(Series(Option(flowmarker), Option(retrieveop), symbol,
                                NegativeLookahead(Series(Token("="), wsp__))),
                         Series(Option(flowmarker), literal), Series(Option(flowmarker), plaintext),
                         Series(Option(flowmarker), regexp), Series(Option(flowmarker), whitespace),
                         Series(Option(flowmarker), oneormore), Series(Option(flowmarker), group),
                         Series(Option(flowmarker), unordered), repetition, option)
    term = OneOrMore(Series(Option(Series(Token("§"), wsp__)), factor))
    expression.set(Series(term, ZeroOrMore(Series(Series(Token("|"), wsp__), term))))
    directive = Series(Series(Token("@"), wsp__), symbol, Series(Token("="), wsp__),
                       Alternative(regexp, literal, list_), mandatory=1)
    definition = Series(symbol, Series(Token("="), wsp__), expression, mandatory=1)
    syntax = Series(Option(Series(wsp__, RegExp(''))),
                    ZeroOrMore(Alternative(definition, directive)), EOF, mandatory=2)
    root__ = syntax
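
# Usage sketch (hypothetical input): instances of EBNFGrammar are callable and
# parse EBNF source text starting from `root__` (= syntax), e.g.:
#
#     cst = EBNFGrammar()('list_ = /\w+/~ { "," /\w+/~ }\n')
#     if not cst.error_flag:
#         print(cst.as_sxpr())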


def grammar_changed(grammar_class, grammar_source: str) -> bool:
    """
    Returns ``True`` if ``grammar_class`` does not reflect the latest
    changes of ``grammar_source``

    Parameters:
        grammar_class:  the parser class representing the grammar
            or the file name of a compiler suite containing the grammar
        grammar_source:  File name or string representation of the
            EBNF code of the grammar

    Returns (bool):
        True, if the source text of the grammar is different from the
        source from which the grammar class was generated
    """
    grammar = load_if_file(grammar_source)
    chksum = md5(grammar, __version__)
    if isinstance(grammar_class, str):
        # grammar_class = load_compiler_suite(grammar_class)[1]
        with open(grammar_class, 'r', encoding='utf8') as f:
            pycode = f.read()
        m = re.search(r'class \w*\(Grammar\)', pycode)
        if m:
            m = re.search('    source_hash__ *= *"([a-z0-9]*)"',
                          pycode[m.span()[1]:])
            return not (m and m.groups() and m.groups()[-1] == chksum)
        else:
            return True
    else:
        return chksum != grammar_class.source_hash__
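
# Typical use of grammar_changed() (sketch, hypothetical file name): regenerate
# the parser only if the EBNF source no longer matches the hash stored in the
# generated Grammar class:
#
#     if grammar_changed(EBNFGrammar, 'ebnf_grammar.ebnf'):
#         ...  # recompile the grammar / regenerate the parser module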


def get_ebnf_grammar() -> EBNFGrammar:
    global thread_local_ebnf_grammar_singleton
    try:
        grammar = thread_local_ebnf_grammar_singleton
        return grammar
    except NameError:
        thread_local_ebnf_grammar_singleton = EBNFGrammar()
        return thread_local_ebnf_grammar_singleton


########################################################################
#
# EBNF concrete to abstract syntax tree transformation and validation
#
########################################################################


EBNF_AST_transformation_table = {
    # AST Transformations for EBNF-grammar
    "<":
        remove_expendables,
    "syntax":
        [],  # otherwise '"*": replace_by_single_child' would be applied
    "directive, definition":
        remove_tokens('@', '='),
    "expression":
        [replace_by_single_child, flatten, remove_tokens('|')],  # remove_infix_operator],
    "term":
        [replace_by_single_child, flatten],  # supports both idioms:
                                             # "{ factor }+" and "factor { factor }"
    "factor, flowmarker, retrieveop":
        replace_by_single_child,
    "group":
        [remove_brackets, replace_by_single_child],
    "unordered":
        remove_brackets,
    "oneormore, repetition, option":
        [reduce_single_child, remove_brackets,
         forbid('repetition', 'option', 'oneormore'), assert_content(r'(?!§)(?:.|\n)*')],
    "symbol, literal, regexp":
        reduce_single_child,
    (TOKEN_PTYPE, WHITESPACE_PTYPE):
        reduce_single_child,
    "list_":
        [flatten, remove_infix_operator],
    "*":
        replace_by_single_child
}


def EBNFTransform() -> TransformationFunc:
    return partial(traverse, processing_table=EBNF_AST_transformation_table.copy())


def get_ebnf_transformer() -> TransformationFunc:
    global thread_local_EBNF_transformer_singleton
    try:
        transformer = thread_local_EBNF_transformer_singleton
    except NameError:
        thread_local_EBNF_transformer_singleton = EBNFTransform()
        transformer = thread_local_EBNF_transformer_singleton
    return transformer


########################################################################
#
# EBNF abstract syntax tree to Python parser compilation
#
########################################################################


PreprocessorFactoryFunc = Callable[[], PreprocessorFunc]
ParserFactoryFunc = Callable[[], Grammar]
TransformerFactoryFunc = Callable[[], TransformationFunc]
CompilerFactoryFunc = Callable[[], Compiler]

PREPROCESSOR_FACTORY = '''
def get_preprocessor() -> PreprocessorFunc:
    return {NAME}Preprocessor
'''


GRAMMAR_FACTORY = '''
def get_grammar() -> {NAME}Grammar:
    global thread_local_{NAME}_grammar_singleton
    try:
        grammar = thread_local_{NAME}_grammar_singleton
    except NameError:
        thread_local_{NAME}_grammar_singleton = {NAME}Grammar()
        grammar = thread_local_{NAME}_grammar_singleton
    return grammar
'''


TRANSFORMER_FACTORY = '''
def {NAME}Transform() -> TransformationDict:
    return partial(traverse, processing_table={NAME}_AST_transformation_table.copy())

def get_transformer() -> TransformationFunc:
    global thread_local_{NAME}_transformer_singleton
    try:
        transformer = thread_local_{NAME}_transformer_singleton
    except NameError:
        thread_local_{NAME}_transformer_singleton = {NAME}Transform()
        transformer = thread_local_{NAME}_transformer_singleton
    return transformer
'''


COMPILER_FACTORY = '''
def get_compiler(grammar_name="{NAME}", grammar_source="") -> {NAME}Compiler:
    global thread_local_{NAME}_compiler_singleton
    try:
        compiler = thread_local_{NAME}_compiler_singleton
        compiler.set_grammar_name(grammar_name, grammar_source)
    except NameError:
        thread_local_{NAME}_compiler_singleton = \\
            {NAME}Compiler(grammar_name, grammar_source)
        compiler = thread_local_{NAME}_compiler_singleton
    return compiler
'''
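
# The factory templates above are filled in by EBNFCompiler.assemble_parser()
# and the gen_*_skeleton() methods below via str.format, e.g. (hypothetical
# grammar name):
#
#     GRAMMAR_FACTORY.format(NAME='Arithmetic')
#
# which yields a get_grammar() function that manages a thread-local singleton
# of the generated ArithmeticGrammar class.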


class EBNFCompilerError(CompilerError):
    """Error raised by `EBNFCompiler` class. (Not compilation errors
    in the strict sense, see `CompilationError` in module ``dsl.py``)"""
    pass


class EBNFCompiler(Compiler):
    """
    Generates a Parser from an abstract syntax tree of a grammar specified
    in EBNF-Notation.

    Instances of this class must be called with the root-node of the
    abstract syntax tree from an EBNF-specification of a formal language.
    The returned value is the Python-source-code of a Grammar class for
    this language that can be used to parse texts in this language.
    See classes `parser.Compiler` and `parser.Grammar` for more information.

    Additionally, class EBNFCompiler provides helper methods to generate
    code-skeletons for a preprocessor, AST-transformation and full
    compilation of the formal language. These methods' names start with
    the prefix `gen_`.

    Attributes:
        current_symbols:  During compilation, a list containing the root
                node of the currently compiled definition as first element
                and then the nodes of the symbols that are referred to in
                the currently compiled definition.

        rules:  Dictionary that maps rule names to a list of Nodes that
                contain symbol-references in the definition of the rule.
                The first item in the list is the node of the rule-
                definition itself. Example:

                           `alternative = a | b`

                Now `[node.content for node in self.rules['alternative']]`
                yields `['alternative = a | b', 'a', 'b']`

        symbols:  A mapping of symbol names to their first usage (not
                their definition!) in the EBNF source.

        variables:  A set of symbol names that are used with the
                Pop or Retrieve operator. Because the values of these
                symbols need to be captured they are called variables.
                See `test_parser.TestPopRetrieve` for an example.

        recursive:  A set of symbols that are used recursively and
                therefore require a `Forward`-operator.

        definitions:  A dictionary of definitions. Unlike `rules`,
                this maps the symbols to their compiled definienda.

        deferred_tasks:  A list of callables that is filled during
                compilation, but that will be executed only after
                compilation has finished. Typically, it contains
                semantic checks that require information that
                is only available upon completion of compilation.

        root_symbol:   The name of the root symbol.

        directives:  A dictionary of all directives and their default
                values.

        re_flags:  A set of regular expression flags to be added to all
                regular expressions found in the current parsing process
    """
    COMMENT_KEYWORD = "COMMENT__"
    WHITESPACE_KEYWORD = "WSP_RE__"
    RAW_WS_KEYWORD = "WHITESPACE__"
    WHITESPACE_PARSER_KEYWORD = "wsp__"
    RESERVED_SYMBOLS = {WHITESPACE_KEYWORD, RAW_WS_KEYWORD, COMMENT_KEYWORD}
    AST_ERROR = "Badly structured syntax tree. " \
                "Potentially due to erroneous AST transformation."
    PREFIX_TABLE = {'§': 'Required',
                    '&': 'Lookahead', '!': 'NegativeLookahead',
                    '-&': 'Lookbehind', '-!': 'NegativeLookbehind',
                    '::': 'Pop', ':': 'Retrieve'}
    WHITESPACE = {'horizontal': r'[\t ]*',  # default: horizontal
                  'linefeed': r'[ \t]*\n?(?!\s*\n)[ \t]*',
                  'vertical': r'\s*'}
    REPEATABLE_DIRECTIVES = {'tokens'}


    def __init__(self, grammar_name="", grammar_source=""):
        super(EBNFCompiler, self).__init__(grammar_name, grammar_source)
        self._reset()


    def _reset(self):
        super(EBNFCompiler, self)._reset()
        self._result = ''           # type: str
        self.re_flags = set()       # type: Set[str]
        self.rules = OrderedDict()  # type: OrderedDict[str, List[Node]]
        self.current_symbols = []   # type: List[Node]
        self.symbols = {}           # type: Dict[str, Node]
        self.variables = set()      # type: Set[str]
        self.recursive = set()      # type: Set[str]
        self.definitions = {}       # type: Dict[str, str]
        self.deferred_tasks = []    # type: List[Callable]
        self.root_symbol = ""       # type: str
        self.directives = {'whitespace': self.WHITESPACE['vertical'],
                           'comment': '',
                           'literalws': {'right'},
                           'tokens': set(),  # alt. 'preprocessor_tokens'
                           'filter': dict()}  # alt. 'filter'
        # self.directives['ignorecase']: False
        self.defined_directives = set()  # type: Set[str]

    @property
    def result(self) -> str:
        return self._result

    # methods for generating skeleton code for preprocessor, transformer, and compiler

    def gen_preprocessor_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for a preprocessor-function for
        the previously compiled formal language.
        """
        name = self.grammar_name + "Preprocessor"
        return "def %s(text):\n    return text, lambda i: i\n" % name \
               + PREPROCESSOR_FACTORY.format(NAME=self.grammar_name)


    def gen_transformer_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for the AST-transformation for the
        previously compiled formal language.
        """
        if not self.rules:
            raise EBNFCompilerError('Compiler must be run before calling '
                                    '"gen_transformer_Skeleton()"!')
        tt_name = self.grammar_name + '_AST_transformation_table'
        transtable = [tt_name + ' = {',
                      '    # AST Transformations for the ' + self.grammar_name + '-grammar']
        transtable.append('    "<": remove_empty,')
        for name in self.rules:
            transformations = '[]'
            rule = self.definitions[name]
            if rule.startswith('Alternative'):
                transformations = '[replace_or_reduce]'
            elif rule.startswith('Synonym'):
                transformations = '[reduce_single_child]'
            transtable.append('    "' + name + '": %s,' % transformations)
        transtable.append('    ":Token": reduce_single_child,')
        transtable += ['    "*": replace_by_single_child', '}', '']
        transtable += [TRANSFORMER_FACTORY.format(NAME=self.grammar_name)]
        return '\n'.join(transtable)


    def gen_compiler_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for a Compiler-class for the
        previously compiled formal language.
        """
        if not self.rules:
            raise EBNFCompilerError('Compiler has not been run before calling '
                                    '"gen_Compiler_Skeleton()"!')
        compiler = ['class ' + self.grammar_name + 'Compiler(Compiler):',
                    '    """Compiler for the abstract-syntax-tree of a '
                    + self.grammar_name + ' source file.',
                    '    """', '',
                    '    def __init__(self, grammar_name="'
                    + self.grammar_name + '", grammar_source=""):',
                    '        super(' + self.grammar_name
                    + 'Compiler, self).__init__(grammar_name, grammar_source)',
                    r"        assert re.match('\w+\Z', grammar_name)", '',
                    '    def _reset(self):',
                    '        super()._reset()',
                    '        # initialize your variables here, not in the constructor!']
        for name in self.rules:
            method_name = Compiler.method_name(name)
            if name == self.root_symbol:
                compiler += ['    def ' + method_name + '(self, node):',
                             '        return self.fallback_compiler(node)', '']
            else:
                compiler += ['    # def ' + method_name + '(self, node):',
                             '    #     return node', '']
        compiler += [COMPILER_FACTORY.format(NAME=self.grammar_name)]
        return '\n'.join(compiler)

    def verify_transformation_table(self, transtable):
        """
        Checks for symbols that occur in the transformation-table but have
        never been defined in the grammar. Usually, this kind of
        inconsistency results from an error like a typo in the transformation
        table.
        """
        assert self._dirty_flag
        table_entries = set(expand_table(transtable).keys()) - {'*', '<', '>', '~'}
        symbols = self.rules.keys()
        messages = []
        for entry in table_entries:
            if entry not in symbols and not entry.startswith(":"):
                messages.append(Error(('Symbol "%s" is not defined in grammar %s but appears in '
                                       'the transformation table!') % (entry, self.grammar_name),
                                      0, Error.UNDEFINED_SYMBOL_IN_TRANSFORMATION_TABLE))
        return messages

    def verify_compiler(self, compiler):
        """
        Checks for on_XXXX()-methods that occur in the compiler, although XXXX
        has never been defined in the grammar. Usually, this kind of
        inconsistency results from an error like a typo in the compiler-code.
        """
        pass  # TODO: add verification code here

    def assemble_parser(self, definitions: List[Tuple[str, str]], root_node: Node) -> str:
        """
        Creates the Python code for the parser after compilation of
        the EBNF-Grammar
        """

        # execute deferred tasks, for example semantic checks that cannot
        # be done before the symbol table is complete

        for task in self.deferred_tasks:
            task()

        # provide for capturing of symbols that are variables, i.e. the
        # value of which will be retrieved at some point during the parsing process

        if self.variables:
            for i in range(len(definitions)):
                if definitions[i][0] in self.variables:
                    definitions[i] = (definitions[i][0], 'Capture(%s)' % definitions[i][1])

        # add special fields for Grammar class

        definitions.append((self.WHITESPACE_PARSER_KEYWORD,
                            'Whitespace(%s)' % self.WHITESPACE_KEYWORD))
        definitions.append((self.WHITESPACE_KEYWORD,
                            ("mixin_comment(whitespace=" + self.RAW_WS_KEYWORD
                             + ", comment=" + self.COMMENT_KEYWORD + ")")))
        definitions.append((self.RAW_WS_KEYWORD, "r'{whitespace}'".format(**self.directives)))
        definitions.append((self.COMMENT_KEYWORD, "r'{comment}'".format(**self.directives)))

        # prepare parser class header and docstring and
        # add EBNF grammar to the doc string of the parser class

        article = 'an ' if self.grammar_name[0:1] in "AaEeIiOoUu" else 'a '  # what about 'hour', 'universe' etc.?
        declarations = ['class ' + self.grammar_name
                        + 'Grammar(Grammar):',
                        'r"""Parser for ' + article + self.grammar_name
                        + ' source file'
                        + (', with this grammar:' if self.grammar_source else '.')]
        definitions.append(('parser_initialization__', '"upon instantiation"'))
        if self.grammar_source:
            definitions.append(('source_hash__',
                                '"%s"' % md5(self.grammar_source, __version__)))
            declarations.append('')
            declarations += [line for line in self.grammar_source.split('\n')]
            while declarations[-1].strip() == '':
                declarations = declarations[:-1]
        declarations.append('"""')

        # turn definitions into declarations in reverse order

        self.root_symbol = definitions[0][0] if definitions else ""
        definitions.reverse()
        declarations += [symbol + ' = Forward()'
                         for symbol in sorted(list(self.recursive))]
        for symbol, statement in definitions:
            if symbol in self.recursive:
                declarations += [symbol + '.set(' + statement + ')']
            else:
                declarations += [symbol + ' = ' + statement]

        # check for symbols used but never defined

        defined_symbols = set(self.rules.keys()) | self.RESERVED_SYMBOLS
        for symbol in self.symbols:
            if symbol not in defined_symbols:
                self.tree.new_error(self.symbols[symbol],
                                    "Missing definition for symbol '%s'" % symbol)
                # root_node.error_flag = True

        # check for unconnected rules

        defined_symbols.difference_update(self.RESERVED_SYMBOLS)

        def remove_connections(symbol):
            """Recursively removes all symbols which appear in the
            definiens of a particular symbol."""
            if symbol in defined_symbols:
                defined_symbols.remove(symbol)
                for related in self.rules[symbol][1:]:
                    remove_connections(str(related))

        remove_connections(self.root_symbol)
        for leftover in defined_symbols:
            self.tree.new_error(self.rules[leftover][0],
                                ('Rule "%s" is not connected to parser root "%s" !') %
                                (leftover, self.root_symbol), Error.WARNING)

        # set root_symbol parser and assemble python grammar definition

        if self.root_symbol and 'root__' not in self.rules:
            declarations.append('root__ = ' + self.root_symbol)
        declarations.append('')
        self._result = '\n    '.join(declarations) \
                       + GRAMMAR_FACTORY.format(NAME=self.grammar_name)
        return self._result


    ## compilation methods

    def on_syntax(self, node: Node) -> str:
        definitions = []  # type: List[Tuple[str, str]]

        # drop the wrapping sequence node
        if len(node.children) == 1 and not node.children[0].parser.name:
            node = node.children[0]

        # compile definitions and directives and collect definitions
        for nd in node.children:
            if nd.parser.name == "definition":
                definitions.append(self.compile(nd))
            else:
                assert nd.parser.name == "directive", nd.as_sxpr()
                self.compile(nd)
            # node.error_flag = max(node.error_flag, nd.error_flag)
        self.definitions.update(definitions)

        return self.assemble_parser(definitions, node)


    def on_definition(self, node: Node) -> Tuple[str, str]:
        rule = node.children[0].content
        if rule in self.rules:
            first = self.rules[rule][0]
            if not first.errors:
                self.tree.new_error(first, 'First definition of rule "%s" '
                                    'followed by illegal redefinitions.' % rule)
            self.tree.new_error(node, 'A rule "%s" has already been defined earlier.' % rule)
        elif rule in EBNFCompiler.RESERVED_SYMBOLS:
            self.tree.new_error(node, 'Symbol "%s" is a reserved symbol.' % rule)
        elif not sane_parser_name(rule):
            self.tree.new_error(node, 'Illegal symbol "%s". Symbols must not start or '
                                'end with a double underscore "__".' % rule)
        elif rule in self.directives['tokens']:
            self.tree.new_error(node, 'Symbol "%s" has already been defined as '
                                'a preprocessor token.' % rule)
        elif keyword.iskeyword(rule):
            self.tree.new_error(node, 'Python keyword "%s" may not be used as a symbol. '
                                % rule + '(This may change in the future.)')
        try:
            self.current_symbols = [node]
            self.rules[rule] = self.current_symbols
            defn = self.compile(node.children[1])
            if rule in self.variables:
                defn = 'Capture(%s)' % defn
                self.variables.remove(rule)
            elif defn.find("(") < 0:
                # assume it's a synonym, like 'page = REGEX_PAGE_NR'
                defn = 'Synonym(%s)' % defn
        except TypeError as error:
            from traceback import extract_tb
            trace = str(extract_tb(error.__traceback__)[-1])
            errmsg = "%s (TypeError: %s; %s)\n%s" \
                     % (EBNFCompiler.AST_ERROR, str(error), trace, node.as_sxpr())
            self.tree.new_error(node, errmsg)
            rule, defn = rule + ':error', '"' + errmsg + '"'
        return rule, defn


    def _check_rx(self, node: Node, rx: str) -> str:
        """
        Checks whether the string `rx` represents a valid regular
        expression. Makes sure that multiline regular expressions are
        prepended by the multiline-flag. Returns the regular expression string.
        """
        flags = self.re_flags | {'x'} if rx.find('\n') >= 0 else self.re_flags
        if flags:
            rx = "(?%s)%s" % ("".join(flags), rx)
        try:
            re.compile(rx)
        except Exception as re_error:
            self.tree.new_error(node, "malformed regular expression %s: %s" %
                                (repr(rx), str(re_error)))
        return rx
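
    # Example (sketch): with no other re-flags set, a regular expression that
    # contains a line break gets the verbose-flag prepended, e.g.
    #     self._check_rx(node, '\w+\n\s*')  ->  '(?x)\w+\n\s*'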


    def on_directive(self, node: Node) -> str:
        key = node.children[0].content.lower()
        assert key not in self.directives['tokens']

        if key not in self.REPEATABLE_DIRECTIVES:
            if key in self.defined_directives:
                self.tree.new_error(node, 'Directive "%s" has already been defined earlier. '
                                    % key + 'Later definition will be ignored!',
                                    code=Error.REDEFINED_DIRECTIVE_WARNING)
                return ""
            self.defined_directives.add(key)

        if key in {'comment', 'whitespace'}:
            if node.children[1].parser.name == "list_":
                if len(node.children[1].result) != 1:
                    self.tree.new_error(node, 'Directive "%s" must have one, but not %i values.'
                                        % (key, len(node.children[1].result)))
                value = self.compile(node.children[1]).pop()
                if key == 'whitespace' and value in EBNFCompiler.WHITESPACE:
                    value = EBNFCompiler.WHITESPACE[value]  # replace whitespace-name by regex
                else:
                    self.tree.new_error(node, 'Value "%s" not allowed for directive "%s".'
                                        % (value, key))
            else:
                value = node.children[1].content.strip("~")  # cast(str, node.children[
                # 1].result).strip("~")
                if value != node.children[1].content:  # cast(str, node.children[1].result):
                    self.tree.new_error(node, "Whitespace marker '~' not allowed in definition "
                                        "of %s regular expression." % key)
                if value[0] + value[-1] in {'""', "''"}:
                    value = escape_re(value[1:-1])
                elif value[0] + value[-1] == '//':
                    value = self._check_rx(node, value[1:-1])
                if key == 'whitespace' and not re.match(value, ''):
                    self.tree.new_error(node, "Implicit whitespace should always "
                                        "match the empty string, /%s/ does not." % value)
            self.directives[key] = value

        elif key == 'ignorecase':
            if node.children[1].content.lower() not in {"off", "false", "no"}:
                self.re_flags.add('i')

        # elif key == 'testing':
        #     value = node.children[1].content
        #     self.directives['testing'] = value.lower() not in {"off", "false", "no"}

        elif key == 'literalws':
            value = {item.lower() for item in self.compile(node.children[1])}
            if ((value - {'left', 'right', 'both', 'none'})
                    or ('none' in value and len(value) > 1)):
                self.tree.new_error(node, 'Directive "literalws" allows only `left`, `right`, '
                                    '`both` or `none`, not `%s`' % ", ".join(value))
            wsp = {'left', 'right'} if 'both' in value \
                else {} if 'none' in value else value
            self.directives[key] = list(wsp)

        elif key in {'tokens', 'preprocessor_tokens'}:
            tokens = self.compile(node.children[1])
            redeclared = self.directives['tokens'] & tokens
            if redeclared:
                self.tree.new_error(node, 'Tokens %s have already been declared earlier. '
                                    % str(redeclared) + 'Later declaration will be ignored',
                                    code=Error.REDECLARED_TOKEN_WARNING)
            self.directives['tokens'] |= tokens - redeclared

        elif key.endswith('_filter'):
            filter_set = self.compile(node.children[1])
            if not isinstance(filter_set, set) or len(filter_set) != 1:
                self.tree.new_error(node, 'Directive "%s" accepts exactly one symbol, not %s'
                                    % (key, str(filter_set)))
            self.directives['filter'][key[:-7]] = filter_set.pop()

        else:
            self.tree.new_error(node, 'Unknown directive %s ! (Known ones are %s .)' %
                                (key, ', '.join(list(self.directives.keys()))))
        return ""


    def non_terminal(self, node: Node, parser_class: str, custom_args: List[str] = []) -> str:
        """
        Compiles any non-terminal, where `parser_class` indicates the Parser class
        name for the particular non-terminal.
        """
        arguments = [self.compile(r) for r in node.children] + custom_args
        return parser_class + '(' + ', '.join(arguments) + ')'
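
    # Example (sketch): if the children of `node` compile to the snippets
    # "RegExp('~')" and "wsp__" and parser_class is 'Series', the returned
    # source string is "Series(RegExp('~'), wsp__)".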


    def on_expression(self, node) -> str:
        # TODO: Add check for errors like "a" | "ab" (which will always yield a, even for ab)
        return self.non_terminal(node, 'Alternative')


    def on_term(self, node) -> str:
        # Basically, the following code does only this:
        #       return self.non_terminal(node, 'Series')
        # What makes it (look) more complicated is the handling of the
        # mandatory §-operator
        mandatory_marker = []
        filtered_children = []  # type: List[Node]
        for nd in node.children:
            if nd.parser.ptype == TOKEN_PTYPE and nd.content == "§":
                mandatory_marker.append(len(filtered_children))
                # if len(filtered_children) == 0:
                #     self.tree.new_error(nd.pos, 'First item of a series should not be mandatory.',
                #                         Error.WARNING)
                if len(mandatory_marker) > 1:
                    self.tree.new_error(nd, 'One mandatory marker (§) is sufficient to declare '
                                        'the rest of the series as mandatory.', Error.WARNING)
            else:
                filtered_children.append(nd)
        saved_result = node.result
        node.result = tuple(filtered_children)
        if len(filtered_children) == 1:
            compiled = self.non_terminal(node, 'Required')
        else:
            custom_args = ['mandatory=%i' % mandatory_marker[0]] if mandatory_marker else []
            compiled = self.non_terminal(node, 'Series', custom_args)
        node.result = saved_result
        return compiled
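
    # Example (sketch): in a term like  factor_a §factor_b factor_c  the "§"
    # token is filtered out and its position recorded, so that the term
    # compiles to a Series(..., mandatory=1), i.e. everything from the second
    # factor onward is treated as mandatory.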


    def on_factor(self, node: Node) -> str:
        assert node.children
        assert len(node.children) >= 2, node.as_sxpr()
        prefix = node.children[0].content
        custom_args = []  # type: List[str]

        if prefix in {'::', ':'}:
            assert len(node.children) == 2
            arg = node.children[-1]
            if arg.parser.name != 'symbol':
                self.tree.new_error(node, ('Retrieve Operator "%s" requires a symbol, '
                                    'and not a %s.') % (prefix, str(arg.parser)))
                return str(arg.result)
            if str(arg) in self.directives['filter']:
                custom_args = ['rfilter=%s' % self.directives['filter'][str(arg)]]
            self.variables.add(str(arg))  # cast(str, arg.result)

        elif len(node.children) > 2:
            # shift = (Node(node.parser, node.result[1].result),)
            # node.result[1].result = shift + node.result[2:]
            node.children[1].result = (Node(node.children[1].parser, node.children[1].result),) \
                + node.children[2:]
            node.children[1].parser = node.parser
            node.result = (node.children[0], node.children[1])

        node.result = node.children[1:]
        try:
            parser_class = self.PREFIX_TABLE[prefix]
            result = self.non_terminal(node, parser_class, custom_args)
            if prefix[:1] == '-':
                def check(node):
                    nd = node
                    if len(nd.children) >= 1:
                        nd = nd.children[0]
                    while nd.parser.name == "symbol":
                        symlist = self.rules.get(nd.content, [])