"""ebnf.py - EBNF -> Python-Parser compilation for DHParser

Copyright 2016  by Eckhart Arnold (arnold@badw.de)
                Bavarian Academy of Sciences and Humanities (badw.de)

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied.  See the License for the specific language governing
permissions and limitations under the License.
"""

import keyword
from collections import OrderedDict
from functools import partial
from typing import Callable, Dict, List, Set, Tuple

from DHParser.error import Error
from DHParser.parse import Grammar, mixin_comment, Forward, RegExp, RE, \
    NegativeLookahead, Alternative, Series, Option, OneOrMore, ZeroOrMore, Token, \
    Compiler
from DHParser.preprocess import nil_preprocessor, PreprocessorFunc
from DHParser.syntaxtree import Node, TransformationFunc, WHITESPACE_PTYPE, TOKEN_PTYPE
from DHParser.toolkit import load_if_file, escape_re, md5, sane_parser_name, re
from DHParser.transform import traverse, remove_brackets, \
    reduce_single_child, replace_by_single_child, remove_expendables, \
    remove_tokens, flatten, forbid, assert_content, remove_infix_operator
from DHParser.versionnumber import __version__

__all__ = ('get_ebnf_preprocessor',
           'get_ebnf_grammar',
           'get_ebnf_transformer',
           'get_ebnf_compiler',
           'EBNFGrammar',
           'EBNFTransform',
           'EBNFCompilerError',
           'EBNFCompiler',
           'grammar_changed',
           'PreprocessorFactoryFunc',
           'ParserFactoryFunc',
           'TransformerFactoryFunc',
           'CompilerFactoryFunc')


########################################################################
#
# EBNF scanning
#
########################################################################


def get_ebnf_preprocessor() -> PreprocessorFunc:
    return nil_preprocessor


########################################################################
#
# EBNF parsing
#
########################################################################


class EBNFGrammar(Grammar):
    r"""Parser for an EBNF source file, with this grammar:

    # EBNF-Grammar in EBNF

    @ comment    =  /#.*(?:\n|$)/                    # comments start with '#' and eat all chars up to and including '\n'
    @ whitespace =  /\s*/                            # whitespace includes linefeed
    @ literalws  =  right                            # trailing whitespace of literals will be ignored tacitly

    syntax     =  [~//] { definition | directive } §EOF
    definition =  symbol §"=" expression
    directive  =  "@" §symbol "=" ( regexp | literal | list_ )

    expression =  term { "|" term }
    term       =  { ["§"] factor }+                       # "§" means all following factors mandatory
    factor     =  [flowmarker] [retrieveop] symbol !"="   # negative lookahead to be sure it's not a definition
                | [flowmarker] literal
                | [flowmarker] regexp
                | [flowmarker] oneormore
                | [flowmarker] group
                | [flowmarker] unordered
                | repetition
                | option

    flowmarker =  "!"  | "&"                         # '!' negative lookahead, '&' positive lookahead
                | "-!" | "-&"                        # '-' negative lookbehind, '-&' positive lookbehind
    retrieveop =  "::" | ":"                         # '::' pop, ':' retrieve

    group      =  "(" §expression ")"
    unordered  =  "<" §expression ">"                # elements of expression in arbitrary order
    oneormore  =  "{" expression "}+"
    repetition =  "{" §expression "}"
    option     =  "[" §expression "]"

    symbol     =  /(?!\d)\w+/~                       # e.g. expression, factor, parameter_list
    literal    =  /"(?:[^"]|\\")*?"/~                # e.g. "(", '+', 'while'
                | /'(?:[^']|\\')*?'/~                # whitespace following literals will be ignored tacitly.
    regexp     =  /~?\/(?:\\\/|[^\/])*?\/~?/~        # e.g. /\w+/, ~/#.*(?:\n|$)/~
                                                     # '~' is a whitespace-marker, if present leading or trailing
                                                     # whitespace of a regular expression will be ignored tacitly.
    list_      =  /\w+/~ { "," /\w+/~ }              # comma separated list of symbols, e.g. BEGIN_LIST, END_LIST,
                                                     # BEGIN_QUOTE, END_QUOTE; see CommonMark/markdown.py for an example
    EOF =  !/./
    """
    expression = Forward()
    source_hash__ = "3fc9f5a340f560e847d9af0b61a68743"
    parser_initialization__ = "upon instantiation"
    COMMENT__ = r'#.*(?:\n|$)'
    WHITESPACE__ = r'\s*'
    WSP__ = mixin_comment(whitespace=WHITESPACE__, comment=COMMENT__)
    wspL__ = ''
    wspR__ = WSP__
    EOF = NegativeLookahead(RegExp('.'))
    list_ = Series(RE('\\w+'), ZeroOrMore(Series(Token(","), RE('\\w+'))))
    regexp = RE('~?/(?:\\\\/|[^/])*?/~?')
    literal = Alternative(RE('"(?:[^"]|\\\\")*?"'), RE("'(?:[^']|\\\\')*?'"))
    symbol = RE('(?!\\d)\\w+')
    option = Series(Token("["), expression, Token("]"), mandatory=1)
    repetition = Series(Token("{"), expression, Token("}"), mandatory=1)
    oneormore = Series(Token("{"), expression, Token("}+"))
    unordered = Series(Token("<"), expression, Token(">"), mandatory=1)
    group = Series(Token("("), expression, Token(")"), mandatory=1)
    retrieveop = Alternative(Token("::"), Token(":"))
    flowmarker = Alternative(Token("!"), Token("&"), Token("-!"), Token("-&"))
    factor = Alternative(
        Series(Option(flowmarker), Option(retrieveop), symbol, NegativeLookahead(Token("="))),
        Series(Option(flowmarker), literal), Series(Option(flowmarker), regexp),
        Series(Option(flowmarker), oneormore), Series(Option(flowmarker), group),
        Series(Option(flowmarker), unordered), repetition, option)
    term = OneOrMore(Series(Option(Token("§")), factor))
    expression.set(Series(term, ZeroOrMore(Series(Token("|"), term))))
    directive = Series(Token("@"), symbol, Token("="), Alternative(regexp, literal, list_),
                       mandatory=1)
    definition = Series(symbol, Token("="), expression, mandatory=1)
    syntax = Series(Option(RE('', wR='', wL=WSP__)), ZeroOrMore(Alternative(definition, directive)),
                    EOF, mandatory=2)
    root__ = syntax
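

# Usage sketch (illustrative only, not executed on import): parsing an EBNF
# source text with the grammar defined above.  This assumes that Grammar
# instances are callable with the source text, as provided by DHParser's
# parse module; the sample grammar string is hypothetical.
#
#     ebnf_source = '''
#         document = { word }
#         word     = /\w+/~
#     '''
#     syntax_tree = get_ebnf_grammar()(ebnf_source)   # concrete syntax tree (Node)
#     assert not syntax_tree.error_flag, "EBNF source contains errors"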


def grammar_changed(grammar_class, grammar_source: str) -> bool:
    """
    Returns ``True`` if ``grammar_class`` does not reflect the latest
    changes of ``grammar_source``.

    Parameters:
        grammar_class:  the parser class representing the grammar
            or the file name of a compiler suite containing the grammar
        grammar_source:  File name or string representation of the
            EBNF code of the grammar

    Returns (bool):
        True, if the source text of the grammar is different from the
        source from which the grammar class was generated
    """
    grammar = load_if_file(grammar_source)
    chksum = md5(grammar, __version__)
    if isinstance(grammar_class, str):
        # grammar_class = load_compiler_suite(grammar_class)[1]
        with open(grammar_class, 'r', encoding='utf8') as f:
            pycode = f.read()
        m = re.search(r'class \w*\(Grammar\)', pycode)
        if m:
            m = re.search('    source_hash__ *= *"([a-z0-9]*)"',
                          pycode[m.span()[1]:])
            return not (m and m.groups() and m.groups()[-1] == chksum)
        else:
            return True
    else:
        return chksum != grammar_class.source_hash__
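

# Usage sketch (illustrative): `grammar_changed` can be used to decide whether
# a previously generated parser module is stale; the file name is hypothetical.
#
#     if grammar_changed(EBNFGrammar, 'Arithmetic.ebnf'):
#         pass  # re-run the EBNF compiler instead of reusing the cached parser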


def get_ebnf_grammar() -> EBNFGrammar:
    global thread_local_ebnf_grammar_singleton
    try:
        grammar = thread_local_ebnf_grammar_singleton
        return grammar
    except NameError:
        thread_local_ebnf_grammar_singleton = EBNFGrammar()
        return thread_local_ebnf_grammar_singleton
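
# Note (explanatory assumption): one grammar object is kept per thread because
# Grammar instances carry state while parsing; the factory templates further
# below generate the same thread-local singleton pattern for compiled grammars.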


########################################################################
#
# EBNF concrete to abstract syntax tree transformation and validation
#
########################################################################


EBNF_AST_transformation_table = {
    # AST Transformations for EBNF-grammar
    "+":
        remove_expendables,
    "syntax":
        [],  # otherwise '"*": replace_by_single_child' would be applied
    "directive, definition":
        remove_tokens('@', '='),
    "expression":
        [replace_by_single_child, flatten, remove_tokens('|')],  # remove_infix_operator],
    "term":
        [replace_by_single_child, flatten],  # supports both idioms:  "{ factor }+" and "factor { factor }"
    "factor, flowmarker, retrieveop":
        replace_by_single_child,
    "group":
        [remove_brackets, replace_by_single_child],
    "unordered":
        remove_brackets,
    "oneormore, repetition, option":
        [reduce_single_child, remove_brackets,
         forbid('repetition', 'option', 'oneormore'), assert_content(r'(?!§)')],
    "symbol, literal, regexp":
        reduce_single_child,
    (TOKEN_PTYPE, WHITESPACE_PTYPE):
        reduce_single_child,
    "list_":
        [flatten, remove_infix_operator],
    "*":
        replace_by_single_child
}


def EBNFTransform() -> TransformationFunc:
    return partial(traverse, processing_table=EBNF_AST_transformation_table.copy())

def get_ebnf_transformer() -> TransformationFunc:
    global thread_local_EBNF_transformer_singleton
    try:
        transformer = thread_local_EBNF_transformer_singleton
    except NameError:
        thread_local_EBNF_transformer_singleton = EBNFTransform()
        transformer = thread_local_EBNF_transformer_singleton
    return transformer
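

# Usage sketch (illustrative): the transformer returned by get_ebnf_transformer()
# is applied in place to the concrete syntax tree delivered by the EBNF grammar,
# turning it into the abstract syntax tree that EBNFCompiler expects.
#
#     syntax_tree = get_ebnf_grammar()(ebnf_source)
#     get_ebnf_transformer()(syntax_tree)   # AST transformation, in place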


########################################################################
#
# EBNF abstract syntax tree to Python parser compilation
#
########################################################################


PreprocessorFactoryFunc = Callable[[], PreprocessorFunc]
ParserFactoryFunc = Callable[[], Grammar]
TransformerFactoryFunc = Callable[[], TransformationFunc]
CompilerFactoryFunc = Callable[[], Compiler]

PREPROCESSOR_FACTORY = '''
def get_preprocessor() -> PreprocessorFunc:
    return {NAME}Preprocessor
'''


GRAMMAR_FACTORY = '''
def get_grammar() -> {NAME}Grammar:
    global thread_local_{NAME}_grammar_singleton
    try:
        grammar = thread_local_{NAME}_grammar_singleton
    except NameError:
        thread_local_{NAME}_grammar_singleton = {NAME}Grammar()
        grammar = thread_local_{NAME}_grammar_singleton
    return grammar
'''


TRANSFORMER_FACTORY = '''
def {NAME}Transform() -> TransformationDict:
    return partial(traverse, processing_table={NAME}_AST_transformation_table.copy())

def get_transformer() -> TransformationFunc:
    global thread_local_{NAME}_transformer_singleton
    try:
        transformer = thread_local_{NAME}_transformer_singleton
    except NameError:
        thread_local_{NAME}_transformer_singleton = {NAME}Transform()
        transformer = thread_local_{NAME}_transformer_singleton
    return transformer
'''


COMPILER_FACTORY = '''
def get_compiler(grammar_name="{NAME}", grammar_source="") -> {NAME}Compiler:
    global thread_local_{NAME}_compiler_singleton
    try:
        compiler = thread_local_{NAME}_compiler_singleton
        compiler.set_grammar_name(grammar_name, grammar_source)
    except NameError:
        thread_local_{NAME}_compiler_singleton = \\
            {NAME}Compiler(grammar_name, grammar_source)
        compiler = thread_local_{NAME}_compiler_singleton
    return compiler
'''
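

# Illustrative example: the factory templates above are plain format strings.
# For a (hypothetical) grammar named "Arithmetic",
# GRAMMAR_FACTORY.format(NAME="Arithmetic") yields a get_grammar() function
# along the lines of
#
#     def get_grammar() -> ArithmeticGrammar:
#         global thread_local_Arithmetic_grammar_singleton
#         ...
#
# i.e. the same thread-local singleton idiom as get_ebnf_grammar() above.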


class EBNFCompilerError(Exception):
    """Error raised by `EBNFCompiler` class. (Not compilation errors
    in the strict sense, see `CompilationError` in module ``dsl.py``)"""
    pass
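

# Pipeline sketch (illustrative): how the pieces of this module fit together.
# An EBNFCompiler instance is called with the root node of the transformed
# syntax tree, as described in its docstring below; the names used here are
# hypothetical.
#
#     syntax_tree = get_ebnf_grammar()(ebnf_source)
#     get_ebnf_transformer()(syntax_tree)
#     compiler = EBNFCompiler('Arithmetic', ebnf_source)
#     python_src = compiler(syntax_tree)   # source code of an ArithmeticGrammar class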


class EBNFCompiler(Compiler):
    """
    Generates a Parser from an abstract syntax tree of a grammar specified
    in EBNF-Notation.

    Instances of this class must be called with the root-node of the
    abstract syntax tree from an EBNF-specification of a formal language.
    The returned value is the Python-source-code of a Grammar class for
    this language that can be used to parse texts in this language.
    See classes `parser.Compiler` and `parser.Grammar` for more information.

    Additionally, class EBNFCompiler provides helper methods to generate
    code-skeletons for a preprocessor, AST-transformation and full
    compilation of the formal language. These methods' names start with
    the prefix `gen_`.

    Attributes:
        current_symbols:  During compilation, a list containing the root
                node of the currently compiled definition as first element
                and then the nodes of the symbols that are referred to in
                the currently compiled definition.

        rules:  Dictionary that maps rule names to a list of Nodes that
                contain symbol-references in the definition of the rule.
                The first item in the list is the node of the rule-
                definition itself. Example:

                           `alternative = a | b`

                Now `[node.content for node in self.rules['alternative']]`
                yields `['alternative = a | b', 'a', 'b']`

        symbols:  A mapping of symbol names to their first usage (not
                their definition!) in the EBNF source.

        variables:  A set of symbol names that are used with the
                Pop or Retrieve operator. Because the values of these
                symbols need to be captured they are called variables.
                See `test_parser.TestPopRetrieve` for an example.

        recursive:  A set of symbols that are used recursively and
                therefore require a `Forward`-operator.

        definitions:  A dictionary of definitions. Unlike `rules`,
                this maps the symbols to their compiled definienda.

        deferred_tasks:  A list of callables that is filled during
                compilation, but that will be executed only after
                compilation has finished. Typically, it contains
                semantic checks that require information that
                is only available upon completion of compilation.

        root_symbol:  The name of the root symbol.

        directives:  A dictionary of all directives and their default
                values.

        re_flags:  A set of regular expression flags to be added to all
                regular expressions found in the current parsing process.
    """
    COMMENT_KEYWORD = "COMMENT__"
    WHITESPACE_KEYWORD = "WSP__"
    RAW_WS_KEYWORD = "WHITESPACE__"
    RESERVED_SYMBOLS = {WHITESPACE_KEYWORD, RAW_WS_KEYWORD, COMMENT_KEYWORD}
    AST_ERROR = "Badly structured syntax tree. " \
                "Potentially due to erroneous AST transformation."
    PREFIX_TABLE = {'§': 'Required',
                    '&': 'Lookahead', '!': 'NegativeLookahead',
                    '-&': 'Lookbehind', '-!': 'NegativeLookbehind',
                    '::': 'Pop', ':': 'Retrieve'}
    WHITESPACE = {'horizontal': r'[\t ]*',  # default: horizontal
                  'linefeed': r'[ \t]*\n?(?!\s*\n)[ \t]*',
                  'vertical': r'\s*'}
    REPEATABLE_DIRECTIVES = {'tokens'}


    def __init__(self, grammar_name="", grammar_source=""):
        super(EBNFCompiler, self).__init__(grammar_name, grammar_source)
        self._reset()


    def _reset(self):
        super(EBNFCompiler, self)._reset()
        self._result = ''           # type: str
        self.re_flags = set()       # type: Set[str]
        self.rules = OrderedDict()  # type: OrderedDict[str, List[Node]]
        self.current_symbols = []   # type: List[Node]
        self.symbols = {}           # type: Dict[str, Node]
        self.variables = set()      # type: Set[str]
        self.recursive = set()      # type: Set[str]
        self.definitions = {}       # type: Dict[str, str]
        self.deferred_tasks = []    # type: List[Callable]
        self.root_symbol = ""       # type: str
        self.directives = {'whitespace': self.WHITESPACE['horizontal'],
                           'comment': '',
                           'literalws': {'right'},
                           'tokens': set(),  # alt. 'preprocessor_tokens'
                           'filter': dict()}  # alt. 'filter'
        # self.directives['ignorecase']: False
        self.defined_directives = set()  # type: Set[str]

    @property
    def result(self) -> str:
        return self._result

    # methods for generating skeleton code for preprocessor, transformer, and compiler

    def gen_preprocessor_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for a preprocessor-function for
        the previously compiled formal language.
        """
        name = self.grammar_name + "Preprocessor"
        return "def %s(text):\n    return text\n" % name \
               + PREPROCESSOR_FACTORY.format(NAME=self.grammar_name)


    def gen_transformer_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for the AST-transformation for the
        previously compiled formal language.
        """
        if not self.rules:
            raise EBNFCompilerError('Compiler must be run before calling '
                                    '"gen_transformer_skeleton()"!')
        tt_name = self.grammar_name + '_AST_transformation_table'
        transtable = [tt_name + ' = {',
                      '    # AST Transformations for the ' + self.grammar_name + '-grammar']
        transtable.append('    "+": remove_empty,')
        for name in self.rules:
            transformations = '[]'
            rule = self.definitions[name]
            if rule.startswith('Alternative'):
                transformations = '[replace_or_reduce]'
            elif rule.startswith('Synonym'):
                transformations = '[reduce_single_child]'
            transtable.append('    "' + name + '": %s,' % transformations)
        transtable.append('    ":Token, :RE": reduce_single_child,')
        transtable += ['    "*": replace_by_single_child', '}', '']
        transtable += [TRANSFORMER_FACTORY.format(NAME=self.grammar_name)]
        return '\n'.join(transtable)


    def gen_compiler_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for a Compiler-class for the
        previously compiled formal language.
        """
        if not self.rules:
            raise EBNFCompilerError('Compiler has not been run before calling '
                                    '"gen_compiler_skeleton()"!')
        compiler = ['class ' + self.grammar_name + 'Compiler(Compiler):',
                    '    """Compiler for the abstract-syntax-tree of a ' +
                    self.grammar_name + ' source file.',
                    '    """', '',
                    '    def __init__(self, grammar_name="' +
                    self.grammar_name + '", grammar_source=""):',
                    '        super(' + self.grammar_name +
                    'Compiler, self).__init__(grammar_name, grammar_source)',
                    r"        assert re.match('\w+\Z', grammar_name)", '']
        for name in self.rules:
            method_name = Compiler.method_name(name)
            if name == self.root_symbol:
                compiler += ['    def ' + method_name + '(self, node):',
                             '        return node', '']
            else:
                compiler += ['    def ' + method_name + '(self, node):',
                             '        pass', '']
        compiler += [COMPILER_FACTORY.format(NAME=self.grammar_name)]
        return '\n'.join(compiler)


    def assemble_parser(self, definitions: List[Tuple[str, str]], root_node: Node) -> str:
        """
        Creates the Python code for the parser after compilation of
        the EBNF-Grammar
        """

        # execute deferred tasks, for example semantic checks that cannot
        # be done before the symbol table is complete

        for task in self.deferred_tasks:
            task()

        # provide for capturing of symbols that are variables, i.e. symbols
        # the value of which will be retrieved at some point during the parsing process

        if self.variables:
            for i in range(len(definitions)):
                if definitions[i][0] in self.variables:
                    definitions[i] = (definitions[i][0], 'Capture(%s)' % definitions[i][1])

        # add special fields for Grammar class

        definitions.append(('wspR__', self.WHITESPACE_KEYWORD
                            if 'right' in self.directives['literalws'] else "''"))
        definitions.append(('wspL__', self.WHITESPACE_KEYWORD
                            if 'left' in self.directives['literalws'] else "''"))
        definitions.append((self.WHITESPACE_KEYWORD,
                            ("mixin_comment(whitespace=" + self.RAW_WS_KEYWORD +
                             ", comment=" + self.COMMENT_KEYWORD + ")")))
        definitions.append((self.RAW_WS_KEYWORD, "r'{whitespace}'".format(**self.directives)))
        definitions.append((self.COMMENT_KEYWORD, "r'{comment}'".format(**self.directives)))

        # prepare parser class header and docstring and
        # add EBNF grammar to the doc string of the parser class

        article = 'an ' if self.grammar_name[0:1] in "AaEeIiOoUu" else 'a '  # what about 'hour', 'universe' etc.?
        declarations = ['class ' + self.grammar_name +
                        'Grammar(Grammar):',
                        'r"""Parser for ' + article + self.grammar_name +
                        ' source file' +
                        (', with this grammar:' if self.grammar_source else '.')]
        definitions.append(('parser_initialization__', '"upon instantiation"'))
        if self.grammar_source:
            definitions.append(('source_hash__',
                                '"%s"' % md5(self.grammar_source, __version__)))
            declarations.append('')
            declarations += [line for line in self.grammar_source.split('\n')]
            while declarations[-1].strip() == '':
                declarations = declarations[:-1]
        declarations.append('"""')

        # turn definitions into declarations in reverse order
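        # Illustrative note: recursive symbols are first declared as Forward()
        # stubs and later bound via .set(), e.g. (cf. EBNFGrammar above)
        #     expression = Forward()
        #     ...
        #     expression.set(Series(term, ZeroOrMore(Series(Token("|"), term))))
        # while non-recursive symbols become plain assignments such as
        #     term = OneOrMore(Series(Option(Token("§")), factor))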

        self.root_symbol = definitions[0][0] if definitions else ""
        definitions.reverse()
        declarations += [symbol + ' = Forward()'
                         for symbol in sorted(list(self.recursive))]
        for symbol, statement in definitions:
            if symbol in self.recursive:
                declarations += [symbol + '.set(' + statement + ')']
            else:
                declarations += [symbol + ' = ' + statement]

        # check for symbols used but never defined

        defined_symbols = set(self.rules.keys()) | self.RESERVED_SYMBOLS
        for symbol in self.symbols:
            if symbol not in defined_symbols:
                self.symbols[symbol].add_error("Missing definition for symbol '%s'" % symbol)
                # root_node.error_flag = True

        # check for unconnected rules

        defined_symbols.difference_update(self.RESERVED_SYMBOLS)

        def remove_connections(symbol):
            """Recursively removes all symbols which appear in the
            definiens of a particular symbol."""
            if symbol in defined_symbols:
                defined_symbols.remove(symbol)
                for related in self.rules[symbol][1:]:
                    remove_connections(str(related))

        remove_connections(self.root_symbol)
        for leftover in defined_symbols:
            self.rules[leftover][0].add_error(
                'Rule "%s" is not connected to parser root "%s"!' %
                (leftover, self.root_symbol), Error.WARNING)

        # set root_symbol parser and assemble python grammar definition

        if self.root_symbol and 'root__' not in self.rules:
            declarations.append('root__ = ' + self.root_symbol)
        declarations.append('')
        self._result = '\n    '.join(declarations) \
                       + GRAMMAR_FACTORY.format(NAME=self.grammar_name)
        return self._result


    ## compilation methods

    def on_syntax(self, node: Node) -> str:
        definitions = []  # type: List[Tuple[str, str]]

        # drop the wrapping sequence node
        if len(node.children) == 1 and not node.children[0].parser.name:
            node = node.children[0]

        # compile definitions and directives and collect definitions
        for nd in node.children:
            if nd.parser.name == "definition":
                definitions.append(self.compile(nd))
            else:
                assert nd.parser.name == "directive", nd.as_sxpr()
                self.compile(nd)
            node.error_flag = max(node.error_flag, nd.error_flag)
        self.definitions.update(definitions)

        return self.assemble_parser(definitions, node)


    def on_definition(self, node: Node) -> Tuple[str, str]:
        rule = node.children[0].content
        if rule in self.rules:
            first = self.rules[rule][0]
            if not first._errors:
                first.add_error('First definition of rule "%s" '
                                'followed by illegal redefinitions.' % rule)
            node.add_error('A rule with name "%s" has already been defined earlier.' % rule)
        elif rule in EBNFCompiler.RESERVED_SYMBOLS:
            node.add_error('Symbol "%s" is a reserved symbol.' % rule)
        elif not sane_parser_name(rule):
            node.add_error('Illegal symbol "%s". Symbols must not start or '
                           'end with a double underscore "__".' % rule)
        elif rule in self.directives['tokens']:
            node.add_error('Symbol "%s" has already been defined as '
                           'a preprocessor token.' % rule)
        elif keyword.iskeyword(rule):
            node.add_error('Python keyword "%s" may not be used as a symbol. '
                           % rule + '(This may change in the future.)')
        try:
            self.current_symbols = [node]
            self.rules[rule] = self.current_symbols
            defn = self.compile(node.children[1])
            if rule in self.variables:
                defn = 'Capture(%s)' % defn
                self.variables.remove(rule)
            elif defn.find("(") < 0:
                # assume it's a synonym, like 'page = REGEX_PAGE_NR'
                defn = 'Synonym(%s)' % defn
        except TypeError as error:
            from traceback import extract_tb
            trace = str(extract_tb(error.__traceback__)[-1])
            errmsg = "%s (TypeError: %s; %s)\n%s" \
                     % (EBNFCompiler.AST_ERROR, str(error), trace, node.as_sxpr())
            node.add_error(errmsg)
            rule, defn = rule + ':error', '"' + errmsg + '"'
        return rule, defn


    def _check_rx(self, node: Node, rx: str) -> str:
        """
        Checks whether the string `rx` represents a valid regular
        expression. Makes sure that multiline regular expressions are
        prefixed with the verbose-flag "(?x)". Returns the regular expression string.
        """
        flags = self.re_flags | {'x'} if rx.find('\n') >= 0 else self.re_flags
        if flags:
            rx = "(?%s)%s" % ("".join(flags), rx)
        try:
            re.compile(rx)
        except Exception as re_error:
            node.add_error("malformed regular expression %s: %s" %
                           (repr(rx), str(re_error)))
        return rx


    def on_directive(self, node: Node) -> str:
        key = node.children[0].content.lower()
        assert key not in self.directives['tokens']

        if key not in self.REPEATABLE_DIRECTIVES:
            if key in self.defined_directives:
                node.add_error('Directive "%s" has already been defined earlier. ' % key + \
                               'Later definition will be ignored!',
                               code=Error.REDEFINED_DIRECTIVE_WARNING)
                return ""
            self.defined_directives.add(key)

        if key in {'comment', 'whitespace'}:
            if node.children[1].parser.name == "list_":
                if len(node.children[1].result) != 1:
                    node.add_error('Directive "%s" must have exactly one value, not %i.' %
                                   (key, len(node.children[1].result)))
                value = self.compile(node.children[1]).pop()
                if key == 'whitespace' and value in EBNFCompiler.WHITESPACE:
                    value = EBNFCompiler.WHITESPACE[value]  # replace whitespace-name by regex
                else:
                    node.add_error('Value "%s" not allowed for directive "%s".' % (value, key))
            else:
                value = node.children[1].content.strip("~")  # cast(str, node.children[
                # 1].result).strip("~")
                if value != node.children[1].content:  # cast(str, node.children[1].result):
                    node.add_error("Whitespace marker '~' not allowed in definition of "
                                   "%s regular expression." % key)
                if value[0] + value[-1] in {'""', "''"}:
                    value = escape_re(value[1:-1])
                elif value[0] + value[-1] == '//':
                    value = self._check_rx(node, value[1:-1])
                if key == 'whitespace' and not re.match(value, ''):
                    node.add_error("Implicit whitespace should always match the empty string, "
                                   "/%s/ does not." % value)
            self.directives[key] = value

        elif key == 'ignorecase':
            if node.children[1].content.lower() not in {"off", "false", "no"}:
                self.re_flags.add('i')

        # elif key == 'testing':
        #     value = node.children[1].content
        #     self.directives['testing'] = value.lower() not in {"off", "false", "no"}

        elif key == 'literalws':
            value = {item.lower() for item in self.compile(node.children[1])}
            if ((value - {'left', 'right', 'both', 'none'})
                    or ('none' in value and len(value) > 1)):
                node.add_error('Directive "literalws" allows the values '
                               '`left`, `right`, `both` or `none`, '
                               'but not `%s`' % ", ".join(value))
            wsp = {'left', 'right'} if 'both' in value \
                else set() if 'none' in value else value
            self.directives[key] = list(wsp)

        elif key in {'tokens', 'preprocessor_tokens'}:
            tokens = self.compile(node.children[1])
            redeclared = self.directives['tokens'] & tokens
            if redeclared:
                node.add_error('Tokens %s have already been declared earlier. '
                               % str(redeclared) + 'Later declaration will be ignored',
                               code=Error.REDECLARED_TOKEN_WARNING)
            self.directives['tokens'] |= tokens - redeclared

        elif key.endswith('_filter'):
            filter_set = self.compile(node.children[1])
            if not isinstance(filter_set, set) or len(filter_set) != 1:
                node.add_error('Directive "%s" accepts exactly one symbol, not %s'
                               % (key, str(filter_set)))
            self.directives['filter'][key[:-7]] = filter_set.pop()

        else:
            node.add_error('Unknown directive %s! (Known directives are %s.)' %
                           (key, ', '.join(list(self.directives.keys()))))
        return ""


    def non_terminal(self, node: Node, parser_class: str, custom_args: List[str]=[]) -> str:
        """
        Compiles any non-terminal, where `parser_class` indicates the Parser class
        name for the particular non-terminal.
        """
        arguments = [self.compile(r) for r in node.children] + custom_args
        node.error_flag = max(node.error_flag, max(t.error_flag for t in node.children))
        return parser_class + '(' + ', '.join(arguments) + ')'


    def on_expression(self, node) -> str:
        # TODO: Add check for errors like "a" | "ab" (which will always yield a, even for ab)
        return self.non_terminal(node, 'Alternative')


    def on_term(self, node) -> str:
        # Basically, the following code does only this:
        #       return self.non_terminal(node, 'Series')
        # What makes it (look) more complicated is the handling of the
        # mandatory §-operator
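        # Illustrative example (cf. the grammar in EBNFGrammar's docstring):
        # the definition  group = "(" §expression ")"  compiles to
        #     Series(Token("("), expression, Token(")"), mandatory=1)
        # because the "§" itself is dropped and only its position is passed on.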
        mandatory_marker = []
        filtered_children = []
        i = 0
        for nd in node.children:
            if nd.parser.ptype == TOKEN_PTYPE and nd.content == "§":
                mandatory_marker.append(i)
                if i == 0:
                    nd.add_error('First item of a series should not be mandatory.',
                                 Error.WARNING)
                elif len(mandatory_marker) > 1:
                    nd.add_error('One mandatory marker (§) is sufficient to declare the '
                                 'rest of the series as mandatory.', Error.WARNING)
            else:
                filtered_children.append(nd)
                i += 1
        saved_result = node.result
        node.result = tuple(filtered_children)
        custom_args = ['mandatory=%i' % mandatory_marker[0]] if mandatory_marker else []
        compiled = self.non_terminal(node, 'Series', custom_args)
        node.result = saved_result
        return compiled


    def on_factor(self, node: Node) -> str:
        assert node.children
        assert len(node.children) >= 2, node.as_sxpr()
        prefix = node.children[0].content
        custom_args = []  # type: List[str]

        if prefix in {'::', ':'}:
            assert len(node.children) == 2
            arg = node.children[-1]
            if arg.parser.name != 'symbol':
                node.add_error(('Retrieve Operator "%s" requires a symbol, '
                                'and not a %s.') % (prefix, str(arg.parser)))
                return str(arg.result)
            if str(arg) in self.directives['filter']:
                custom_args = ['rfilter=%s' % self.directives['filter'][str(arg)]]
            self.variables.add(str(arg))  # cast(str, arg.result)

        elif len(node.children) > 2:
            # shift = (Node(node.parser, node.result[1].result),)
            # node.result[1].result = shift + node.result[2:]
            node.children[1].result = (Node(node.children[1].parser, node.children[1].result),) \
                                    + node.children[2:]
            node.children[1].parser = node.parser
            node.result = (node.children[0], node.children[1])

        node.result = node.children[1:]
        try:
            parser_class = self.PREFIX_TABLE[prefix]
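            # e.g. the prefix '!' maps to 'NegativeLookahead', so that a factor
            # like  !"="  compiles to  NegativeLookahead(Token("="))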
            result = self.non_terminal(node, parser_class, custom_args)
            if prefix[:1] == '-':
                def check(node):
                    nd = node
                    if len(nd.children) >= 1:
                        nd = nd.children[0]
                    while nd.parser.name == "symbol":
                        symlist = self.rules.get(nd.content, [])
                        if len(symlist) == 2:
                            nd = symlist[1]
                        else:
                            if len(symlist) == 1:
                                nd = symlist[0].children[1]
                            break
                    if (nd.parser.name != "regexp" or nd.content[:1] != '/'
                            or nd.content[-1:] != '/'):
                        node.add_error("Lookbehind-parser can only be used with plain RegExp-"
                                       "parsers, not with: " + nd.parser.name + nd.parser.ptype)

                if not result.startswith('RegExp('):
                    self.deferred_tasks.append(lambda: check(node))
            return result
        except KeyError:
            node.add_error('Unknown prefix "%s".' % prefix)
        return ""


    def on_option(self, node) -> str:
        return self.non_terminal(node, 'Option')


    def on_repetition(self, node) -> str:
        return self.non_terminal(node, 'ZeroOrMore')


    def on_oneormore(self, node) -> str:
        return self.non_terminal(node, 'OneOrMore')


    def on_group(self, node) -> str:
        raise EBNFCompilerError("Group nodes should have been eliminated by "
                                "AST transformation!")

    def on_unordered(self, node) -> str:
        # return self.non_terminal(node, 'Unordered')
        assert len(node.children) == 1
        nd = node.children[0]
        for child in nd.children:
            if child.parser.ptype == TOKEN_PTYPE and child.content == "§":
                node.add_error("Unordered parser lists cannot contain mandatory (§) items.")
        args = ', '.join(self.compile(child) for child in nd.children)
        if nd.parser.name == "term":
            return "AllOf(" + args + ")"
        elif nd.parser.name == "expression":
            return "SomeOf(" + args + ")"
        else:
            node.add_error("Unordered sequence or alternative requires at least two elements.")
            return ""

    def on_symbol(self, node: Node) -> str:     # called only for symbols on the right hand side!
        symbol = node.content  # ; assert result == cast(str, node.result)
        if symbol in self.directives['tokens']:
            return 'PreprocessorToken("' + symbol + '")'
        else:
            self.current_symbols.append(node)
            if symbol not in self.symbols:
                self.symbols[symbol] = node  # remember first use of symbol
            if symbol in self.rules:
                self.recursive.add(symbol)
            if symbol in EBNFCompi