# ebnf.py - EBNF -> Python-Parser compilation for DHParser
#
# Copyright 2016  by Eckhart Arnold (arnold@badw.de)
#                 Bavarian Academy of Sciences and Humanities (badw.de)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.  See the License for the specific language governing
# permissions and limitations under the License.


"""
Module ``ebnf`` provides a self-hosting parser for EBNF-Grammars as
well as an EBNF-compiler that compiles an EBNF-Grammar into a
DHParser based Grammar class that can be executed to parse source text
conforming to this grammar into contrete syntax trees.
24
25
"""

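# Typical use (illustrative sketch, not executed): the factory functions of this
# module tie the stages together. The sample grammar and the variable names below
# are made up for illustration only.
#
#     ebnf_source = 'word = /\\w+/ ~ \n document = { word } \n'
#     syntax_tree = get_ebnf_grammar()(ebnf_source)    # EBNF source -> concrete syntax tree
#     get_ebnf_transformer()(syntax_tree)              # concrete -> abstract syntax tree (in place)
#     ebnf_compiler = get_ebnf_compiler('Demo', ebnf_source)
#     python_src = ebnf_compiler(syntax_tree)          # Python source of a "DemoGrammar" class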

from collections import OrderedDict
from functools import partial
import keyword
import os

from DHParser.compile import CompilerError, Compiler
from DHParser.error import Error
from DHParser.parse import Grammar, mixin_comment, Forward, RegExp, Whitespace, \
    NegativeLookahead, Alternative, Series, Option, OneOrMore, ZeroOrMore, Token
from DHParser.preprocess import nil_preprocessor, PreprocessorFunc
from DHParser.syntaxtree import Node, WHITESPACE_PTYPE, TOKEN_PTYPE
from DHParser.toolkit import load_if_file, escape_re, md5, sane_parser_name, re, expand_table, \
    GLOBALS, CONFIG_PRESET, get_config_value, unrepr, typing
from DHParser.transform import TransformationFunc, traverse, remove_brackets, \
    reduce_single_child, replace_by_single_child, remove_expendables, \
    remove_tokens, flatten, forbid, assert_content
from DHParser.versionnumber import __version__
from typing import Callable, Dict, List, Set, Tuple, Union


__all__ = ('get_ebnf_preprocessor',
           'get_ebnf_grammar',
           'get_ebnf_transformer',
           'get_ebnf_compiler',
           'EBNFGrammar',
           'EBNFTransform',
           'EBNFCompilerError',
           'EBNFCompiler',
           'grammar_changed',
           'PreprocessorFactoryFunc',
           'ParserFactoryFunc',
           'TransformerFactoryFunc',
           'CompilerFactoryFunc')


########################################################################
#
# EBNF scanning
#
########################################################################


def get_ebnf_preprocessor() -> PreprocessorFunc:
    return nil_preprocessor


########################################################################
#
# EBNF parsing
#
########################################################################


class EBNFGrammar(Grammar):
    r"""
    Parser for an EBNF source file, with this grammar:

    @ comment    = /#.*(?:\n|$)/                    # comments start with '#' and eat all chars up to and including '\n'
    @ whitespace = /\s*/                            # whitespace includes linefeed
    @ literalws  = right                            # trailing whitespace of literals will be ignored tacitly

    syntax     = [~//] { definition | directive } §EOF
    definition = symbol §"=" expression
    directive  = "@" §symbol "=" (regexp | literal | symbol) { "," (regexp | literal | symbol) }

    expression = term { "|" term }
    term       = { ["§"] factor }+                       # "§" means all following factors mandatory
    factor     = [flowmarker] [retrieveop] symbol !"="   # negative lookahead to be sure it's not a definition
               | [flowmarker] literal
               | [flowmarker] plaintext
               | [flowmarker] regexp
               | [flowmarker] whitespace
               | [flowmarker] oneormore
               | [flowmarker] group
               | [flowmarker] unordered
               | repetition
               | option

    flowmarker = "!"  | "&"                         # '!' negative lookahead, '&' positive lookahead
               | "-!" | "-&"                        # '-' negative lookbehind, '-&' positive lookbehind
    retrieveop = "::" | ":"                         # '::' pop, ':' retrieve

    group      = "(" §expression ")"
    unordered  = "<" §expression ">"                # elements of expression in arbitrary order
    oneormore  = "{" expression "}+"
    repetition = "{" §expression "}"
    option     = "[" §expression "]"

    symbol     = /(?!\d)\w+/~                       # e.g. expression, factor, parameter_list
    literal    = /"(?:[^"]|\\")*?"/~                # e.g. "(", '+', 'while'
               | /'(?:[^']|\\')*?'/~                # whitespace following literals will be ignored tacitly.
    plaintext  = /`(?:[^"]|\\")*?`/~                # like literal but does not eat whitespace
    regexp     = /\/(?:\\\/|[^\/])*?\//~            # e.g. /\w+/, ~/#.*(?:\n|$)/~
    whitespace = /~/~                               # insignificant whitespace

    EOF = !/./
    """
    expression = Forward()
    source_hash__ = "82a7c668f86b83f86515078e6c9093ed"
    parser_initialization__ = "upon instantiation"
    COMMENT__ = r'#.*(?:\n|$)'
    WHITESPACE__ = r'\s*'
    WSP_RE__ = mixin_comment(whitespace=WHITESPACE__, comment=COMMENT__)
    wsp__ = Whitespace(WSP_RE__)
    EOF = NegativeLookahead(RegExp('.'))
    whitespace = Series(RegExp('~'), wsp__)
    regexp = Series(RegExp('/(?:\\\\/|[^/])*?/'), wsp__)
    plaintext = Series(RegExp('`(?:[^"]|\\\\")*?`'), wsp__)
    literal = Alternative(Series(RegExp('"(?:[^"]|\\\\")*?"'), wsp__), Series(RegExp("'(?:[^']|\\\\')*?'"), wsp__))
    symbol = Series(RegExp('(?!\\d)\\w+'), wsp__)
    option = Series(Series(Token("["), wsp__), expression, Series(Token("]"), wsp__), mandatory=1)
    repetition = Series(Series(Token("{"), wsp__), expression, Series(Token("}"), wsp__), mandatory=1)
    oneormore = Series(Series(Token("{"), wsp__), expression, Series(Token("}+"), wsp__))
    unordered = Series(Series(Token("<"), wsp__), expression, Series(Token(">"), wsp__), mandatory=1)
    group = Series(Series(Token("("), wsp__), expression, Series(Token(")"), wsp__), mandatory=1)
    retrieveop = Alternative(Series(Token("::"), wsp__), Series(Token(":"), wsp__))
    flowmarker = Alternative(Series(Token("!"), wsp__), Series(Token("&"), wsp__),
                             Series(Token("-!"), wsp__), Series(Token("-&"), wsp__))
    factor = Alternative(Series(Option(flowmarker), Option(retrieveop), symbol,
                                NegativeLookahead(Series(Token("="), wsp__))), Series(Option(flowmarker), literal),
                         Series(Option(flowmarker), plaintext), Series(Option(flowmarker), regexp),
                         Series(Option(flowmarker), whitespace), Series(Option(flowmarker), oneormore),
                         Series(Option(flowmarker), group), Series(Option(flowmarker), unordered), repetition, option)
    term = OneOrMore(Series(Option(Series(Token("§"), wsp__)), factor))
    expression.set(Series(term, ZeroOrMore(Series(Series(Token("|"), wsp__), term))))
    directive = Series(Series(Token("@"), wsp__), symbol, Series(Token("="), wsp__),
                       Alternative(regexp, literal, symbol),
                       ZeroOrMore(Series(Series(Token(","), wsp__), Alternative(regexp, literal, symbol))), mandatory=1)
    definition = Series(symbol, Series(Token("="), wsp__), expression, mandatory=1)
    syntax = Series(Option(Series(wsp__, RegExp(''))), ZeroOrMore(Alternative(definition, directive)), EOF, mandatory=2)
    root__ = syntax
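
    # Illustration (assumed, not executed): calling an instance of this grammar on
    # a small EBNF fragment yields a concrete syntax tree that mirrors the rules
    # documented above and can be inspected, e.g., as an S-expression:
    #
    #     cst = get_ebnf_grammar()('word = /\\w+/ ')
    #     print(cst.as_sxpr())
    #
    # The tree is subsequently simplified by the AST transformation defined below.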


def grammar_changed(grammar_class, grammar_source: str) -> bool:
    """
    Returns ``True`` if ``grammar_class`` does not reflect the latest
    changes of ``grammar_source``.

    Parameters:
        grammar_class:  the parser class representing the grammar
            or the file name of a compiler suite containing the grammar
        grammar_source:  File name or string representation of the
            EBNF code of the grammar

    Returns (bool):
        True, if the source text of the grammar is different from the
        source from which the grammar class was generated
    """
    grammar = load_if_file(grammar_source)
    chksum = md5(grammar, __version__)
    if isinstance(grammar_class, str):
        # grammar_class = load_compiler_suite(grammar_class)[1]
        with open(grammar_class, 'r', encoding='utf8') as f:
            pycode = f.read()
        m = re.search(r'class \w*\(Grammar\)', pycode)
        if m:
            m = re.search('    source_hash__ *= *"([a-z0-9]*)"',
                          pycode[m.span()[1]:])
            return not (m and m.groups() and m.groups()[-1] == chksum)
        else:
            return True
    else:
        return chksum != grammar_class.source_hash__
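
# Usage sketch (file and function names invented for illustration): regenerate the
# parser only if the EBNF source no longer matches the hash stored in the compiled
# grammar class:
#
#     if grammar_changed(DemoGrammar, 'demo.ebnf'):
#         recompile_grammar()   # hypothetical helper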


def get_ebnf_grammar() -> EBNFGrammar:
    try:
        grammar = GLOBALS.ebnf_grammar_singleton
        return grammar
    except AttributeError:
        GLOBALS.ebnf_grammar_singleton = EBNFGrammar()
        return GLOBALS.ebnf_grammar_singleton


########################################################################
#
# EBNF concrete to abstract syntax tree transformation and validation
#
########################################################################


EBNF_AST_transformation_table = {
    # AST Transformations for EBNF-grammar
    "<":
        remove_expendables,
    "syntax":
        [],  # otherwise '"*": replace_by_single_child' would be applied
    "directive, definition":
        [flatten, remove_tokens('@', '=', ',')],
    "expression":
        [replace_by_single_child, flatten, remove_tokens('|')],  # remove_infix_operator],
    "term":
        [replace_by_single_child, flatten],  # supports both idioms:
                                             # "{ factor }+" and "factor { factor }"
    "factor, flowmarker, retrieveop":
        replace_by_single_child,
    "group":
        [remove_brackets, replace_by_single_child],
    "unordered":
        remove_brackets,
    "oneormore, repetition, option":
        [reduce_single_child, remove_brackets,
         forbid('repetition', 'option', 'oneormore'), assert_content(r'(?!§)(?:.|\n)*')],
    "symbol, literal, regexp":
        reduce_single_child,
    (TOKEN_PTYPE, WHITESPACE_PTYPE):
        reduce_single_child,
    # "list_":
    #     [flatten, remove_infix_operator],
    "*":
        replace_by_single_child
}
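
# How the table above is applied (informal summary, assumptions hedged): each key
# names the node types a transformation applies to; a key like "directive, definition"
# is expanded into one entry per name, "<" is applied to every node before the
# name-specific transformations, and "*" serves as the fallback for node types
# without an entry. The table is handed to `traverse`, e.g. via the wrapper below:
#
#     traverse(syntax_tree, EBNF_AST_transformation_table.copy())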


def EBNFTransform() -> TransformationFunc:
    return partial(traverse, processing_table=EBNF_AST_transformation_table.copy())


def get_ebnf_transformer() -> TransformationFunc:
    try:
        transformer = GLOBALS.EBNF_transformer_singleton
    except AttributeError:
        GLOBALS.EBNF_transformer_singleton = EBNFTransform()
        transformer = GLOBALS.EBNF_transformer_singleton
    return transformer


########################################################################
#
# EBNF abstract syntax tree to Python parser compilation
#
########################################################################


PreprocessorFactoryFunc = Callable[[], PreprocessorFunc]
ParserFactoryFunc = Callable[[], Grammar]
TransformerFactoryFunc = Callable[[], TransformationFunc]
CompilerFactoryFunc = Callable[[], Compiler]

PREPROCESSOR_FACTORY = '''
def get_preprocessor() -> PreprocessorFunc:
    return {NAME}Preprocessor
'''


GRAMMAR_FACTORY = '''
def get_grammar() -> {NAME}Grammar:
    try:
        grammar = GLOBALS.{NAME}_{ID}_grammar_singleton
    except AttributeError:
        GLOBALS.{NAME}_{ID}_grammar_singleton = {NAME}Grammar()
        grammar = GLOBALS.{NAME}_{ID}_grammar_singleton
    return grammar
'''


TRANSFORMER_FACTORY = '''
def {NAME}Transform() -> TransformationDict:
    return partial(traverse, processing_table={NAME}_AST_transformation_table.copy())

def get_transformer() -> TransformationFunc:
    try:
        transformer = GLOBALS.{NAME}_{ID}_transformer_singleton
    except AttributeError:
        GLOBALS.{NAME}_{ID}_transformer_singleton = {NAME}Transform()
        transformer = GLOBALS.{NAME}_{ID}_transformer_singleton
    return transformer
'''


COMPILER_FACTORY = '''
def get_compiler() -> {NAME}Compiler:
    try:
        compiler = GLOBALS.{NAME}_{ID}_compiler_singleton
    except AttributeError:
        GLOBALS.{NAME}_{ID}_compiler_singleton = {NAME}Compiler()
        compiler = GLOBALS.{NAME}_{ID}_compiler_singleton
    return compiler
'''


class EBNFCompilerError(CompilerError):
    """Error raised by `EBNFCompiler` class. (Not compilation errors
    in the strict sense, see `CompilationError` in module ``dsl.py``)"""
    pass


class EBNFCompiler(Compiler):
    """
    Generates a Parser from an abstract syntax tree of a grammar specified
    in EBNF-Notation.

    Instances of this class must be called with the root-node of the
    abstract syntax tree from an EBNF-specification of a formal language.
    The returned value is the Python-source-code of a Grammar class for
    this language that can be used to parse texts in this language.
    See classes `parser.Compiler` and `parser.Grammar` for more information.

    Additionally, class EBNFCompiler provides helper methods to generate
    code-skeletons for a preprocessor, AST-transformation and full
    compilation of the formal language. These methods' names start with
    the prefix `gen_`.

    Attributes:
        current_symbols:  During compilation, a list containing the root
                node of the currently compiled definition as first element
                and then the nodes of the symbols that are referred to in
                the currently compiled definition.

        rules:  Dictionary that maps rule names to a list of Nodes that
                contain symbol-references in the definition of the rule.
                The first item in the list is the node of the rule-
                definition itself. Example:

                           `alternative = a | b`

                Now `[node.content for node in self.rules['alternative']]`
                yields `['alternative = a | b', 'a', 'b']`

        symbols:  A mapping of symbol names to their first usage (not
                their definition!) in the EBNF source.

        variables:  A set of symbol names that are used with the
                Pop or Retrieve operator. Because the values of these
                symbols need to be captured they are called variables.
                See `test_parser.TestPopRetrieve` for an example.

        recursive:  A set of symbols that are used recursively and
                therefore require a `Forward`-operator.

        definitions:  A dictionary of definitions. In contrast to `rules`,
                this maps the symbols to their compiled definienda.

        deferred_tasks:  A list of callables that is filled during
                compilation, but that will be executed only after
                compilation has finished. Typically, it contains
                semantic checks that require information that
                is only available upon completion of compilation.

        root_symbol: The name of the root symbol.

        directives:  A dictionary of all directives and their default
                values.

        re_flags:  A set of regular expression flags to be added to all
                regular expressions found in the current parsing process

        grammar_name:  The name of the grammar to be compiled

        grammar_source:  The source code of the grammar to be compiled.

        grammar_id: a unique id for every compiled grammar. (Required for
                disambiguation of thread local variables storing
                compiled texts.)
    """
    COMMENT_KEYWORD = "COMMENT__"
    WHITESPACE_KEYWORD = "WSP_RE__"
    RAW_WS_KEYWORD = "WHITESPACE__"
    WHITESPACE_PARSER_KEYWORD = "wsp__"
    RESUME_RULES_KEYWORD = "resume_rules__"
    RESERVED_SYMBOLS = {WHITESPACE_KEYWORD, RAW_WS_KEYWORD, COMMENT_KEYWORD, RESUME_RULES_KEYWORD}
    AST_ERROR = "Badly structured syntax tree. " \
                "Potentially due to erroneous AST transformation."
    PREFIX_TABLE = {'§': 'Required',
                    '&': 'Lookahead', '!': 'NegativeLookahead',
                    '-&': 'Lookbehind', '-!': 'NegativeLookbehind',
                    '::': 'Pop', ':': 'Retrieve'}
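    # e.g. (illustrative): "!term" compiles to NegativeLookahead(term),
    # "::name" to Pop(name) and ":name" to Retrieve(name).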
    WHITESPACE = {'horizontal': r'[\t ]*',  # default: horizontal
                  'linefeed': r'[ \t]*\n?(?!\s*\n)[ \t]*',
                  'vertical': r'\s*'}
    REPEATABLE_DIRECTIVES = {'tokens'}


    def __init__(self, grammar_name="DSL", grammar_source=""):
        self.grammar_id = 0
        super(EBNFCompiler, self).__init__()  # calls the _reset()-method
        self.set_grammar_name(grammar_name, grammar_source)


    def _reset(self):
        super(EBNFCompiler, self)._reset()
        self._result = ''           # type: str
        self.re_flags = set()       # type: Set[str]
        self.rules = OrderedDict()  # type: OrderedDict[str, List[Node]]
        self.current_symbols = []   # type: List[Node]
        self.symbols = {}           # type: Dict[str, Node]
        self.variables = set()      # type: Set[str]
        self.recursive = set()      # type: Set[str]
        self.definitions = {}       # type: Dict[str, str]
        self.deferred_tasks = []    # type: List[Callable]
        self.root_symbol = ""       # type: str
        self.directives = {'whitespace': self.WHITESPACE['vertical'],
                           'comment': '',
                           'literalws': {'right'},
                           'tokens': set(),   # alt. 'preprocessor_tokens'
                           'filter': dict(),  # alt. 'filter'
                           'error': dict(),   # customized error messages
                           'resume': dict()}  # reentry points after parser errors
        # self.directives['ignorecase']: False
        self.defined_directives = set()  # type: Set[str]
        self.grammar_id += 1


    @property
    def result(self) -> str:
        return self._result


    def set_grammar_name(self, grammar_name: str = "", grammar_source: str = ""):
        """
        Changes the grammar name and source.

        The grammar name and the source text are metadata that do not affect the
        compilation process. They are only used to name and annotate the output.
        Returns `self`.
        """
        assert grammar_name == "" or re.match(r'\w+\Z', grammar_name)
        if not grammar_name and re.fullmatch(r'[\w/:\\]+', grammar_source):
            grammar_name = os.path.splitext(os.path.basename(grammar_source))[0]
        self.grammar_name = grammar_name
        self.grammar_source = load_if_file(grammar_source)
        return self


    # methods for generating skeleton code for preprocessor, transformer, and compiler

    def gen_preprocessor_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for a preprocessor-function for
        the previously compiled formal language.
        """
        name = self.grammar_name + "Preprocessor"
        return "def %s(text):\n    return text, lambda i: i\n" % name \
               + PREPROCESSOR_FACTORY.format(NAME=self.grammar_name)


    def gen_transformer_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for the AST-transformation for the
        previously compiled formal language.
        """
        if not self.rules:
            raise EBNFCompilerError('Compiler must be run before calling '
                                    '"gen_transformer_skeleton()"!')
        tt_name = self.grammar_name + '_AST_transformation_table'
        transtable = [tt_name + ' = {',
                      '    # AST Transformations for the ' + self.grammar_name + '-grammar']
        transtable.append('    "<": remove_empty,')
        for name in self.rules:
            transformations = '[]'
            rule = self.definitions[name]
            if rule.startswith('Alternative'):
                transformations = '[replace_or_reduce]'
            elif rule.startswith('Synonym'):
                transformations = '[reduce_single_child]'
            transtable.append('    "' + name + '": %s,' % transformations)
        transtable.append('    ":Token": reduce_single_child,')
        transtable += ['    "*": replace_by_single_child', '}', '']
        transtable += [TRANSFORMER_FACTORY.format(NAME=self.grammar_name, ID=self.grammar_id)]
        return '\n'.join(transtable)


    def gen_compiler_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for a Compiler-class for the
        previously compiled formal language.
        """
        if not self.rules:
            raise EBNFCompilerError('Compiler has not been run before calling '
                                    '"gen_compiler_skeleton()"!')
        compiler = ['class ' + self.grammar_name + 'Compiler(Compiler):',
                    '    """Compiler for the abstract-syntax-tree of a '
                    + self.grammar_name + ' source file.',
                    '    """', '',
                    '    def __init__(self):',
                    '        super(' + self.grammar_name + 'Compiler, self).__init__()',
                    '',
                    '    def _reset(self):',
                    '        super()._reset()',
                    '        # initialize your variables here, not in the constructor!']
        for name in self.rules:
            method_name = Compiler.method_name(name)
            if name == self.root_symbol:
                compiler += ['    def ' + method_name + '(self, node):',
                             '        return self.fallback_compiler(node)', '']
            else:
                compiler += ['    # def ' + method_name + '(self, node):',
                             '    #     return node', '']
        compiler += [COMPILER_FACTORY.format(NAME=self.grammar_name, ID=self.grammar_id)]
        return '\n'.join(compiler)
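
    # Usage sketch (names invented for illustration): once an EBNF syntax tree has
    # been compiled by an EBNFCompiler instance, the skeleton generators above can
    # bootstrap the remaining components of a compiler suite:
    #
    #     ebnf_compiler = EBNFCompiler('Demo', ebnf_source)
    #     python_grammar_src = ebnf_compiler(ebnf_syntax_tree)
    #     preprocessor_src = ebnf_compiler.gen_preprocessor_skeleton()
    #     transformer_src = ebnf_compiler.gen_transformer_skeleton()
    #     compiler_src = ebnf_compiler.gen_compiler_skeleton()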

    def verify_transformation_table(self, transtable):
        """
        Checks for symbols that occur in the transformation-table but have
        never been defined in the grammar. Usually, this kind of
        inconsistency results from an error like a typo in the transformation
        table.
        """
        assert self._dirty_flag
        table_entries = set(expand_table(transtable).keys()) - {'*', '<', '>', '~'}
        symbols = self.rules.keys()
        messages = []
        for entry in table_entries:
            if entry not in symbols and not entry.startswith(":"):
                messages.append(Error(('Symbol "%s" is not defined in grammar %s but appears in '
                                       'the transformation table!') % (entry, self.grammar_name),
                                      0, Error.UNDEFINED_SYMBOL_IN_TRANSFORMATION_TABLE))
        return messages

    def verify_compiler(self, compiler):
        """
        Checks for on_XXXX()-methods that occur in the compiler, although XXXX
        has never been defined in the grammar. Usually, this kind of
        inconsistency results from an error like a typo in the compiler-code.
        """
        pass  # TODO: add verification code here


    def _check_rx(self, node: Node, rx: str) -> str:
        """
        Checks whether the string `rx` represents a valid regular
        expression. Makes sure that regular expressions stretching over
        several lines are prefixed with the verbose-flag "(?x)". Returns
        the regular expression string.
        """
        flags = self.re_flags | {'x'} if rx.find('\n') >= 0 else self.re_flags
        if flags:
            rx = "(?%s)%s" % ("".join(flags), rx)
        try:
            re.compile(rx)
        except Exception as re_error:
            self.tree.new_error(node, "malformed regular expression %s: %s" %
                                (repr(rx), str(re_error)))
        return rx


    def _extract_regex(self, node: Node) -> str:
        """Extracts regular expression string from regexp-Node."""
        value = node.content.strip("~")
        if value[0] + value[-1] in {'""', "''"}:
            value = escape_re(value[1:-1])
        elif value[0] + value[-1] == '//':
            value = self._check_rx(node, value[1:-1])
        return value


    def _generate_resume_rule(self, nd: Node) -> Union[str, unrepr]:
        """Generates a resume rules from the nodes content. Returns an
        empty string in case the node is neither regexp nor literal.
        """
        if nd.parser.name == 'regexp':
            return unrepr("re.compile(r'%s')" % self._extract_regex(nd))
        elif nd.parser.name == 'literal':
            s = nd.content.strip()
            return s.strip('"') if s[0] == '"' else s.strip("'")
        return ''


    def assemble_parser(self, definitions: List[Tuple[str, str]], root_node: Node) -> str:
        """
        Creates the Python code for the parser after compilation of
        the EBNF-Grammar
        """

        # execute deferred tasks, for example semantic checks that cannot
        # be done before the symbol table is complete

        for task in self.deferred_tasks:
            task()

        # provide for capturing of symbols that are variables, i.e. the
        # value of which will be retrieved at some point during the parsing process

        if self.variables:
            for i in range(len(definitions)):
                if definitions[i][0] in self.variables:
                    definitions[i] = (definitions[i][0], 'Capture(%s)' % definitions[i][1])

        # add special fields for Grammar class

        definitions.append((self.WHITESPACE_PARSER_KEYWORD,
                            'Whitespace(%s)' % self.WHITESPACE_KEYWORD))
        definitions.append((self.WHITESPACE_KEYWORD,
                            ("mixin_comment(whitespace=" + self.RAW_WS_KEYWORD
                             + ", comment=" + self.COMMENT_KEYWORD + ")")))
        definitions.append((self.RAW_WS_KEYWORD, "r'{whitespace}'".format(**self.directives)))
        definitions.append((self.COMMENT_KEYWORD, "r'{comment}'".format(**self.directives)))

        # prepare and add resume-rules

        resume_rules = dict()  # type: Dict[str, List[Union[str, unrepr]]]
        for symbol, raw_rules in self.directives['resume'].items():
            refined_rules = []
            for rule in raw_rules:
                if isinstance(rule, unrepr) and rule.s.isidentifier():
                    try:
                        nd = self.rules[rule.s][0].children[1]
                        refined = self._generate_resume_rule(nd)
                    except IndexError:
                        refined = ""
                    if refined:
                        refined_rules.append(refined)
                    else:
                        self.tree.new_error(nd, 'Symbol "%s" cannot be used in resume rule, since'
                                                ' it represents neither literal nor regexp!' % rule.s)
                else:
                    refined_rules.append(rule)
            resume_rules[symbol] = refined_rules
        definitions.append((self.RESUME_RULES_KEYWORD, repr(resume_rules)))

        # prepare parser class header and docstring and
        # add EBNF grammar to the doc string of the parser class

        article = 'an ' if self.grammar_name[0:1] in "AaEeIiOoUu" else 'a '  # what about 'hour', 'universe' etc.?
        show_source = get_config_value('add_grammar_source_to_parser_docstring')
        declarations = ['class ' + self.grammar_name
                        + 'Grammar(Grammar):',
                        'r"""Parser for ' + article + self.grammar_name
                        + ' source file'
                        + ('. Grammar:' if self.grammar_source and show_source else '.')]
        definitions.append(('parser_initialization__', '"upon instantiation"'))
        if self.grammar_source:
            definitions.append(('source_hash__',
                                '"%s"' % md5(self.grammar_source, __version__)))
            declarations.append('')
            if show_source:
                declarations += [line for line in self.grammar_source.split('\n')]
            while declarations[-1].strip() == '':
                declarations = declarations[:-1]
        declarations.append('"""')

        # turn definitions into declarations in reverse order

        self.root_symbol = definitions[0][0] if definitions else ""
        definitions.reverse()
        declarations += [symbol + ' = Forward()'
                         for symbol in sorted(list(self.recursive))]
        for symbol, statement in definitions:
            if symbol in self.recursive:
                declarations += [symbol + '.set(' + statement + ')']
            else:
                declarations += [symbol + ' = ' + statement]

        # check for symbols used but never defined

        defined_symbols = set(self.rules.keys()) | self.RESERVED_SYMBOLS
        for symbol in self.symbols:
            if symbol not in defined_symbols:
                self.tree.new_error(self.symbols[symbol],
                                    "Missing definition for symbol '%s'" % symbol)
                # root_node.error_flag = True

        # check for unconnected rules

        defined_symbols.difference_update(self.RESERVED_SYMBOLS)

        def remove_connections(symbol):
            """Recursively removes all symbols which appear in the
            definiens of a particular symbol."""
            if symbol in defined_symbols:
                defined_symbols.remove(symbol)
                for related in self.rules[symbol][1:]:
                    remove_connections(str(related))

        remove_connections(self.root_symbol)
        for leftover in defined_symbols:
            self.tree.new_error(self.rules[leftover][0],
                                ('Rule "%s" is not connected to parser root "%s" !') %
                                (leftover, self.root_symbol), Error.WARNING)

        # set root_symbol parser and assemble python grammar definition

        if self.root_symbol and 'root__' not in self.rules:
            declarations.append('root__ = ' + self.root_symbol)
        declarations.append('')
        self._result = '\n    '.join(declarations) \
                       + GRAMMAR_FACTORY.format(NAME=self.grammar_name, ID=self.grammar_id)
        return self._result


    ## compilation methods

    def on_syntax(self, node: Node) -> str:
        definitions = []  # type: List[Tuple[str, str]]

        # drop the wrapping sequence node
        if len(node.children) == 1 and not node.children[0].parser.name:
            node = node.children[0]

        # compile definitions and directives and collect definitions
        for nd in node.children:
            if nd.parser.name == "definition":
                definitions.append(self.compile(nd))
            else:
                assert nd.parser.name == "directive", nd.as_sxpr()
                self.compile(nd)
            # node.error_flag = max(node.error_flag, nd.error_flag)
        self.definitions.update(definitions)

        return self.assemble_parser(definitions, node)


    def on_definition(self, node: Node) -> Tuple[str, str]:
        rule = node.children[0].content
        if rule in self.rules:
            first = self.rules[rule][0]
            if not first.errors:
                self.tree.new_error(first, 'First definition of rule "%s" '
                                    'followed by illegal redefinitions.' % rule)
            self.tree.new_error(node, 'A rule "%s" has already been defined earlier.' % rule)
        elif rule in EBNFCompiler.RESERVED_SYMBOLS:
            self.tree.new_error(node, 'Symbol "%s" is a reserved symbol.' % rule)
        elif not sane_parser_name(rule):
            self.tree.new_error(node, 'Illegal symbol "%s". Symbols must not start or '
                                'end with a double underscore "__".' % rule)
        elif rule in self.directives['tokens']:
            self.tree.new_error(node, 'Symbol "%s" has already been defined as '
                                'a preprocessor token.' % rule)
        elif keyword.iskeyword(rule):
            self.tree.new_error(node, 'Python keyword "%s" may not be used as a symbol. '
                                % rule + '(This may change in the future.)')
        try:
            self.current_symbols = [node]
            self.rules[rule] = self.current_symbols
            defn = self.compile(node.children[1])
            if rule in self.variables:
                defn = 'Capture(%s)' % defn
                self.variables.remove(rule)
            elif defn.find("(") < 0:
                # assume it's a synonym, like 'page = REGEX_PAGE_NR'
                defn = 'Synonym(%s)' % defn
        except TypeError as error:
            from traceback import extract_tb
            trace = str(extract_tb(error.__traceback__)[-1])
            errmsg = "%s (TypeError: %s; %s)\n%s" \
                     % (EBNFCompiler.AST_ERROR, str(error), trace, node.as_sxpr())
            self.tree.new_error(node, errmsg)
            rule, defn = rule + ':error', '"' + errmsg + '"'
        return rule, defn


    def on_directive(self, node: Node) -> str:
        key = node.children[0].content
        assert key not in self.directives['tokens']
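
        # Examples of directives handled below (all values illustrative):
        #     @ whitespace = linefeed              # or a regular expression, e.g. /\s*/
        #     @ comment    = /#.*(?:\n|$)/
        #     @ literalws  = right
        #     @ tokens     = BEGIN_INDENT, END_INDENT
        #     @ definition_error  = "malformed definition"
        #     @ definition_resume = /\n\s*(?=\w)/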

        if key not in self.REPEATABLE_DIRECTIVES:
            if key in self.defined_directives:
                self.tree.new_error(node, 'Directive "%s" has already been defined earlier. '
                                    % key + 'Later definition will be ignored!',
                                    code=Error.REDEFINED_DIRECTIVE_WARNING)
                return ""
            self.defined_directives.add(key)

        def check_argnum(n: int = 1):
            if len(node.children) > n + 1:
                self.tree.new_error(node, 'Directive "%s" must have one, but not %i values.'
                                    % (key, len(node.children) - 1))

        if key in {'comment', 'whitespace'}:
            check_argnum()
            if node.children[1].parser.name == "symbol":
                value = node.children[1].content
                if key == 'whitespace' and value in EBNFCompiler.WHITESPACE:
                    value = EBNFCompiler.WHITESPACE[value]  # replace whitespace-name by regex
                else:
                    self.tree.new_error(node, 'Value "%s" not allowed for directive "%s".'
                                        % (value, key))
            else:
                value = self._extract_regex(node.children[1])
                if key == 'whitespace' and not re.match(value, ''):
                    self.tree.new_error(node, "Implicit whitespace should always "
                                        "match the empty string, /%s/ does not." % value)
            self.directives[key] = value

        elif key == 'ignorecase':
            check_argnum()
            if node.children[1].content.lower() not in {"off", "false", "no"}:
                self.re_flags.add('i')

        # elif key == 'testing':
        #     value = node.children[1].content
        #     self.directives['testing'] = value.lower() not in {"off", "false", "no"}

        elif key == 'literalws':
            values = {child.content.strip().lower() for child in node.children[1:]}
            if ((values - {'left', 'right', 'both', 'none'})
                    or ('none' in values and len(values) > 1)):
                self.tree.new_error(node, 'Directive "literalws" allows only `left`, `right`, '
                                    '`both` or `none`, not `%s`' % ", ".join(values))
            wsp = {'left', 'right'} if 'both' in values \
                else {} if 'none' in values else values
            self.directives[key] = list(wsp)

        elif key in {'tokens', 'preprocessor_tokens'}:
            tokens = {child.content.strip() for child in node.children[1:]}
            redeclared = self.directives['tokens'] & tokens
            if redeclared:
                self.tree.new_error(node, 'Tokens %s have already been declared earlier. '
                                    % str(redeclared) + 'Later declaration will be ignored',
                                    code=Error.REDECLARED_TOKEN_WARNING)
            self.directives['tokens'] |= tokens - redeclared

        elif key.endswith('_filter'):
            check_argnum()
            symbol = key[:-7]
            self.directives['filter'][symbol] = node.children[1].content.strip()

        elif key.endswith('_error'):
            check_argnum()
            if not node.children[1].parser.name == "literal":
                self.tree.new_error(node, 'Directive "%s" requires a message string '
                                    'as argument!' % key)
            error_msg = node.children[1].content
            symbol = key[:-6]
            if symbol in self.rules:
                self.tree.new_error(node, 'Custom error message for symbol "%s" ' % symbol
                                    + 'must be defined before the symbol!')
            if symbol in self.directives['error']:
                self.tree.new_error(node, 'Error message for "%s" has already been customized '
                                          'earlier!' % symbol)
            else:
                self.directives['error'][symbol] = error_msg

        elif key.endswith('_resume'):
            # if not all(child.parser.name in ('literal', 'regexp') for child in node.children[1:]):
            #     self.tree.new_error(node, 'Directive "%s" accepts only regular expressions or '
            #                               'plain strings as arguments, but no symbols without '
            #                               'quotation marks!' % key)
            symbol = key[:-7]
            if symbol in self.directives['resume']:
                self.tree.new_error(node, 'Reentry conditions for "%s" have already been defined'
                                          ' earlier!' % symbol)
            else:
                reentry_conditions = []  # type: List[Union[unrepr, str]]
                for child in node.children[1:]:
                    rule = self._generate_resume_rule(child)
                    if rule:
                        reentry_conditions.append(rule)
                    else:  # child.parser.name == 'symbol'
                        if child.content not in self.symbols:
                            self.symbols[child.content] = node
                        reentry_conditions.append(unrepr(child.content.strip()))
                self.directives['resume'][symbol] = reentry_conditions
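                # e.g. an (illustrative) directive `@ definition_resume = /\n\s*(?=\w)/`
                # stores a reentry expression under self.directives['resume']['definition'];
                # assemble_parser() later turns these entries into the generated
                # grammar's resume_rules__.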

        else:
            self.tree.new_error(node, 'Unknown directive %s! (Known directives are %s.)' %
                                (key, ', '.join(list(self.directives.keys()))))

        try:
            if symbol not in self.symbols:
                # remember first use of symbol, so that dangling references or
                # redundant definitions or usages of symbols can be detected later
                self.symbols[symbol] = node
        except NameError:
            pass  # no symbol was referred to in directive

        return ""


    def non_terminal(self, node: Node, parser_class: str, custom_args: List[str] = []) -> str:
        """
        Compiles any non-terminal, where `parser_class` indicates the Parser class
        name for the particular non-terminal.
        """
        arguments = [self.compile(r) for r in node.children] + custom_args
        return parser_class + '(' + ', '.join(arguments) + ')'


    def on_expression(self, node) -> str:
        # TODO: Add check for errors like "a" | "ab" (which will always yield a, even for ab)
        return self.non_terminal(node, 'Alternative')


    def on_term(self, node) -> str: