# ebnf.py - EBNF -> Python-Parser compilation for DHParser
#
# Copyright 2016  by Eckhart Arnold (arnold@badw.de)
#                 Bavarian Academy of Sciences and Humanities (badw.de)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.  See the License for the specific language governing
# permissions and limitations under the License.


"""
Module ``ebnf`` provides a self-hosting parser for EBNF-Grammars as
well as an EBNF-compiler that compiles an EBNF-Grammar into a
DHParser based Grammar class that can be executed to parse source text
conforming to this grammar into concrete syntax trees.
"""


from collections import OrderedDict
from functools import partial
import keyword
import os

from DHParser.compile import CompilerError, Compiler
from DHParser.error import Error
from DHParser.parse import Grammar, mixin_comment, Forward, RegExp, Whitespace, \
    NegativeLookahead, Alternative, Series, Option, OneOrMore, ZeroOrMore, Token
from DHParser.preprocess import nil_preprocessor, PreprocessorFunc
from DHParser.syntaxtree import Node, WHITESPACE_PTYPE, TOKEN_PTYPE
from DHParser.toolkit import load_if_file, escape_re, md5, sane_parser_name, re, expand_table, \
    GLOBALS, CONFIG_PRESET, get_config_value, unrepr, typing
from DHParser.transform import TransformationFunc, traverse, remove_brackets, \
    reduce_single_child, replace_by_single_child, remove_expendables, \
    remove_tokens, flatten, forbid, assert_content
from DHParser.versionnumber import __version__
from typing import Callable, Dict, List, Set, Tuple, Union

__all__ = ('get_ebnf_preprocessor',
           'get_ebnf_grammar',
           'get_ebnf_transformer',
           'get_ebnf_compiler',
           'EBNFGrammar',
           'EBNFTransform',
           'EBNFCompilerError',
           'EBNFCompiler',
           'grammar_changed',
           'PreprocessorFactoryFunc',
           'ParserFactoryFunc',
           'TransformerFactoryFunc',
           'CompilerFactoryFunc')


########################################################################
#
# EBNF scanning
#
########################################################################


def get_ebnf_preprocessor() -> PreprocessorFunc:
    return nil_preprocessor


########################################################################
#
# EBNF parsing
#
########################################################################


class EBNFGrammar(Grammar):
    r"""
    Parser for an EBNF source file, with this grammar:

    @ comment    = /#.*(?:\n|$)/                    # comments start with '#' and eat all chars up to and including '\n'
    @ whitespace = /\s*/                            # whitespace includes linefeed
    @ literalws  = right                            # trailing whitespace of literals will be ignored tacitly

    syntax     = [~//] { definition | directive } §EOF
    definition = symbol §"=" expression
    directive  = "@" §symbol "=" (regexp | literal | symbol) { "," (regexp | literal | symbol) }

    expression = term { "|" term }
    term       = { ["§"] factor }+                       # "§" means all following factors mandatory
    factor     = [flowmarker] [retrieveop] symbol !"="   # negative lookahead to be sure it's not a definition
               | [flowmarker] literal
               | [flowmarker] plaintext
               | [flowmarker] regexp
               | [flowmarker] whitespace
               | [flowmarker] oneormore
               | [flowmarker] group
               | [flowmarker] unordered
               | repetition
               | option

    flowmarker = "!"  | "&"                         # '!' negative lookahead, '&' positive lookahead
               | "-!" | "-&"                        # '-' negative lookbehind, '-&' positive lookbehind
    retrieveop = "::" | ":"                         # '::' pop, ':' retrieve

    group      = "(" §expression ")"
    unordered  = "<" §expression ">"                # elements of expression in arbitrary order
    oneormore  = "{" expression "}+"
    repetition = "{" §expression "}"
    option     = "[" §expression "]"

    symbol     = /(?!\d)\w+/~                       # e.g. expression, factor, parameter_list
    literal    = /"(?:[^"]|\\")*?"/~                # e.g. "(", '+', 'while'
               | /'(?:[^']|\\')*?'/~                # whitespace following literals will be ignored tacitly.
    plaintext  = /`(?:[^"]|\\")*?`/~                # like literal but does not eat whitespace
    regexp     = /\/(?:\\\/|[^\/])*?\//~            # e.g. /\w+/, ~/#.*(?:\n|$)/~
    whitespace = /~/~                               # insignificant whitespace

    EOF = !/./
    """
    expression = Forward()
    source_hash__ = "82a7c668f86b83f86515078e6c9093ed"
    parser_initialization__ = "upon instantiation"
    COMMENT__ = r'#.*(?:\n|$)'
    WHITESPACE__ = r'\s*'
    WSP_RE__ = mixin_comment(whitespace=WHITESPACE__, comment=COMMENT__)
    wsp__ = Whitespace(WSP_RE__)
    EOF = NegativeLookahead(RegExp('.'))
    whitespace = Series(RegExp('~'), wsp__)
    regexp = Series(RegExp('/(?:\\\\/|[^/])*?/'), wsp__)
    plaintext = Series(RegExp('`(?:[^"]|\\\\")*?`'), wsp__)
    literal = Alternative(Series(RegExp('"(?:[^"]|\\\\")*?"'), wsp__), Series(RegExp("'(?:[^']|\\\\')*?'"), wsp__))
    symbol = Series(RegExp('(?!\\d)\\w+'), wsp__)
    option = Series(Series(Token("["), wsp__), expression, Series(Token("]"), wsp__), mandatory=1)
    repetition = Series(Series(Token("{"), wsp__), expression, Series(Token("}"), wsp__), mandatory=1)
    oneormore = Series(Series(Token("{"), wsp__), expression, Series(Token("}+"), wsp__))
    unordered = Series(Series(Token("<"), wsp__), expression, Series(Token(">"), wsp__), mandatory=1)
    group = Series(Series(Token("("), wsp__), expression, Series(Token(")"), wsp__), mandatory=1)
    retrieveop = Alternative(Series(Token("::"), wsp__), Series(Token(":"), wsp__))
    flowmarker = Alternative(Series(Token("!"), wsp__), Series(Token("&"), wsp__),
                             Series(Token("-!"), wsp__), Series(Token("-&"), wsp__))
    factor = Alternative(Series(Option(flowmarker), Option(retrieveop), symbol,
                                NegativeLookahead(Series(Token("="), wsp__))), Series(Option(flowmarker), literal),
                         Series(Option(flowmarker), plaintext), Series(Option(flowmarker), regexp),
                         Series(Option(flowmarker), whitespace), Series(Option(flowmarker), oneormore),
                         Series(Option(flowmarker), group), Series(Option(flowmarker), unordered), repetition, option)
    term = OneOrMore(Series(Option(Series(Token("§"), wsp__)), factor))
    expression.set(Series(term, ZeroOrMore(Series(Series(Token("|"), wsp__), term))))
    directive = Series(Series(Token("@"), wsp__), symbol, Series(Token("="), wsp__),
                       Alternative(regexp, literal, symbol),
                       ZeroOrMore(Series(Series(Token(","), wsp__), Alternative(regexp, literal, symbol))), mandatory=1)
    definition = Series(symbol, Series(Token("="), wsp__), expression, mandatory=1)
    syntax = Series(Option(Series(wsp__, RegExp(''))), ZeroOrMore(Alternative(definition, directive)), EOF, mandatory=2)
    root__ = syntax
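

# Illustrative usage sketch (assumptions: Grammar instances are callable with a
# source string and return the root Node of the concrete syntax tree; the tiny
# EBNF snippet below is a made-up example):
def _example_parse_ebnf() -> Node:
    ebnf_grammar = EBNFGrammar()
    cst = ebnf_grammar('key = "value" | `raw`\n')   # concrete syntax tree
    return cst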


def grammar_changed(grammar_class, grammar_source: str) -> bool:
    """
    Returns ``True`` if ``grammar_class`` does not reflect the latest
    changes of ``grammar_source``

    Parameters:
        grammar_class:  the parser class representing the grammar
            or the file name of a compiler suite containing the grammar
        grammar_source:  File name or string representation of the
            EBNF code of the grammar

    Returns (bool):
        True, if the source text of the grammar is different from the
        source from which the grammar class was generated
    """
    grammar = load_if_file(grammar_source)
    chksum = md5(grammar, __version__)
    if isinstance(grammar_class, str):
        # grammar_class = load_compiler_suite(grammar_class)[1]
        with open(grammar_class, 'r', encoding='utf8') as f:
            pycode = f.read()
        m = re.search(r'class \w*\(Grammar\)', pycode)
        if m:
            m = re.search('    source_hash__ *= *"([a-z0-9]*)"',
                          pycode[m.span()[1]:])
            return not (m and m.groups() and m.groups()[-1] == chksum)
        else:
            return True
    else:
        return chksum != grammar_class.source_hash__
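

# Illustrative sketch (the file names are hypothetical): grammar_changed() can
# guard the regeneration of a previously generated parser module, e.g. before
# re-running the EBNF compiler on 'arithmetic.ebnf'.
def _example_parser_module_is_stale() -> bool:
    return grammar_changed('arithmeticCompiler.py', 'arithmetic.ebnf')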


def get_ebnf_grammar() -> EBNFGrammar:
    try:
        grammar = GLOBALS.ebnf_grammar_singleton
        return grammar
    except AttributeError:
        GLOBALS.ebnf_grammar_singleton = EBNFGrammar()
        return GLOBALS.ebnf_grammar_singleton


########################################################################
#
# EBNF concrete to abstract syntax tree transformation and validation
#
########################################################################


EBNF_AST_transformation_table = {
    # AST Transformations for EBNF-grammar
    "<":
        remove_expendables,
    "syntax":
        [],  # otherwise '"*": replace_by_single_child' would be applied
    "directive, definition":
        [flatten, remove_tokens('@', '=', ',')],
    "expression":
        [replace_by_single_child, flatten, remove_tokens('|')],  # remove_infix_operator],
    "term":
        [replace_by_single_child, flatten],  # supports both idioms:
                                             # "{ factor }+" and "factor { factor }"
    "factor, flowmarker, retrieveop":
        replace_by_single_child,
    "group":
        [remove_brackets, replace_by_single_child],
    "unordered":
        remove_brackets,
    "oneormore, repetition, option":
        [reduce_single_child, remove_brackets,
         forbid('repetition', 'option', 'oneormore'), assert_content(r'(?!§)(?:.|\n)*')],
    "symbol, literal, regexp":
        reduce_single_child,
    (TOKEN_PTYPE, WHITESPACE_PTYPE):
        reduce_single_child,
    # "list_":
    #     [flatten, remove_infix_operator],
    "*":
        replace_by_single_child
}


def EBNFTransform() -> TransformationFunc:
    return partial(traverse, processing_table=EBNF_AST_transformation_table.copy())


def get_ebnf_transformer() -> TransformationFunc:
    try:
        transformer = GLOBALS.EBNF_transformer_singleton
    except AttributeError:
        GLOBALS.EBNF_transformer_singleton = EBNFTransform()
        transformer = GLOBALS.EBNF_transformer_singleton
    return transformer
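

# Illustrative sketch of the full processing chain (assumptions: Grammar,
# transformation and Compiler objects are callable as shown; get_ebnf_compiler()
# is defined further below in this module; the one-line grammar is made up):
def _example_ebnf_pipeline(ebnf_source: str = 'document = /.*/\n') -> str:
    cst = get_ebnf_grammar()(ebnf_source)   # EBNF source -> concrete syntax tree
    get_ebnf_transformer()(cst)             # CST -> AST, transformed in place
    return get_ebnf_compiler()(cst)         # AST -> Python source of a parser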


########################################################################
#
# EBNF abstract syntax tree to Python parser compilation
#
########################################################################


PreprocessorFactoryFunc = Callable[[], PreprocessorFunc]
ParserFactoryFunc = Callable[[], Grammar]
TransformerFactoryFunc = Callable[[], TransformationFunc]
CompilerFactoryFunc = Callable[[], Compiler]


PREPROCESSOR_FACTORY = '''
def get_preprocessor() -> PreprocessorFunc:
    return {NAME}Preprocessor
'''


GRAMMAR_FACTORY = '''
def get_grammar() -> {NAME}Grammar:
    global GLOBALS
    try:
        grammar = GLOBALS.{NAME}_{ID}_grammar_singleton
    except AttributeError:
        GLOBALS.{NAME}_{ID}_grammar_singleton = {NAME}Grammar()
        if hasattr(get_grammar, 'python_src__'):
            GLOBALS.{NAME}_{ID}_grammar_singleton.python_src__ = get_grammar.python_src__
        grammar = GLOBALS.{NAME}_{ID}_grammar_singleton
    return grammar
'''


TRANSFORMER_FACTORY = '''
def {NAME}Transform() -> TransformationDict:
    return partial(traverse, processing_table={NAME}_AST_transformation_table.copy())

def get_transformer() -> TransformationFunc:
    try:
        transformer = GLOBALS.{NAME}_{ID}_transformer_singleton
    except AttributeError:
        GLOBALS.{NAME}_{ID}_transformer_singleton = {NAME}Transform()
        transformer = GLOBALS.{NAME}_{ID}_transformer_singleton
    return transformer
'''


COMPILER_FACTORY = '''
def get_compiler() -> {NAME}Compiler:
    try:
        compiler = GLOBALS.{NAME}_{ID}_compiler_singleton
    except AttributeError:
        GLOBALS.{NAME}_{ID}_compiler_singleton = {NAME}Compiler()
        compiler = GLOBALS.{NAME}_{ID}_compiler_singleton
    return compiler
'''
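

# Illustrative sketch (the grammar name 'Arithmetic' and the id 1 are made-up
# values): EBNFCompiler fills these templates via str.format(), e.g. the call
# below yields the source code of a get_grammar() factory for a generated
# ArithmeticGrammar class.
def _example_render_grammar_factory() -> str:
    return GRAMMAR_FACTORY.format(NAME='Arithmetic', ID=1)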


WHITESPACE_TYPES = {'horizontal': r'[\t ]*',  # default: horizontal
                    'linefeed': r'[ \t]*\n?(?!\s*\n)[ \t]*',
                    'vertical': r'\s*'}
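
# For example (illustrative): the directive ``@ whitespace = linefeed`` in an
# EBNF source makes EBNFCompiler.on_directive() substitute
# WHITESPACE_TYPES['linefeed'] as the insignificant-whitespace regex of the
# generated grammar.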


# Representation of Python code or, rather, something that will be output as Python code
ReprType = Union[str, unrepr]


class EBNFDirectives:
    """
    A Record that keeps information about compiler directives
    during the compilation process.

    Attributes:
        whitespace:  the regular expression string for (insignificant)
                whitespace

        comment:  the regular expression string for comments

        literalws:  automatic whitespace eating next to literals. Can
                be either 'left', 'right', 'none', 'both'

        tokens:  set of the names of preprocessor tokens
        filter:  mapping of symbols to python filter functions that
                will be called on any retrieve / pop - operations on
                these symbols

        error:  mapping of symbols to tuples of match conditions and
                customized error messages. A match condition can be
                either a string or a regular expression. The first
                error message where the search condition matches will
                be displayed. An empty string '' as search condition
                always matches, so in case of multiple error messages,
                this condition should be placed at the end.

        resume: mapping of symbols to a list of search conditions. A
                search condition can be either a string or a regular
                expression. The closest match from all search conditions
                is the point of reentry for the parser after an error
                has occurred during parsing.
    """
    __slots__ = ['whitespace', 'comment', 'literalws', 'tokens', 'filter', 'error', 'resume']

    def __init__(self):
        self.whitespace = WHITESPACE_TYPES['vertical']  # type: str
        self.comment = ''     # type: str
        self.literalws = {'right'}  # type: Collection[str]
        self.tokens = set()   # type: Collection[str]
        self.filter = dict()  # type: Dict[str, str]
        self.error = dict()   # type: Dict[str, List[Tuple[ReprType, ReprType]]]
        self.resume = dict()  # type: Dict[str, List[Union[unrepr, str]]]

    def __getitem__(self, key):
        return getattr(self, key)

    def __setitem__(self, key, value):
        assert hasattr(self, key)
        setattr(self, key, value)

    def keys(self):
        return self.__slots__
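
    # Example (illustrative): EBNFDirectives behaves like a record with
    # item-style access:
    #
    #     directives = EBNFDirectives()
    #     directives['literalws'] = {'left', 'right'}
    #     assert directives.literalws == {'left', 'right'}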


class EBNFCompilerError(CompilerError):
    """Error raised by `EBNFCompiler` class. (Not compilation errors
    in the strict sense, see `CompilationError` in module ``dsl.py``)"""
    pass


class EBNFCompiler(Compiler):
    """
    Generates a Parser from an abstract syntax tree of a grammar specified
    in EBNF-Notation.

    Instances of this class must be called with the root-node of the
    abstract syntax tree from an EBNF-specification of a formal language.
    The returned value is the Python-source-code of a Grammar class for
    this language that can be used to parse texts in this language.
    See classes `parser.Compiler` and `parser.Grammar` for more information.

    Additionally, class EBNFCompiler provides helper methods to generate
    code-skeletons for a preprocessor, AST-transformation and full
    compilation of the formal language. These methods' names start with
    the prefix `gen_`.

    Attributes:
        current_symbols:  During compilation, a list containing the root
                node of the currently compiled definition as first element
                and then the nodes of the symbols that are referred to in
                the currently compiled definition.

        rules:  Dictionary that maps rule names to a list of Nodes that
                contain symbol-references in the definition of the rule.
                The first item in the list is the node of the rule-
                definition itself. Example:

                           `alternative = a | b`

                Now `[node.content for node in self.rules['alternative']]`
                yields `['alternative = a | b', 'a', 'b']`

        symbols:  A mapping of symbol names to their first usage (not
                their definition!) in the EBNF source.

        variables:  A set of symbol names that are used with the
                Pop or Retrieve operator. Because the values of these
                symbols need to be captured, they are called variables.
                See `test_parser.TestPopRetrieve` for an example.

        recursive:  A set of symbols that are used recursively and
                therefore require a `Forward`-operator.

        definitions:  A dictionary of definitions. In contrast to `rules`,
                this maps the symbols to their compiled definienda.

        deferred_tasks:  A list of callables that is filled during
                compilation, but that will be executed only after
                compilation has finished. Typically, it contains
                semantic checks that require information that
                is only available upon completion of compilation.

        root_symbol: The name of the root symbol.

        directives:  A record of all directives and their default values.

        defined_directives:  A set of all directives that have already been
                defined. With the exception of those directives contained
                in EBNFCompiler.REPEATABLE_DIRECTIVES, directives must only
                be defined once.

        consumed_custom_errors:  A set of symbols for which a custom error
                has been defined and(!) consumed during compilation. This
                allows adding a compiler error in those cases where (i) an
                error message has been defined but will never be used or (ii)
                an error message is accidentally used twice. For examples, see
                `test_ebnf.TestErrorCustomization`.

        re_flags:  A set of regular expression flags to be added to all
                regular expressions found in the current parsing process

        grammar_name:  The name of the grammar to be compiled

        grammar_source:  The source code of the grammar to be compiled.

        grammar_id: a unique id for every compiled grammar. (Required for
                disambiguation of thread-local variables storing
                compiled texts.)
    """
    COMMENT_KEYWORD = "COMMENT__"
    WHITESPACE_KEYWORD = "WSP_RE__"
    RAW_WS_KEYWORD = "WHITESPACE__"
    WHITESPACE_PARSER_KEYWORD = "wsp__"
    RESUME_RULES_KEYWORD = "resume_rules__"
    ERR_MSG_SUFFIX = '_err_msg__'
    RESERVED_SYMBOLS = {WHITESPACE_KEYWORD, RAW_WS_KEYWORD, COMMENT_KEYWORD,
                        RESUME_RULES_KEYWORD, ERR_MSG_SUFFIX}
    AST_ERROR = "Badly structured syntax tree. " \
                "Potentially due to erroneous AST transformation."
    PREFIX_TABLE = {'§': 'Required',
                    '&': 'Lookahead', '!': 'NegativeLookahead',
                    '-&': 'Lookbehind', '-!': 'NegativeLookbehind',
                    '::': 'Pop', ':': 'Retrieve'}
    REPEATABLE_DIRECTIVES = {'tokens'}

    def __init__(self, grammar_name="DSL", grammar_source=""):
        self.grammar_id = 0
        super(EBNFCompiler, self).__init__()  # calls the _reset()-method
        self.set_grammar_name(grammar_name, grammar_source)


    def _reset(self):
        super(EBNFCompiler, self)._reset()
        self._result = ''           # type: str
        self.re_flags = set()       # type: Set[str]
        self.rules = OrderedDict()  # type: OrderedDict[str, List[Node]]
        self.current_symbols = []   # type: List[Node]
        self.symbols = {}           # type: Dict[str, Node]
        self.variables = set()      # type: Set[str]
        self.recursive = set()      # type: Set[str]
        self.definitions = {}       # type: Dict[str, str]
        self.deferred_tasks = []    # type: List[Callable]
        self.root_symbol = ""       # type: str
        self.directives = EBNFDirectives()   # type: EBNFDirectives
        self.defined_directives = set()      # type: Set[str]
        self.consumed_custom_errors = set()  # type: Set[str]
        self.grammar_id += 1

    @property
    def result(self) -> str:
        return self._result


    def set_grammar_name(self, grammar_name: str = "", grammar_source: str = ""):
        """
        Changes the grammar name and source.

        The grammar name and the source text are metadata that do not affect the
        compilation process. They are only used to name and annotate the output.
        Returns `self`.
        """
        assert grammar_name == "" or re.match(r'\w+\Z', grammar_name)
        if not grammar_name and re.fullmatch(r'[\w/:\\]+', grammar_source):
            grammar_name = os.path.splitext(os.path.basename(grammar_source))[0]
        self.grammar_name = grammar_name
        self.grammar_source = load_if_file(grammar_source)
        return self


    # methods for generating skeleton code for preprocessor, transformer, and compiler

    def gen_preprocessor_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for a preprocessor-function for
        the previously compiled formal language.
        """
        name = self.grammar_name + "Preprocessor"
        return "def %s(text):\n    return text, lambda i: i\n" % name \
               + PREPROCESSOR_FACTORY.format(NAME=self.grammar_name)

    def gen_transformer_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for the AST-transformation for the
        previously compiled formal language.
        """
        if not self.rules:
            raise EBNFCompilerError('Compiler must be run before calling '
                                    '"gen_transformer_skeleton()"!')
        tt_name = self.grammar_name + '_AST_transformation_table'
        transtable = [tt_name + ' = {',
                      '    # AST Transformations for the ' + self.grammar_name + '-grammar']
        transtable.append('    "<": remove_empty,')
        for name in self.rules:
            transformations = '[]'
            rule = self.definitions[name]
            if rule.startswith('Alternative'):
                transformations = '[replace_or_reduce]'
            elif rule.startswith('Synonym'):
                transformations = '[reduce_single_child]'
            transtable.append('    "' + name + '": %s,' % transformations)
        transtable.append('    ":Token": reduce_single_child,')
        transtable += ['    "*": replace_by_single_child', '}', '']
        transtable += [TRANSFORMER_FACTORY.format(NAME=self.grammar_name, ID=self.grammar_id)]
        return '\n'.join(transtable)

    def gen_compiler_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for a Compiler-class for the
        previously compiled formal language.
        """
        if not self.rules:
            raise EBNFCompilerError('Compiler has not been run before calling '
                                    '"gen_compiler_skeleton()"!')
        compiler = ['class ' + self.grammar_name + 'Compiler(Compiler):',
                    '    """Compiler for the abstract-syntax-tree of a '
                    + self.grammar_name + ' source file.',
                    '    """', '',
                    '    def __init__(self):',
                    '        super(' + self.grammar_name + 'Compiler, self).__init__()',
                    '',
                    '    def _reset(self):',
                    '        super()._reset()',
                    '        # initialize your variables here, not in the constructor!']
        for name in self.rules:
            method_name = Compiler.method_name(name)
            if name == self.root_symbol:
                compiler += ['    def ' + method_name + '(self, node):',
                             '        return self.fallback_compiler(node)', '']
            else:
                compiler += ['    # def ' + method_name + '(self, node):',
                             '    #     return node', '']
        compiler += [COMPILER_FACTORY.format(NAME=self.grammar_name, ID=self.grammar_id)]
        return '\n'.join(compiler)

    def verify_transformation_table(self, transtable):
        """
        Checks for symbols that occur in the transformation-table but have
        never been defined in the grammar. Usually, this kind of
        inconsistency results from an error like a typo in the transformation
        table.
        """
        assert self._dirty_flag
        table_entries = set(expand_table(transtable).keys()) - {'*', '<', '>', '~'}
        symbols = self.rules.keys()
        messages = []
        for entry in table_entries:
            if entry not in symbols and not entry.startswith(":"):
                messages.append(Error(('Symbol "%s" is not defined in grammar %s but appears in '
                                       'the transformation table!') % (entry, self.grammar_name),
                                      0, Error.UNDEFINED_SYMBOL_IN_TRANSFORMATION_TABLE))
        return messages

    def verify_compiler(self, compiler):
        """
        Checks for on_XXXX()-methods that occur in the compiler, although XXXX
        has never been defined in the grammar. Usually, this kind of
        inconsistency results from an error like a typo in the compiler-code.
        """
        pass  # TODO: add verification code here


    def _check_rx(self, node: Node, rx: str) -> str:
        """
        Checks whether the string `rx` represents a valid regular
        expression. Makes sure that multiline regular expressions are
        prepended by the multiline-flag. Returns the regular expression string.
        """
        flags = self.re_flags | {'x'} if rx.find('\n') >= 0 else self.re_flags
        if flags:
            rx = "(?%s)%s" % ("".join(flags), rx)
        try:
            re.compile(rx)
        except Exception as re_error:
            self.tree.new_error(node, "malformed regular expression %s: %s" %
                                (repr(rx), str(re_error)))
        return rx


    def _extract_regex(self, node: Node) -> str:
        """Extracts regular expression string from regexp-Node."""
        value = node.content.strip("~")
        if value[0] + value[-1] in {'""', "''"}:
            value = escape_re(value[1:-1])
        elif value[0] + value[-1] == '//':
            value = self._check_rx(node, value[1:-1])
        return value


    def _gen_search_rule(self, nd: Node) -> ReprType:
        """Generates a search rule, which can be either a string for simple
        string search or a regular expression from the node's content. Returns
        an empty string in case the node is neither regexp nor literal.
        """
        if nd.parser.name == 'regexp':
            return unrepr("re.compile(r'%s')" % self._extract_regex(nd))
        elif nd.parser.name == 'literal':
            s = nd.content.strip()
            return s.strip('"') if s[0] == '"' else s.strip("'")
        return ''


    def assemble_parser(self, definitions: List[Tuple[str, str]], root_node: Node) -> str:
        """
        Creates the Python code for the parser after compilation of
        the EBNF-Grammar
        """

        # execute deferred tasks, for example semantic checks that cannot
        # be done before the symbol table is complete

        for task in self.deferred_tasks:
            task()

        # provide for capturing of symbols that are variables, i.e. the
        # value of which will be retrieved at some point during the parsing process

        if self.variables:
            for i in range(len(definitions)):
                if definitions[i][0] in self.variables:
                    definitions[i] = (definitions[i][0], 'Capture(%s)' % definitions[i][1])

        # add special fields for Grammar class

        definitions.append((self.WHITESPACE_PARSER_KEYWORD,
                            'Whitespace(%s)' % self.WHITESPACE_KEYWORD))
        definitions.append((self.WHITESPACE_KEYWORD,
                            ("mixin_comment(whitespace=" + self.RAW_WS_KEYWORD
                             + ", comment=" + self.COMMENT_KEYWORD + ")")))
        definitions.append((self.RAW_WS_KEYWORD, "r'{}'".format(self.directives.whitespace)))
        definitions.append((self.COMMENT_KEYWORD, "r'{}'".format(self.directives.comment)))

        # prepare and add resume-rules

        resume_rules = dict()  # type: Dict[str, List[ReprType]]
        for symbol, raw_rules in self.directives.resume.items():
            refined_rules = []
            for rule in raw_rules:
                if isinstance(rule, unrepr) and rule.s.isidentifier():
                    try:
                        nd = self.rules[rule.s][0].children[1]
                        refined = self._gen_search_rule(nd)
                    except IndexError:
                        refined = ""
                    if refined:
                        refined_rules.append(refined)
                    else:
                        self.tree.new_error(nd, 'Symbol "%s" cannot be used in resume rule, since'
                                                ' it represents neither literal nor regexp!' % rule.s)
                else:
                    refined_rules.append(rule)
            resume_rules[symbol] = refined_rules
        definitions.append((self.RESUME_RULES_KEYWORD, repr(resume_rules)))

        # prepare and add customized error-messages

        for symbol, err_msgs in self.directives.error.items():
            custom_errors = []  # type: List[Tuple[ReprType, ReprType]]
            for search, message in err_msgs:
                if isinstance(search, unrepr) and search.s.isidentifier():
                    try:
                        nd = self.rules[search.s][0].children[1]
                        search = self._gen_search_rule(nd)
                    except IndexError:
                        search = ''
                custom_errors.append((search, message))
            definitions.append((symbol + self.ERR_MSG_SUFFIX, repr(custom_errors)))

        # prepare parser class header and docstring and
        # add EBNF grammar to the doc string of the parser class

        article = 'an ' if self.grammar_name[0:1] in "AaEeIiOoUu" else 'a '  # what about 'hour', 'universe' etc.?
        show_source = get_config_value('add_grammar_source_to_parser_docstring')
        declarations = ['class ' + self.grammar_name
                        + 'Grammar(Grammar):',
                        'r"""Parser for ' + article + self.grammar_name
                        + ' source file'
                        + ('. Grammar:' if self.grammar_source and show_source else '.')]
        definitions.append(('parser_initialization__', '"upon instantiation"'))
        if self.grammar_source:
            definitions.append(('source_hash__',
                                '"%s"' % md5(self.grammar_source, __version__)))
            declarations.append('')
            if show_source:
                declarations += [line for line in self.grammar_source.split('\n')]
            while declarations[-1].strip() == '':
                declarations = declarations[:-1]
        declarations.append('"""')

        # turn definitions into declarations in reverse order

        self.root_symbol = definitions[0][0] if definitions else ""
        definitions.reverse()
        declarations += [symbol + ' = Forward()'
                         for symbol in sorted(list(self.recursive))]
        for symbol, statement in definitions:
            if symbol in self.recursive:
                declarations += [symbol + '.set(' + statement + ')']
            else:
                declarations += [symbol + ' = ' + statement]

        # check for symbols used but never defined

        defined_symbols = set(self.rules.keys()) | self.RESERVED_SYMBOLS
        for symbol in self.symbols:
            if symbol not in defined_symbols:
                self.tree.new_error(self.symbols[symbol],
                                    "Missing definition for symbol '%s'" % symbol)
                # root_node.error_flag = True

        # check for unconnected rules

        defined_symbols.difference_update(self.RESERVED_SYMBOLS)

        def remove_connections(symbol):
            """Recursively removes all symbols which appear in the
            definiens of a particular symbol."""
            if symbol in defined_symbols:
                defined_symbols.remove(symbol)
                for related in self.rules[symbol][1:]:
                    remove_connections(str(related))

        remove_connections(self.root_symbol)
        for leftover in defined_symbols:
            self.tree.new_error(self.rules[leftover][0],
                                ('Rule "%s" is not connected to parser root "%s" !') %
                                (leftover, self.root_symbol), Error.WARNING)

        # set root_symbol parser and assemble python grammar definition

        if self.root_symbol and 'root__' not in self.rules:
            declarations.append('root__ = ' + self.root_symbol)
        declarations.append('')
        self._result = '\n    '.join(declarations) \
                       + GRAMMAR_FACTORY.format(NAME=self.grammar_name, ID=self.grammar_id)
        return self._result


    ## compilation methods

    def on_syntax(self, node: Node) -> str:
        definitions = []  # type: List[Tuple[str, str]]

        # drop the wrapping sequence node
        if len(node.children) == 1 and not node.children[0].parser.name:
            node = node.children[0]

        # compile definitions and directives and collect definitions
        for nd in node.children:
            if nd.parser.name == "definition":
                definitions.append(self.compile(nd))
            else:
                assert nd.parser.name == "directive", nd.as_sxpr()
                self.compile(nd)
            # node.error_flag = max(node.error_flag, nd.error_flag)
        self.definitions.update(definitions)

        return self.assemble_parser(definitions, node)

    def on_definition(self, node: Node) -> Tuple[str, str]:
        rule = node.children[0].content
        if rule in self.rules:
            first = self.rules[rule][0]
            if not first.errors:
                self.tree.new_error(first, 'First definition of rule "%s" '
                                    'followed by illegal redefinitions.' % rule)
            self.tree.new_error(node, 'A rule "%s" has already been defined earlier.' % rule)
        elif rule in EBNFCompiler.RESERVED_SYMBOLS:
            self.tree.new_error(node, 'Symbol "%s" is a reserved symbol.' % rule)
        elif not sane_parser_name(rule):
            self.tree.new_error(node, 'Illegal symbol "%s". Symbols must not start or '
                                ' end with a double underscore "__".' % rule)
        elif rule in self.directives.tokens:
            self.tree.new_error(node, 'Symbol "%s" has already been defined as '
                                'a preprocessor token.' % rule)
        elif keyword.iskeyword(rule):
            self.tree.new_error(node, 'Python keyword "%s" may not be used as a symbol. '
                                % rule + '(This may change in the future.)')
        try:
            self.current_symbols = [node]
            self.rules[rule] = self.current_symbols
            defn = self.compile(node.children[1])
            if rule in self.variables:
                defn = 'Capture(%s)' % defn
                self.variables.remove(rule)
            elif defn.find("(") < 0:
                # assume it's a synonym, like 'page = REGEX_PAGE_NR'
                defn = 'Synonym(%s)' % defn
        except TypeError as error:
            from traceback import extract_tb
            trace = str(extract_tb(error.__traceback__)[-1])
            errmsg = "%s (TypeError: %s; %s)\n%s" \
                     % (EBNFCompiler.AST_ERROR, str(error), trace, node.as_sxpr())
            self.tree.new_error(node, errmsg)
            rule, defn = rule + ':error', '"' + errmsg + '"'
        return rule, defn
853
    def on_directive(self, node: Node) -> str:
854
        key = node.children[0].content
855
        assert key not in self.directives.tokens
856

857
858
        if key not in self.REPEATABLE_DIRECTIVES:
            if key in self.defined_directives:
eckhart's avatar
eckhart committed
859
                self.tree.new_error(node, 'Directive "%s" has already been defined earlier. '
eckhart's avatar
eckhart committed
860
861
                                    % key + 'Later definition will be ignored!',
                                    code=Error.REDEFINED_DIRECTIVE_WARNING)
862
863
864
                return ""
            self.defined_directives.add(key)

eckhart's avatar
eckhart committed
865
866
        def check_argnum(n: int = 1):
            if len(node.children) > n + 1:
867
                self.tree.new_error(node, 'Directive "%s" can have at most %i values.' % (key, n))
eckhart's avatar
eckhart committed
868

869
        if key in {'comment', 'whitespace'}:
eckhart's avatar
eckhart committed
870
871
872
            check_argnum()
            if node.children[1].parser.name == "symbol":
                value = node.children[1].content
873
874
                if key == 'whitespace' and value in WHITESPACE_TYPES:
                    value = WHITESPACE_TYPES[value]  # replace whitespace-name by regex
875
                else:
eckhart's avatar
eckhart committed
876
                    self.tree.new_error(node, 'Value "%s" not allowed for directive "%s".'
eckhart's avatar
eckhart committed
877
                                        % (value, key))
878
            else:
879
                value = self._extract_regex(node.children[1])
880
                if key == 'whitespace' and not re.match(value, ''):
eckhart's avatar
eckhart committed
881
                    self.tree.new_error(node, "Implicit whitespace should always "
eckhart's avatar
eckhart committed
882
                                        "match the empty string, /%s/ does not." % value)
883
            self.directives[key] = value
884

885
        elif key == 'ignorecase':
eckhart's avatar
eckhart committed
886
            check_argnum()
887
            if node.children[1].content.lower() not in {"off", "false", "no"}:
888
889
                self.re_flags.add('i')

890
        elif key == 'literalws':
eckhart's avatar
eckhart committed
891
892
893
            values = {child.content.strip().lower() for child in node.children[1:]}
            if ((values - {'left', 'right', 'both', 'none'})
                    or ('none' in values and len(values) > 1)):
eckhart's avatar
eckhart committed
894
                self.tree.new_error(node, 'Directive "literalws" allows only `left`, `right`, '
eckhart's avatar
eckhart committed
895
896
897
                                    '`both` or `none`, not `%s`' % ", ".join(values))
            wsp = {'left', 'right'} if 'both' in values \
                else {} if 'none' in values else values
898
            self.directives.literalws = wsp
899

900
        elif key in {'tokens', 'preprocessor_tokens'}:
eckhart's avatar
eckhart committed
901
            tokens = {child.content.strip() for child in node.children[1:]}
902
            redeclared = self.directives.tokens & tokens
903
            if redeclared:
eckhart's avatar
eckhart committed
904
                self.tree.new_error(node, 'Tokens %s have already been declared earlier. '
eckhart's avatar
eckhart committed
905
906
                                    % str(redeclared) + 'Later declaration will be ignored',
                                    code=Error.REDECLARED_TOKEN_WARNING)
907
            self.directives.tokens |= tokens - redeclared
908

909
        elif key.endswith('_filter'):
eckhart's avatar
eckhart committed
910
            check_argnum()
911
            symbol = key[:-7]
912
            self.directives.filter[symbol] = node.children[1].content.strip()
913

914
        elif key.endswith('_error'):
915
            check_argnum(2)
916
            symbol = key[:-6]
917
            error_msgs = self.directives.error.get(symbol, [])
eckhart's avatar
eckhart committed
918
919
920
            if symbol in self.rules:
                self.tree.new_error(node, 'Custom error message for symbol "%s"' % symbol
                                    + 'must be defined before the symbol!')
921
922
923
924
            if node.children[1 if len(node.children) == 2 else 2].parser.name != 'literal':
                self.tree.new_error(
                    node, 'Directive "%s" requires message string or a a pair ' % key +
                    '(regular expression or search string, message string) as argument!')
925
            if len(node.children) == 2:
926
927
928
929