# ebnf.py - EBNF -> Python-Parser compilation for DHParser
#
# Copyright 2016  by Eckhart Arnold (arnold@badw.de)
#                 Bavarian Academy of Sciences and Humanities (badw.de)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.  See the License for the specific language governing
# permissions and limitations under the License.


"""
Module ``ebnf`` provides a self-hosting parser for EBNF-Grammars as
well as an EBNF-compiler that compiles an EBNF-Grammar into a
DHParser based Grammar class that can be executed to parse source text
conforming to this grammar into contrete syntax trees.
24
25
"""

from collections import OrderedDict
from functools import partial
import keyword
import os

from DHParser.compile import CompilerError, Compiler, compile_source
from DHParser.error import Error
from DHParser.parse import Grammar, mixin_comment, Forward, RegExp, Whitespace, \
    NegativeLookahead, Alternative, Series, Option, OneOrMore, ZeroOrMore, Token
from DHParser.preprocess import nil_preprocessor, PreprocessorFunc
from DHParser.syntaxtree import Node, WHITESPACE_PTYPE, TOKEN_PTYPE
from DHParser.toolkit import load_if_file, escape_re, md5, sane_parser_name, re, expand_table, \
    GLOBALS, CONFIG_PRESET, get_config_value, unrepr, typing
from DHParser.transform import TransformationFunc, traverse, remove_brackets, \
    reduce_single_child, replace_by_single_child, remove_expendables, \
    remove_tokens, flatten, forbid, assert_content
from DHParser.versionnumber import __version__
from typing import Callable, Dict, List, Set, Tuple, Sequence, Union, Optional, Any


__all__ = ('get_ebnf_preprocessor',
           'get_ebnf_grammar',
           'get_ebnf_transformer',
           'get_ebnf_compiler',
           'EBNFGrammar',
           'EBNFTransform',
           'EBNFCompilerError',
           'EBNFCompiler',
           'grammar_changed',
           'compile_ebnf',
           'PreprocessorFactoryFunc',
           'ParserFactoryFunc',
           'TransformerFactoryFunc',
           'CompilerFactoryFunc')
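
# Illustrative sketch only (not part of the module): a typical round trip with
# the factory functions exported above. The EBNF snippet and variable names are
# made up for demonstration; the call signatures are those defined in this module.
#
#     ebnf_src = r'word = /\w+/ ~'
#     cst = get_ebnf_grammar()(ebnf_src)          # parse the EBNF source
#     get_ebnf_transformer()(cst)                 # CST -> AST, in place
#     python_src = get_ebnf_compiler()(cst)       # AST -> Python parser code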


########################################################################
#
# EBNF scanning
#
########################################################################


def get_ebnf_preprocessor() -> PreprocessorFunc:
    return nil_preprocessor


########################################################################
#
# EBNF parsing
#
########################################################################


class EBNFGrammar(Grammar):
    r"""
    Parser for an EBNF source file, with this grammar:

    @ comment    = /#.*(?:\n|$)/                    # comments start with '#' and eat all chars up to and including '\n'
    @ whitespace = /\s*/                            # whitespace includes linefeed
    @ literalws  = right                            # trailing whitespace of literals will be ignored tacitly

    syntax     = [~//] { definition | directive } §EOF
    definition = symbol §"=" expression
    directive  = "@" §symbol "=" (regexp | literal | symbol) { "," (regexp | literal | symbol) }

    expression = term { "|" term }
    term       = { ["§"] factor }+                       # "§" means all following factors mandatory
    factor     = [flowmarker] [retrieveop] symbol !"="   # negative lookahead to be sure it's not a definition
               | [flowmarker] literal
               | [flowmarker] plaintext
               | [flowmarker] regexp
               | [flowmarker] whitespace
               | [flowmarker] oneormore
               | [flowmarker] group
               | [flowmarker] unordered
               | repetition
               | option

    flowmarker = "!"  | "&"                         # '!' negative lookahead, '&' positive lookahead
               | "-!" | "-&"                        # '-' negative lookbehind, '-&' positive lookbehind
    retrieveop = "::" | ":"                         # '::' pop, ':' retrieve

    group      = "(" §expression ")"
    unordered  = "<" §expression ">"                # elements of expression in arbitrary order
    oneormore  = "{" expression "}+"
    repetition = "{" §expression "}"
    option     = "[" §expression "]"

    symbol     = /(?!\d)\w+/~                       # e.g. expression, factor, parameter_list
    literal    = /"(?:[^"]|\\")*?"/~                # e.g. "(", '+', 'while'
               | /'(?:[^']|\\')*?'/~                # whitespace following literals will be ignored tacitly.
    plaintext  = /`(?:[^"]|\\")*?`/~                # like literal but does not eat whitespace
    regexp     = /\/(?:\\\/|[^\/])*?\//~            # e.g. /\w+/, ~/#.*(?:\n|$)/~
    whitespace = /~/~                               # insignificant whitespace

    EOF = !/./
    """
    expression = Forward()
    source_hash__ = "82a7c668f86b83f86515078e6c9093ed"
    parser_initialization__ = "upon instantiation"
    COMMENT__ = r'#.*(?:\n|$)'
    WHITESPACE__ = r'\s*'
    WSP_RE__ = mixin_comment(whitespace=WHITESPACE__, comment=COMMENT__)
    wsp__ = Whitespace(WSP_RE__)
    EOF = NegativeLookahead(RegExp('.'))
    whitespace = Series(RegExp('~'), wsp__)
    regexp = Series(RegExp('/(?:\\\\/|[^/])*?/'), wsp__)
    plaintext = Series(RegExp('`(?:[^"]|\\\\")*?`'), wsp__)
    literal = Alternative(Series(RegExp('"(?:[^"]|\\\\")*?"'), wsp__), Series(RegExp("'(?:[^']|\\\\')*?'"), wsp__))
    symbol = Series(RegExp('(?!\\d)\\w+'), wsp__)
    option = Series(Series(Token("["), wsp__), expression, Series(Token("]"), wsp__), mandatory=1)
    repetition = Series(Series(Token("{"), wsp__), expression, Series(Token("}"), wsp__), mandatory=1)
    oneormore = Series(Series(Token("{"), wsp__), expression, Series(Token("}+"), wsp__))
    unordered = Series(Series(Token("<"), wsp__), expression, Series(Token(">"), wsp__), mandatory=1)
    group = Series(Series(Token("("), wsp__), expression, Series(Token(")"), wsp__), mandatory=1)
    retrieveop = Alternative(Series(Token("::"), wsp__), Series(Token(":"), wsp__))
    flowmarker = Alternative(Series(Token("!"), wsp__), Series(Token("&"), wsp__),
                             Series(Token("-!"), wsp__), Series(Token("-&"), wsp__))
    factor = Alternative(Series(Option(flowmarker), Option(retrieveop), symbol,
                                NegativeLookahead(Series(Token("="), wsp__))), Series(Option(flowmarker), literal),
                         Series(Option(flowmarker), plaintext), Series(Option(flowmarker), regexp),
                         Series(Option(flowmarker), whitespace), Series(Option(flowmarker), oneormore),
                         Series(Option(flowmarker), group), Series(Option(flowmarker), unordered), repetition, option)
    term = OneOrMore(Series(Option(Series(Token("§"), wsp__)), factor))
    expression.set(Series(term, ZeroOrMore(Series(Series(Token("|"), wsp__), term))))
    directive = Series(Series(Token("@"), wsp__), symbol, Series(Token("="), wsp__),
                       Alternative(regexp, literal, symbol),
                       ZeroOrMore(Series(Series(Token(","), wsp__), Alternative(regexp, literal, symbol))), mandatory=1)
    definition = Series(symbol, Series(Token("="), wsp__), expression, mandatory=1)
    syntax = Series(Option(Series(wsp__, RegExp(''))), ZeroOrMore(Alternative(definition, directive)), EOF, mandatory=2)
    root__ = syntax
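
    # Illustrative usage sketch (not part of the class): an EBNFGrammar
    # instance is callable with EBNF source text and returns a concrete
    # syntax tree that can be inspected as an S-expression.
    #
    #     cst = get_ebnf_grammar()(r'number = /\d+/ ~')
    #     assert not cst.errors
    #     print(cst.as_sxpr())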


def grammar_changed(grammar_class, grammar_source: str) -> bool:
    """
    Returns ``True`` if ``grammar_class`` does not reflect the latest
    changes of ``grammar_source``

    Parameters:
        grammar_class:  the parser class representing the grammar
            or the file name of a compiler suite containing the grammar
        grammar_source:  File name or string representation of the
            EBNF code of the grammar

    Returns (bool):
        True, if the source text of the grammar is different from the
        source from which the grammar class was generated
    """
    grammar = load_if_file(grammar_source)
    chksum = md5(grammar, __version__)
    if isinstance(grammar_class, str):
        # grammar_class = load_compiler_suite(grammar_class)[1]
        with open(grammar_class, 'r', encoding='utf8') as f:
            pycode = f.read()
        m = re.search(r'class \w*\(Grammar\)', pycode)
        if m:
            m = re.search('    source_hash__ *= *"([a-z0-9]*)"',
                          pycode[m.span()[1]:])
            return not (m and m.groups() and m.groups()[-1] == chksum)
        else:
            return True
    else:
        return chksum != grammar_class.source_hash__
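

# Illustrative sketch (hypothetical file name): grammar_changed() can guard the
# regeneration of a compiler suite, e.g. in a build script.
#
#     if grammar_changed(EBNFGrammar, 'EBNF.ebnf'):
#         ...  # re-run the EBNF compiler on the changed grammar source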


def get_ebnf_grammar() -> EBNFGrammar:
    try:
        grammar = GLOBALS.ebnf_grammar_singleton
        return grammar
    except AttributeError:
        GLOBALS.ebnf_grammar_singleton = EBNFGrammar()
        return GLOBALS.ebnf_grammar_singleton


########################################################################
#
# EBNF concrete to abstract syntax tree transformation and validation
#
########################################################################


EBNF_AST_transformation_table = {
    # AST Transformations for EBNF-grammar
    "<":
        remove_expendables,
    "syntax":
        [],  # otherwise '"*": replace_by_single_child' would be applied
    "directive, definition":
        [flatten, remove_tokens('@', '=', ',')],
    "expression":
        [replace_by_single_child, flatten, remove_tokens('|')],  # remove_infix_operator],
    "term":
        [replace_by_single_child, flatten],  # supports both idioms:
                                             # "{ factor }+" and "factor { factor }"
    "factor, flowmarker, retrieveop":
        replace_by_single_child,
    "group":
        [remove_brackets, replace_by_single_child],
    "unordered":
        remove_brackets,
    "oneormore, repetition, option":
        [reduce_single_child, remove_brackets,
         forbid('repetition', 'option', 'oneormore'), assert_content(r'(?!§)(?:.|\n)*')],
    "symbol, literal, regexp":
        reduce_single_child,
    (TOKEN_PTYPE, WHITESPACE_PTYPE):
        reduce_single_child,
    # "list_":
    #     [flatten, remove_infix_operator],
    "*":
        replace_by_single_child
}


def EBNFTransform() -> TransformationFunc:
    return partial(traverse, processing_table=EBNF_AST_transformation_table.copy())


def get_ebnf_transformer() -> TransformationFunc:
    try:
        transformer = GLOBALS.EBNF_transformer_singleton
    except AttributeError:
        GLOBALS.EBNF_transformer_singleton = EBNFTransform()
        transformer = GLOBALS.EBNF_transformer_singleton
    return transformer
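

# Illustrative sketch (names as defined above): the transformer is applied in
# place to the concrete syntax tree produced by the EBNF grammar before the
# tree is handed on to the EBNF compiler.
#
#     cst = get_ebnf_grammar()(r'word = /\w+/ ~')
#     get_ebnf_transformer()(cst)   # prunes tokens and flattens nested expressions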


########################################################################
#
# EBNF abstract syntax tree to Python parser compilation
#
########################################################################


PreprocessorFactoryFunc = Callable[[], PreprocessorFunc]
ParserFactoryFunc = Callable[[], Grammar]
TransformerFactoryFunc = Callable[[], TransformationFunc]
CompilerFactoryFunc = Callable[[], Compiler]


PREPROCESSOR_FACTORY = '''
def get_preprocessor() -> PreprocessorFunc:
    return {NAME}Preprocessor
'''


GRAMMAR_FACTORY = '''
def get_grammar() -> {NAME}Grammar:
    global GLOBALS
    try:
        grammar = GLOBALS.{NAME}_{ID}_grammar_singleton
    except AttributeError:
        GLOBALS.{NAME}_{ID}_grammar_singleton = {NAME}Grammar()
        if hasattr(get_grammar, 'python_src__'):
            GLOBALS.{NAME}_{ID}_grammar_singleton.python_src__ = get_grammar.python_src__
        grammar = GLOBALS.{NAME}_{ID}_grammar_singleton
    return grammar
'''


TRANSFORMER_FACTORY = '''
def {NAME}Transform() -> TransformationDict:
    return partial(traverse, processing_table={NAME}_AST_transformation_table.copy())

def get_transformer() -> TransformationFunc:
    try:
        transformer = GLOBALS.{NAME}_{ID}_transformer_singleton
    except AttributeError:
        GLOBALS.{NAME}_{ID}_transformer_singleton = {NAME}Transform()
        transformer = GLOBALS.{NAME}_{ID}_transformer_singleton
    return transformer
'''


COMPILER_FACTORY = '''
def get_compiler() -> {NAME}Compiler:
    try:
        compiler = GLOBALS.{NAME}_{ID}_compiler_singleton
    except AttributeError:
        GLOBALS.{NAME}_{ID}_compiler_singleton = {NAME}Compiler()
        compiler = GLOBALS.{NAME}_{ID}_compiler_singleton
    return compiler
'''


WHITESPACE_TYPES = {'horizontal': r'[\t ]*',  # default: horizontal
                    'linefeed': r'[ \t]*\n?(?!\s*\n)[ \t]*',
                    'vertical': r'\s*'}
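
# A brief illustration of the three predefined whitespace regimes (sketch of
# the matching behaviour of the regular expressions above):
#
#     re.match(WHITESPACE_TYPES['horizontal'], '  \t')    # spaces/tabs, never consumes a newline
#     re.match(WHITESPACE_TYPES['linefeed'], '  \n  ')    # at most one linebreak, no blank line
#     re.match(WHITESPACE_TYPES['vertical'], '\n\n  ')    # any whitespace incl. blank lines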


# Representation of Python code or, rather, something that will be output as Python code
ReprType = Union[str, unrepr]


class EBNFDirectives:
    """
    A Record that keeps information about compiler directives
    during the compilation process.

    Attributes:
        whitespace:  the regular expression string for (insignificant)
                whitespace

        comment:  the regular expression string for comments

        literalws:  automatic whitespace eating next to literals. Can
                be either 'left', 'right', 'none', 'both'

        tokens:  set of the names of preprocessor tokens

        filter:  mapping of symbols to Python filter functions that
                will be called on any retrieve or pop operations on
                these symbols

        error:  mapping of symbols to tuples of match conditions and
                customized error messages. A match condition can be
                either a string or a regular expression. The first
                error message where the search condition matches will
                be displayed. An empty string '' as search condition
                always matches, so in case of multiple error messages,
                this condition should be placed at the end.

        skip:  mapping of symbols to a list of search expressions. A
                search expression can be either a string or a regular
                expression. The closest match is the point of reentry
                for the series-parser when a mandatory item failed to
                match the following text.

        resume:  mapping of symbols to a list of search expressions. A
                search expression can be either a string or a regular
                expression. The closest match is the point of reentry
                after a parsing error has occurred. In contrast to the
                skip field, this configures resuming after the failing
                parser has returned.
    """
    __slots__ = ['whitespace', 'comment', 'literalws', 'tokens', 'filter', 'error', 'skip',
                 'resume']

    def __init__(self):
        self.whitespace = WHITESPACE_TYPES['vertical']  # type: str
        self.comment = ''     # type: str
        self.literalws = {'right'}  # type: Collection[str]
        self.tokens = set()   # type: Collection[str]
        self.filter = dict()  # type: Dict[str, str]
        self.error = dict()   # type: Dict[str, List[Tuple[ReprType, ReprType]]]
        self.skip = dict()    # type: Dict[str, List[Union[unrepr, str]]]
        self.resume = dict()  # type: Dict[str, List[Union[unrepr, str]]]

    def __getitem__(self, key):
        return getattr(self, key)

    def __setitem__(self, key, value):
        assert hasattr(self, key)
        setattr(self, key, value)

    def keys(self):
        return self.__slots__
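
    # Illustrative sketch (not part of the class): directives can be read and
    # updated via attribute access or key access interchangeably.
    #
    #     d = EBNFDirectives()
    #     d['whitespace'] = WHITESPACE_TYPES['horizontal']
    #     assert d.whitespace == d['whitespace']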


class EBNFCompilerError(CompilerError):
    """Error raised by `EBNFCompiler` class. (Not compilation errors
    in the strict sense, see `CompilationError` in module ``dsl.py``)"""
    pass


class EBNFCompiler(Compiler):
    """
    Generates a Parser from an abstract syntax tree of a grammar specified
    in EBNF-Notation.

    Instances of this class must be called with the root-node of the
    abstract syntax tree from an EBNF-specification of a formal language.
    The returned value is the Python-source-code of a Grammar class for
    this language that can be used to parse texts in this language.
    See classes `parser.Compiler` and `parser.Grammar` for more information.

    Additionally, class EBNFCompiler provides helper methods to generate
    code-skeletons for a preprocessor, AST-transformation and full
    compilation of the formal language. These methods' names start with
    the prefix `gen_`.

    Attributes:
        current_symbols:  During compilation, a list containing the root
                node of the currently compiled definition as first element
                and then the nodes of the symbols that are referred to in
                the currently compiled definition.

        rules:  Dictionary that maps rule names to a list of Nodes that
                contain symbol-references in the definition of the rule.
                The first item in the list is the node of the rule-
                definition itself. Example:

                           `alternative = a | b`

                Now `[node.content for node in self.rules['alternative']]`
                yields `['alternative = a | b', 'a', 'b']`

        symbols:  A mapping of symbol names to their first usage (not
                their definition!) in the EBNF source.

        variables:  A set of symbols names that are used with the
                Pop or Retrieve operator. Because the values of these
                symbols need to be captured they are called variables.
                See `test_parser.TestPopRetrieve` for an example.

        recursive:  A set of symbols that are used recursively and
                therefore require a `Forward`-operator.

        definitions:  A dictionary of definitions. Other than `rules`
                this maps the symbols to their compiled definienda.

        deferred_tasks:  A list of callables that is filled during
                compilation, but that will be executed only after
                compilation has finished. Typically, it contains
                semantic checks that require information that
                is only available upon completion of compilation.

        root_symbol: The name of the root symbol.

        directives:  A record of all directives and their default values.

        defined_directives:  A set of all directives that have already been
                defined. With the exception of those directives contained
                in EBNFCompiler.REPEATABLE_DIRECTIVES, directives must only
                be defined once.

        consumed_custom_errors:  A set of symbols for which a custom error
                has been defined and(!) consumed during compilation. This
                allows adding a compiler error in those cases where (i) an
                error message has been defined but will never be used or (ii)
                an error message is accidentally used twice. For examples, see
                `test_ebnf.TestErrorCustomization`.

        consumed_skip_rules: The same as `consumed_custom_errors` only for
                in-series-resume-rules (aka 'skip-rules') for Series-parsers.

        re_flags:  A set of regular expression flags to be added to all
                regular expressions found in the current parsing process

        grammar_name:  The name of the grammar to be compiled

        grammar_source:  The source code of the grammar to be compiled.

        grammar_id: a unique id for every compiled grammar. (Required for
                disambiguation of thread-local variables storing
                compiled texts.)
    """
    COMMENT_KEYWORD = "COMMENT__"
    WHITESPACE_KEYWORD = "WSP_RE__"
    RAW_WS_KEYWORD = "WHITESPACE__"
    WHITESPACE_PARSER_KEYWORD = "wsp__"
    RESUME_RULES_KEYWORD = "resume_rules__"
    SKIP_RULES_SUFFIX = '_skip__'
    ERR_MSG_SUFFIX = '_err_msg__'
    RESERVED_SYMBOLS = {WHITESPACE_KEYWORD, RAW_WS_KEYWORD, COMMENT_KEYWORD,
                        RESUME_RULES_KEYWORD, ERR_MSG_SUFFIX}
    AST_ERROR = "Badly structured syntax tree. " \
                "Potentially due to erroneous AST transformation."
    PREFIX_TABLE = {'§': 'Required',
                    '&': 'Lookahead', '!': 'NegativeLookahead',
                    '-&': 'Lookbehind', '-!': 'NegativeLookbehind',
                    '::': 'Pop', ':': 'Retrieve'}
    REPEATABLE_DIRECTIVES = {'tokens'}


    def __init__(self, grammar_name="DSL", grammar_source=""):
        self.grammar_id = 0
        super(EBNFCompiler, self).__init__()  # calls the _reset()-method
        self.set_grammar_name(grammar_name, grammar_source)


    def _reset(self):
        super(EBNFCompiler, self)._reset()
        self._result = ''           # type: str
        self.re_flags = set()       # type: Set[str]
        self.rules = OrderedDict()  # type: OrderedDict[str, List[Node]]
        self.current_symbols = []   # type: List[Node]
        self.symbols = {}           # type: Dict[str, Node]
        self.variables = set()      # type: Set[str]
        self.recursive = set()      # type: Set[str]
        self.definitions = {}       # type: Dict[str, str]
        self.deferred_tasks = []    # type: List[Callable]
        self.root_symbol = ""       # type: str
        self.directives = EBNFDirectives()   # type: EBNFDirectives
        self.defined_directives = set()      # type: Set[str]
        self.consumed_custom_errors = set()  # type: Set[str]
        self.consumed_skip_rules = set()     # type: Set[str]
        self.grammar_id += 1


    @property
    def result(self) -> str:
        return self._result


    def set_grammar_name(self, grammar_name: str = "", grammar_source: str = ""):
        """
        Changes the grammar name and source.

        The grammar name and the source text are metadata that do not affect the
        compilation process. They are used to name and annotate the output.
        Returns `self`.
        """
        assert grammar_name == "" or re.match(r'\w+\Z', grammar_name)
        if not grammar_name and re.fullmatch(r'[\w/:\\]+', grammar_source):
            grammar_name = os.path.splitext(os.path.basename(grammar_source))[0]
        self.grammar_name = grammar_name
        self.grammar_source = load_if_file(grammar_source)
        return self


    # methods for generating skeleton code for preprocessor, transformer, and compiler

    def gen_preprocessor_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for a preprocessor-function for
        the previously compiled formal language.
        """
        name = self.grammar_name + "Preprocessor"
        return "def %s(text):\n    return text, lambda i: i\n" % name \
               + PREPROCESSOR_FACTORY.format(NAME=self.grammar_name)


    def gen_transformer_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for the AST-transformation for the
        previously compiled formal language.
        """
        if not self.rules:
            raise EBNFCompilerError('Compiler must be run before calling '
                                    '"gen_transformer_skeleton()"!')
        tt_name = self.grammar_name + '_AST_transformation_table'
        transtable = [tt_name + ' = {',
                      '    # AST Transformations for the ' + self.grammar_name + '-grammar']
        transtable.append('    "<": remove_empty,')
        for name in self.rules:
            transformations = '[]'
            rule = self.definitions[name]
            if rule.startswith('Alternative'):
                transformations = '[replace_or_reduce]'
            elif rule.startswith('Synonym'):
                transformations = '[reduce_single_child]'
            transtable.append('    "' + name + '": %s,' % transformations)
        transtable.append('    ":Token": reduce_single_child,')
        transtable += ['    "*": replace_by_single_child', '}', '']
        transtable += [TRANSFORMER_FACTORY.format(NAME=self.grammar_name, ID=self.grammar_id)]
        return '\n'.join(transtable)


    def gen_compiler_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for a Compiler-class for the
        previously compiled formal language.
        """
        if not self.rules:
            raise EBNFCompilerError('Compiler has not been run before calling '
                                    '"gen_compiler_skeleton()"!')
        compiler = ['class ' + self.grammar_name + 'Compiler(Compiler):',
                    '    """Compiler for the abstract-syntax-tree of a '
                    + self.grammar_name + ' source file.',
                    '    """', '',
                    '    def __init__(self):',
                    '        super(' + self.grammar_name + 'Compiler, self).__init__()',
                    '',
                    '    def _reset(self):',
                    '        super()._reset()',
                    '        # initialize your variables here, not in the constructor!']
        for name in self.rules:
            method_name = Compiler.method_name(name)
            if name == self.root_symbol:
                compiler += ['    def ' + method_name + '(self, node):',
                             '        return self.fallback_compiler(node)', '']
            else:
                compiler += ['    # def ' + method_name + '(self, node):',
                             '    #     return node', '']
        compiler += [COMPILER_FACTORY.format(NAME=self.grammar_name, ID=self.grammar_id)]
        return '\n'.join(compiler)


    def verify_transformation_table(self, transtable):
        """
        Checks for symbols that occur in the transformation-table but have
        never been defined in the grammar. Usually, this kind of
        inconsistency results from an error like a typo in the transformation
        table.
        """
        assert self._dirty_flag
        table_entries = set(expand_table(transtable).keys()) - {'*', '<', '>', '~'}
        symbols = self.rules.keys()
        messages = []
        for entry in table_entries:
            if entry not in symbols and not entry.startswith(":"):
                messages.append(Error(('Symbol "%s" is not defined in grammar %s but appears in '
                                       'the transformation table!') % (entry, self.grammar_name),
                                      0, Error.UNDEFINED_SYMBOL_IN_TRANSTABLE_WARNING))
        return messages

    def verify_compiler(self, compiler):
        """
        Checks for on_XXXX()-methods that occur in the compiler, although XXXX
        has never been defined in the grammar. Usually, this kind of
        inconsistency results from an error like a typo in the compiler-code.
        """
        pass  # TODO: add verification code here


    def _check_rx(self, node: Node, rx: str) -> str:
        """
        Checks whether the string `rx` represents a valid regular
        expression. Makes sure that regular expressions spanning several
        lines are compiled in verbose mode (the (?x)-flag is prepended).
        Returns the regular expression string.
        """
        flags = self.re_flags | {'x'} if rx.find('\n') >= 0 else self.re_flags
        if flags:
            rx = "(?%s)%s" % ("".join(flags), rx)
        try:
            re.compile(rx)
        except Exception as re_error:
            self.tree.new_error(node, "malformed regular expression %s: %s" %
                                (repr(rx), str(re_error)))
        return rx


    def _extract_regex(self, node: Node) -> str:
        """Extracts regular expression string from regexp-Node."""
        value = node.content.strip("~")
        if value[0] + value[-1] in {'""', "''"}:
            value = escape_re(value[1:-1])
        elif value[0] + value[-1] == '//':
            value = self._check_rx(node, value[1:-1])
        return value
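
    # Illustrative sketch of the two helpers above (hypothetical values):
    #
    #     self._check_rx(node, r'\d+')             # returns r'\d+' unchanged
    #     self._check_rx(node, '\\d+\n | \\w+')    # returns '(?x)\\d+\n | \\w+'
    #     # _extract_regex() strips enclosing '~', '/.../' or quote characters
    #     # from the node's content before validating it with _check_rx().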


    def _gen_search_rule(self, nd: Node) -> ReprType:
        """Generates a search rule, which can be either a string for simple
        string search or a regular expression from the node's content. Returns
        an empty string in case the node is neither regexp nor literal.
        """
        if nd.parser.name == 'regexp':
            return unrepr("re.compile(r'%s')" % self._extract_regex(nd))
        elif nd.parser.name == 'literal':
            s = nd.content.strip()
            return s.strip('"') if s[0] == '"' else s.strip("'")
        return ''

    def _gen_search_list(self, nodes: Sequence[Node]) -> List[Union[unrepr, str]]:
        search_list = []  # type: List[Union[unrepr, str]]
        for child in nodes:
            rule = self._gen_search_rule(child)
            search_list.append(rule if rule else unrepr(child.content.strip()))
        return search_list


    def assemble_parser(self, definitions: List[Tuple[str, str]], root_node: Node) -> str:
        """
        Creates the Python code for the parser after compilation of
        the EBNF-Grammar
        """

        # execute deferred tasks, for example semantic checks that cannot
        # be done before the symbol table is complete

        for task in self.deferred_tasks:
            task()

        # provide for capturing of symbols that are variables, i.e. the
        # value of which will be retrieved at some point during the parsing process

        if self.variables:
            for i in range(len(definitions)):
                if definitions[i][0] in self.variables:
                    definitions[i] = (definitions[i][0], 'Capture(%s)' % definitions[i][1])

        # add special fields for Grammar class

        definitions.append((self.WHITESPACE_PARSER_KEYWORD,
                            'Whitespace(%s)' % self.WHITESPACE_KEYWORD))
        definitions.append((self.WHITESPACE_KEYWORD,
                            ("mixin_comment(whitespace=" + self.RAW_WS_KEYWORD
                             + ", comment=" + self.COMMENT_KEYWORD + ")")))
        definitions.append((self.RAW_WS_KEYWORD, "r'{}'".format(self.directives.whitespace)))
        definitions.append((self.COMMENT_KEYWORD, "r'{}'".format(self.directives.comment)))

        # prepare and add resume-rules

        resume_rules = dict()  # type: Dict[str, List[ReprType]]
        for symbol, raw_rules in self.directives.resume.items():
            refined_rules = []
            for rule in raw_rules:
                if isinstance(rule, unrepr) and rule.s.isidentifier():
                    try:
                        nd = self.rules[rule.s][0].children[1]
                        refined = self._gen_search_rule(nd)
                    except IndexError:
                        refined = ""
                    if refined:
                        refined_rules.append(refined)
                    else:
                        self.tree.new_error(nd, 'Symbol "%s" cannot be used in resume rule, since'
                                                ' it represents neither literal nor regexp!' % rule.s)
                else:
                    refined_rules.append(rule)
            resume_rules[symbol] = refined_rules
        definitions.append((self.RESUME_RULES_KEYWORD, repr(resume_rules)))

        # prepare and add customized error-messages

        for symbol, err_msgs in self.directives.error.items():
            custom_errors = []  # type: List[Tuple[ReprType, ReprType]]
            for search, message in err_msgs:
                if isinstance(search, unrepr) and search.s.isidentifier():
                    try:
                        nd = self.rules[search.s][0].children[1]
                        search = self._gen_search_rule(nd)
                    except IndexError:
                        search = ''
                custom_errors.append((search, message))
            definitions.append((symbol + self.ERR_MSG_SUFFIX, repr(custom_errors)))

        for symbol in self.directives.error.keys():
            if symbol not in self.consumed_custom_errors:
                def_node = self.rules[symbol][0]
                self.tree.new_error(
                    def_node, 'Customized error message for symbol "{}" will never be used, '
                    'because the mandatory marker "§" appears nowhere in its definiendum!'
                    .format(symbol), Error.UNUSED_ERROR_HANDLING_WARNING)

        # prepare and add skip-rules

        for symbol, skip in self.directives.skip.items():
            skip_rules = []  # type: List[Tuple[ReprType, ReprType]]
            for search in skip:
                if isinstance(search, unrepr) and search.s.isidentifier():
                    try:
                        nd = self.rules[search.s][0].children[1]
                        search = self._gen_search_rule(nd)
                    except IndexError:
                        search = ''
                skip_rules.append(search)
            definitions.append((symbol + self.SKIP_RULES_SUFFIX, repr(skip_rules)))

        for symbol in self.directives.error.keys():
            if symbol not in self.consumed_skip_rules:
                def_node = self.rules[symbol][0]
                self.tree.new_error(
                    def_node, '"Skip-rules" for symbol "{}" will never be used, '
                    'because the mandatory marker "§" appears nowhere in its definiendum!'
                    .format(symbol), Error.UNUSED_ERROR_HANDLING_WARNING)

        # prepare parser class header and docstring and
        # add EBNF grammar to the doc string of the parser class

        article = 'an ' if self.grammar_name[0:1] in "AaEeIiOoUu" else 'a '  # what about 'hour', 'universe' etc.?
        show_source = get_config_value('add_grammar_source_to_parser_docstring')
        declarations = ['class ' + self.grammar_name
                        + 'Grammar(Grammar):',
                        'r"""Parser for ' + article + self.grammar_name
                        + ' source file'
                        + ('. Grammar:' if self.grammar_source and show_source else '.')]
        definitions.append(('parser_initialization__', '"upon instantiation"'))
        if self.grammar_source:
            definitions.append(('source_hash__',
                                '"%s"' % md5(self.grammar_source, __version__)))
            declarations.append('')
            if show_source:
                declarations += [line for line in self.grammar_source.split('\n')]
            while declarations[-1].strip() == '':
                declarations = declarations[:-1]
        declarations.append('"""')

        # turn definitions into declarations in reverse order

        self.root_symbol = definitions[0][0] if definitions else ""
        definitions.reverse()
        declarations += [symbol + ' = Forward()'
                         for symbol in sorted(list(self.recursive))]
        for symbol, statement in definitions:
            if symbol in self.recursive:
                declarations += [symbol + '.set(' + statement + ')']
            else:
                declarations += [symbol + ' = ' + statement]

        # check for symbols used but never defined

        defined_symbols = set(self.rules.keys()) | self.RESERVED_SYMBOLS
        for symbol in self.symbols:
            if symbol not in defined_symbols:
                self.tree.new_error(self.symbols[symbol],
                                    "Missing definition for symbol '%s'" % symbol)
                # root_node.error_flag = True

        # check for unconnected rules

        defined_symbols.difference_update(self.RESERVED_SYMBOLS)

        def remove_connections(symbol):
            """Recursively removes all symbols which appear in the
            definiens of a particular symbol."""
            if symbol in defined_symbols:
                defined_symbols.remove(symbol)
                for related in self.rules[symbol][1:]:
                    remove_connections(str(related))

        remove_connections(self.root_symbol)
        for leftover in defined_symbols:
            self.tree.new_error(self.rules[leftover][0],
                                ('Rule "%s" is not connected to parser root "%s" !') %
                                (leftover, self.root_symbol), Error.WARNING)

        # set root_symbol parser and assemble python grammar definition

        if self.root_symbol and 'root__' not in self.rules:
            declarations.append('root__ = ' + self.root_symbol)
        declarations.append('')
        self._result = '\n    '.join(declarations) \
                       + GRAMMAR_FACTORY.format(NAME=self.grammar_name, ID=self.grammar_id)
        return self._result


    ## compilation methods

    def on_syntax(self, node: Node) -> str:
        definitions = []  # type: List[Tuple[str, str]]

        # drop the wrapping sequence node
        if len(node.children) == 1 and not node.children[0].parser.name:
            node = node.children[0]

        # compile definitions and directives and collect definitions
        for nd in node.children:
            if nd.parser.name == "definition":
                definitions.append(self.compile(nd))
            else:
                assert nd.parser.name == "directive", nd.as_sxpr()
                self.compile(nd)
            # node.error_flag = max(node.error_flag, nd.error_flag)
        self.definitions.update(definitions)

        return self.assemble_parser(definitions, node)


    def on_definition(self, node: Node) -> Tuple[str, str]:
        rule = node.children[0].content
        if rule in self.rules:
            first = self.rules[rule][0]
            if not first.errors:
                self.tree.new_error(first, 'First definition of rule "%s" '
                                    'followed by illegal redefinitions.' % rule)
            self.tree.new_error(node, 'A rule "%s" has already been defined earlier.' % rule)
        elif rule in EBNFCompiler.RESERVED_SYMBOLS:
            self.tree.new_error(node, 'Symbol "%s" is a reserved symbol.' % rule)
        elif not sane_parser_name(rule):
            self.tree.new_error(node, 'Illegal symbol "%s". Symbols must not start or '
                                'end with a double underscore "__".' % rule)
        elif rule in self.directives.tokens:
            self.tree.new_error(node, 'Symbol "%s" has already been defined as '
                                'a preprocessor token.' % rule)
        elif keyword.iskeyword(rule):
            self.tree.new_error(node, 'Python keyword "%s" may not be used as a symbol. '
                                % rule + '(This may change in the future.)')
        try:
            self.current_symbols = [node]
            self.rules[rule] = self.current_symbols
            defn = self.compile(node.children[1])
            if rule in self.variables:
                defn = 'Capture(%s)' % defn
                self.variables.remove(rule)
            elif defn.find("(") < 0:
                # assume it's a synonym, like 'page = REGEX_PAGE_NR'
                defn = 'Synonym(%s)' % defn
        except TypeError as error:
            from traceback import extract_tb
            trace = str(extract_tb(error.__traceback__)[-1])
            errmsg = "%s (TypeError: %s; %s)\n%s" \
                     % (EBNFCompiler.AST_ERROR, str(error), trace, node.as_sxpr())
            self.tree.new_error(node, errmsg)
            rule, defn = rule + ':error', '"' + errmsg + '"'
        return rule, defn


    def on_directive(self, node: Node) -> str:
        key = node.children[0].content
        assert key not in self.directives.tokens

        if key not in self.REPEATABLE_DIRECTIVES:
            if key in self.defined_directives:
                self.tree.new_error(node, 'Directive "%s" has already been defined earlier. '
                                    % key + 'Later definition will be ignored!',
                                    code=Error.REDEFINED_DIRECTIVE_WARNING)
                return ""
            self.defined_directives.add(key)

        def check_argnum(n: int = 1):
            if len(node.children) > n + 1:
                self.tree.new_error(node, 'Directive "%s" can have at most %i values.' % (key, n))

        if key in {'comment', 'whitespace'}:
            check_argnum()
            if node.children[1].parser.name == "symbol":
                value = node.children[1].content
                if key == 'whitespace' and value in WHITESPACE_TYPES:
                    value = WHITESPACE_TYPES[value]  # replace whitespace-name by regex
                else:
                    self.tree.new_error(node, 'Value "%s" not allowed for directive "%s".'
                                        % (value, key))
            else:
                value = self._extract_regex(node.children[1])
                if key == 'whitespace' and not re.match(value, ''):
                    self.tree.new_error(node, "Implicit whitespace should always "
                                        "match the empty string, /%s/ does not." % value)
            self.directives[key] = value

        elif key == 'ignorecase':
            check_argnum()
            if node.children[1].content.lower() not in {"off", "false", "no"}:
                self.re_flags.add('i')

        elif key == 'literalws':
            values = {child.content.strip().lower() for child in node.children[1:]}
            if ((values - {'left', 'right', 'both', 'none'})
                    or ('none' in values and len(values) > 1)):
                self.tree.new_error(node, 'Directive "literalws" allows only `left`, `right`, '
                                    '`both` or `none`, not `%s`' % ", ".join(values))
            wsp = {'left', 'right'} if 'both' in values \
                else {} if 'none' in values else values
            self.directives.literalws = wsp

        elif key in {'tokens', 'preprocessor_tokens'}: