# ebnf.py - EBNF -> Python-Parser compilation for DHParser
#
# Copyright 2016  by Eckhart Arnold (arnold@badw.de)
#                 Bavarian Academy of Sciences and Humanities (badw.de)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.  See the License for the specific language governing
# permissions and limitations under the License.


"""
Module ``ebnf`` provides a self-hosting parser for EBNF-Grammars as
well as an EBNF-compiler that compiles an EBNF-Grammar into a
DHParser based Grammar class that can be executed to parse source text
conforming to this grammar into concrete syntax trees.
"""

from collections import OrderedDict
from functools import partial
import keyword
import os

from DHParser.compile import CompilerError, Compiler, compile_source, visitor_name
from DHParser.error import Error
from DHParser.parse import Grammar, mixin_comment, Forward, RegExp, Whitespace, \
    NegativeLookahead, Alternative, Series, Option, OneOrMore, ZeroOrMore, Token
from DHParser.preprocess import nil_preprocessor, PreprocessorFunc
from DHParser.syntaxtree import Node, WHITESPACE_PTYPE, TOKEN_PTYPE
from DHParser.toolkit import load_if_file, escape_re, md5, sane_parser_name, re, expand_table, \
    GLOBALS, CONFIG_PRESET, get_config_value, unrepr, typing
from DHParser.transform import TransformationFunc, traverse, remove_brackets, \
    reduce_single_child, replace_by_single_child, remove_expendables, \
    remove_tokens, flatten, forbid, assert_content
from DHParser.versionnumber import __version__
from typing import Callable, Dict, List, Set, Tuple, Sequence, Union, Optional, Any, cast


__all__ = ('get_ebnf_preprocessor',
           'get_ebnf_grammar',
           'get_ebnf_transformer',
           'get_ebnf_compiler',
           'EBNFGrammar',
           'EBNFTransform',
           'EBNFCompilerError',
           'EBNFCompiler',
           'grammar_changed',
           'compile_ebnf',
           'PreprocessorFactoryFunc',
           'ParserFactoryFunc',
           'TransformerFactoryFunc',
           'CompilerFactoryFunc')


########################################################################
#
# Presets
#
########################################################################

CONFIG_PRESET['add_grammar_source_to_parser_docstring'] = False
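# Example (illustrative, not part of the original module): overriding this
# preset before compilation embeds the EBNF source in the docstring of the
# generated Grammar class (see assemble_parser() below):
#
#     CONFIG_PRESET['add_grammar_source_to_parser_docstring'] = True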


########################################################################
#
# EBNF scanning
#
########################################################################


def get_ebnf_preprocessor() -> PreprocessorFunc:
    return nil_preprocessor


########################################################################
#
# EBNF parsing
#
########################################################################


class EBNFGrammar(Grammar):
    r"""
    Parser for an EBNF source file, with this grammar:

    @ comment    = /#.*(?:\n|$)/                    # comments start with '#' and eat all chars up to and including '\n'
    @ whitespace = /\s*/                            # whitespace includes linefeed
    @ literalws  = right                            # trailing whitespace of literals will be ignored tacitly

    syntax     = [~//] { definition | directive } §EOF
    definition = symbol §"=" expression
    directive  = "@" §symbol "=" (regexp | literal | symbol) { "," (regexp | literal | symbol) }

    expression = term { "|" term }
    term       = { ["§"] factor }+                       # "§" means all following factors mandatory
    factor     = [flowmarker] [retrieveop] symbol !"="   # negative lookahead to be sure it's not a definition
               | [flowmarker] literal
               | [flowmarker] plaintext
               | [flowmarker] regexp
               | [flowmarker] whitespace
               | [flowmarker] oneormore
               | [flowmarker] group
               | [flowmarker] unordered
               | repetition
               | option

    flowmarker = "!"  | "&"                         # '!' negative lookahead, '&' positive lookahead
               | "-!" | "-&"                        # '-' negative lookbehind, '-&' positive lookbehind
    retrieveop = "::" | ":"                         # '::' pop, ':' retrieve

    group      = "(" §expression ")"
    unordered  = "<" §expression ">"                # elements of expression in arbitrary order
    oneormore  = "{" expression "}+"
    repetition = "{" §expression "}"
    option     = "[" §expression "]"

    symbol     = /(?!\d)\w+/~                       # e.g. expression, factor, parameter_list
    literal    = /"(?:[^"]|\\")*?"/~                # e.g. "(", '+', 'while'
               | /'(?:[^']|\\')*?'/~                # whitespace following literals will be ignored tacitly.
    plaintext  = /`(?:[^"]|\\")*?`/~                # like literal but does not eat whitespace
    regexp     = /\/(?:\\\/|[^\/])*?\//~            # e.g. /\w+/, ~/#.*(?:\n|$)/~
    whitespace = /~/~                               # insignificant whitespace

    EOF = !/./
    """
    expression = Forward()
    source_hash__ = "82a7c668f86b83f86515078e6c9093ed"
    parser_initialization__ = ["upon instantiation"]
    COMMENT__ = r'#.*(?:\n|$)'
    WHITESPACE__ = r'\s*'
    WSP_RE__ = mixin_comment(whitespace=WHITESPACE__, comment=COMMENT__)
    wsp__ = Whitespace(WSP_RE__)
    EOF = NegativeLookahead(RegExp('.'))
    whitespace = Series(RegExp('~'), wsp__)
    regexp = Series(RegExp('/(?:\\\\/|[^/])*?/'), wsp__)
    plaintext = Series(RegExp('`(?:[^"]|\\\\")*?`'), wsp__)
    literal = Alternative(Series(RegExp('"(?:[^"]|\\\\")*?"'), wsp__), Series(RegExp("'(?:[^']|\\\\')*?'"), wsp__))
    symbol = Series(RegExp('(?!\\d)\\w+'), wsp__)
    option = Series(Series(Token("["), wsp__), expression, Series(Token("]"), wsp__), mandatory=1)
    repetition = Series(Series(Token("{"), wsp__), expression, Series(Token("}"), wsp__), mandatory=1)
    oneormore = Series(Series(Token("{"), wsp__), expression, Series(Token("}+"), wsp__))
    unordered = Series(Series(Token("<"), wsp__), expression, Series(Token(">"), wsp__), mandatory=1)
    group = Series(Series(Token("("), wsp__), expression, Series(Token(")"), wsp__), mandatory=1)
    retrieveop = Alternative(Series(Token("::"), wsp__), Series(Token(":"), wsp__))
    flowmarker = Alternative(Series(Token("!"), wsp__), Series(Token("&"), wsp__),
                             Series(Token("-!"), wsp__), Series(Token("-&"), wsp__))
    factor = Alternative(Series(Option(flowmarker), Option(retrieveop), symbol,
                                NegativeLookahead(Series(Token("="), wsp__))), Series(Option(flowmarker), literal),
                         Series(Option(flowmarker), plaintext), Series(Option(flowmarker), regexp),
                         Series(Option(flowmarker), whitespace), Series(Option(flowmarker), oneormore),
                         Series(Option(flowmarker), group), Series(Option(flowmarker), unordered), repetition, option)
    term = OneOrMore(Series(Option(Series(Token("§"), wsp__)), factor))
    expression.set(Series(term, ZeroOrMore(Series(Series(Token("|"), wsp__), term))))
    directive = Series(Series(Token("@"), wsp__), symbol, Series(Token("="), wsp__),
                       Alternative(regexp, literal, symbol),
                       ZeroOrMore(Series(Series(Token(","), wsp__), Alternative(regexp, literal, symbol))), mandatory=1)
    definition = Series(symbol, Series(Token("="), wsp__), expression, mandatory=1)
    syntax = Series(Option(Series(wsp__, RegExp(''))), ZeroOrMore(Alternative(definition, directive)), EOF, mandatory=2)
    root__ = syntax


def grammar_changed(grammar_class, grammar_source: str) -> bool:
    """
    Returns ``True`` if ``grammar_class`` does not reflect the latest
    changes of ``grammar_source``.

    Parameters:
        grammar_class:  the parser class representing the grammar
            or the file name of a compiler suite containing the grammar
        grammar_source:  File name or string representation of the
            EBNF code of the grammar

    Returns (bool):
        True, if the source text of the grammar is different from the
        source from which the grammar class was generated
    """
    grammar = load_if_file(grammar_source)
    chksum = md5(grammar, __version__)
    if isinstance(grammar_class, str):
        # grammar_class = load_compiler_suite(grammar_class)[1]
        with open(grammar_class, 'r', encoding='utf8') as f:
            pycode = f.read()
        m = re.search(r'class \w*\(Grammar\)', pycode)
        if m:
            m = re.search('    source_hash__ *= *"([a-z0-9]*)"',
                          pycode[m.span()[1]:])
            return not (m and m.groups() and m.groups()[-1] == chksum)
        else:
            return True
    else:
        return chksum != grammar_class.source_hash__
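
# Usage sketch (illustrative, not part of the module; the file names are
# hypothetical): ``grammar_changed`` is typically used to decide whether a
# generated parser module must be re-generated from its EBNF source, e.g.
#
#     if grammar_changed('exampleParser.py', 'example.ebnf'):
#         ...  # re-run the EBNF compiler to refresh exampleParser.py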


def get_ebnf_grammar() -> EBNFGrammar:
    try:
        grammar = GLOBALS.ebnf_grammar_singleton
        return grammar
    except AttributeError:
        GLOBALS.ebnf_grammar_singleton = EBNFGrammar()
        return GLOBALS.ebnf_grammar_singleton


########################################################################
#
# EBNF concrete to abstract syntax tree transformation and validation
#
########################################################################


EBNF_AST_transformation_table = {
    # AST Transformations for EBNF-grammar
    "<":
        remove_expendables,
    "syntax":
        [],  # otherwise '"*": replace_by_single_child' would be applied
    "directive, definition":
        [flatten, remove_tokens('@', '=', ',')],
    "expression":
        [replace_by_single_child, flatten, remove_tokens('|')],  # remove_infix_operator],
    "term":
        [replace_by_single_child, flatten],  # supports both idioms:
                                             # "{ factor }+" and "factor { factor }"
    "factor, flowmarker, retrieveop":
        replace_by_single_child,
    "group":
        [remove_brackets, replace_by_single_child],
    "unordered":
        remove_brackets,
    "oneormore, repetition, option":
        [reduce_single_child, remove_brackets,
         forbid('repetition', 'option', 'oneormore'), assert_content(r'(?!§)(?:.|\n)*')],
    "symbol, literal, regexp":
        reduce_single_child,
    (TOKEN_PTYPE, WHITESPACE_PTYPE):
        reduce_single_child,
    # "list_":
    #     [flatten, remove_infix_operator],
    "*":
        replace_by_single_child
}


def EBNFTransform() -> TransformationFunc:
    return partial(traverse, processing_table=EBNF_AST_transformation_table.copy())


def get_ebnf_transformer() -> TransformationFunc:
    try:
        transformer = GLOBALS.EBNF_transformer_singleton
    except AttributeError:
        GLOBALS.EBNF_transformer_singleton = EBNFTransform()
        transformer = GLOBALS.EBNF_transformer_singleton
    return transformer
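

# Illustrative usage sketch (added for clarity; not part of the original
# module). It shows how the factories above plug into ``compile_source()`` as
# imported at the top of this file, assuming it returns a (result, errors,
# syntax_tree) triple. The helper name and the two-rule sample grammar are
# made up; ``get_ebnf_compiler`` is defined further below in this module.
def _compile_ebnf_example() -> str:
    """Compiles a tiny EBNF grammar and returns the generated Python source."""
    ebnf_src = 'document = { word }\nword = /\\w+/~\n'
    result, errors, _ast = compile_source(
        ebnf_src,                   # EBNF source text
        get_ebnf_preprocessor(),    # no-op preprocessor (nil_preprocessor)
        get_ebnf_grammar(),         # parses the EBNF text into a syntax tree
        get_ebnf_transformer(),     # reduces the tree to an abstract syntax tree
        get_ebnf_compiler())        # emits the Python source of a Grammar class
    assert not errors, str(errors)  # ``errors`` lists any warnings or errors
    return result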


########################################################################
#
# EBNF abstract syntax tree to Python parser compilation
#
########################################################################


PreprocessorFactoryFunc = Callable[[], PreprocessorFunc]
ParserFactoryFunc = Callable[[], Grammar]
TransformerFactoryFunc = Callable[[], TransformationFunc]
CompilerFactoryFunc = Callable[[], Compiler]

PREPROCESSOR_FACTORY = '''
def get_preprocessor() -> PreprocessorFunc:
    return {NAME}Preprocessor
'''


GRAMMAR_FACTORY = '''
def get_grammar() -> {NAME}Grammar:
    global GLOBALS
    try:
        grammar = GLOBALS.{NAME}_{ID:08d}_grammar_singleton
    except AttributeError:
        GLOBALS.{NAME}_{ID:08d}_grammar_singleton = {NAME}Grammar()
        if hasattr(get_grammar, 'python_src__'):
            GLOBALS.{NAME}_{ID:08d}_grammar_singleton.python_src__ = get_grammar.python_src__
        grammar = GLOBALS.{NAME}_{ID:08d}_grammar_singleton
    return grammar
'''


TRANSFORMER_FACTORY = '''
def {NAME}Transform() -> TransformationDict:
    return partial(traverse, processing_table={NAME}_AST_transformation_table.copy())

def get_transformer() -> TransformationFunc:
    try:
        transformer = GLOBALS.{NAME}_{ID:08d}_transformer_singleton
    except AttributeError:
        GLOBALS.{NAME}_{ID:08d}_transformer_singleton = {NAME}Transform()
        transformer = GLOBALS.{NAME}_{ID:08d}_transformer_singleton
    return transformer
'''


COMPILER_FACTORY = '''
def get_compiler() -> {NAME}Compiler:
    try:
        compiler = GLOBALS.{NAME}_{ID:08d}_compiler_singleton
    except AttributeError:
        GLOBALS.{NAME}_{ID:08d}_compiler_singleton = {NAME}Compiler()
        compiler = GLOBALS.{NAME}_{ID:08d}_compiler_singleton
    return compiler
'''
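
# Example (illustrative note): the factory templates above are rendered by the
# ``gen_*_skeleton()`` and ``assemble_parser()`` methods below, e.g.
# ``COMPILER_FACTORY.format(NAME='Arithmetic', ID=1)`` yields a
# ``get_compiler()`` function returning a singleton ``ArithmeticCompiler``.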


WHITESPACE_TYPES = {'horizontal': r'[\t ]*',  # default: horizontal
                    'linefeed': r'[ \t]*\n?(?!\s*\n)[ \t]*',
                    'vertical': r'\s*'}
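
# For example (illustrative note): the EBNF directive ``@ whitespace = linefeed``
# selects WHITESPACE_TYPES['linefeed'], i.e. insignificant whitespace that may
# contain at most a single line break.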

DROP_TOKEN  = 'token'
DROP_WSPC   = 'whitespace'
DROP_VALUES = {DROP_TOKEN, DROP_WSPC}
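
# For example (illustrative note): a grammar containing ``@ drop = whitespace``
# adds DROP_WSPC to ``EBNFDirectives.drop``, which makes ``assemble_parser()``
# below emit an additional ``dwsp__ = DropWhitespace(...)`` parser.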

# Representation of Python code or, rather, something that will be output as Python code
ReprType = Union[str, unrepr]


class EBNFDirectives:
    """
    A Record that keeps information about compiler directives
    during the compilation process.

    Attributes:
        whitespace:  the regular expression string for (insignificant)
                whitespace

        comment:  the regular expression string for comments

        literalws:  automatic whitespace eating next to literals. Can
                be either 'left', 'right', 'none', 'both'

        tokens:  set of the names of preprocessor tokens
        filter:  mapping of symbols to python filter functions that
                will be called on any retrieve / pop - operations on
                these symbols

        error:  mapping of symbols to tuples of match conditions and
                customized error messages. A match condition can be
                either a string or a regular expression. The first
                error message where the search condition matches will
                be displayed. An empty string '' as search condition
                always matches, so in case of multiple error messages,
                this condition should be placed at the end.

        skip:  mapping of symbols to a list of search expressions. A
                search expression can be either a string or a regular
                expression. The closest match is the point of reentry
                for the series-parser when a mandatory item failed to
                match the following text.

        resume:  mapping of symbols to a list of search expressions. A
                search expression can be either a string or a regular
                expression. The closest match is the point of reentry
                after a parsing error has occurred. Other than the
                skip-field, this configures resuming after the failing
                parser has returned.
    """
    __slots__ = ['whitespace', 'comment', 'literalws', 'tokens', 'filter', 'error', 'skip',
                 'resume', 'drop']

    def __init__(self):
        self.whitespace = WHITESPACE_TYPES['vertical']  # type: str
        self.comment = ''     # type: str
        self.literalws = {'right'}  # type: Collection[str]
        self.tokens = set()   # type: Collection[str]
        self.filter = dict()  # type: Dict[str, str]
        self.error = dict()   # type: Dict[str, List[Tuple[ReprType, ReprType]]]
        self.skip = dict()    # type: Dict[str, List[Union[unrepr, str]]]
        self.resume = dict()  # type: Dict[str, List[Union[unrepr, str]]]
        self.drop = set()     # type: Set[str]

    def __getitem__(self, key):
        return getattr(self, key)

    def __setitem__(self, key, value):
        assert hasattr(self, key)
        setattr(self, key, value)

    def keys(self):
        return self.__slots__  # __slots__ is defined, so instances have no __dict__
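
# Example (illustrative): via __getitem__/__setitem__ the record can be used
# like a dictionary keyed by directive name:
#
#     directives = EBNFDirectives()
#     directives['literalws'] = {'left'}
#     assert directives.literalws == {'left'}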


class EBNFCompilerError(CompilerError):
    """Error raised by `EBNFCompiler` class. (Not compilation errors
    in the strict sense, see `CompilationError` in module ``dsl.py``)"""
    pass


class EBNFCompiler(Compiler):
    """
    Generates a Parser from an abstract syntax tree of a grammar specified
    in EBNF-Notation.

    Instances of this class must be called with the root-node of the
    abstract syntax tree from an EBNF-specification of a formal language.
    The returned value is the Python-source-code of a Grammar class for
    this language that can be used to parse texts in this language.
    See classes `parser.Compiler` and `parser.Grammar` for more information.

    Additionally, class EBNFCompiler provides helper methods to generate
    code-skeletons for a preprocessor, AST-transformation and full
    compilation of the formal language. These methods' names start with
    the prefix `gen_`.

    Attributes:
        current_symbols:  During compilation, a list containing the root
                node of the currently compiled definition as first element
                and then the nodes of the symbols that are referred to in
                the currently compiled definition.

        rules:  Dictionary that maps rule names to a list of Nodes that
                contain symbol-references in the definition of the rule.
                The first item in the list is the node of the rule-
                definition itself. Example:

                           `alternative = a | b`

                Now `[node.content for node in self.rules['alternative']]`
                yields `['alternative = a | b', 'a', 'b']`

        symbols:  A mapping of symbol names to their first usage (not
                their definition!) in the EBNF source.

        variables:  A set of symbol names that are used with the
                Pop or Retrieve operator. Because the values of these
                symbols need to be captured they are called variables.
                See `test_parser.TestPopRetrieve` for an example.

        recursive:  A set of symbols that are used recursively and
                therefore require a `Forward`-operator.

        definitions:  A dictionary of definitions. Other than `rules`
                this maps the symbols to their compiled definienda.

        deferred_tasks:  A list of callables that is filled during
                compilation, but that will be executed only after
                compilation has finished. Typically, it contains
                semantic checks that require information that
                is only available upon completion of compilation.

        root_symbol: The name of the root symbol.

        directives:  A record of all directives and their default values.

        defined_directives:  A set of all directives that have already been
                defined. With the exception of those directives contained
                in EBNFCompiler.REPEATABLE_DIRECTIVES, directives must only
                be defined once.

        consumed_custom_errors:  A set of symbols for which a custom error
                has been defined and(!) consumed during compilation. This
                allows adding a compiler error in those cases where (i) an
                error message has been defined but will never be used or (ii)
                an error message is accidentally used twice. For examples, see
                `test_ebnf.TestErrorCustomization`.

        consumed_skip_rules: The same as `consumed_custom_errors`, only for
                the in-series resume-rules (aka 'skip-rules') of Series-parsers.

        re_flags:  A set of regular expression flags to be added to all
                regular expressions found in the current parsing process.

        grammar_name:  The name of the grammar to be compiled.

        grammar_source:  The source code of the grammar to be compiled.

        grammar_id: a unique id for every compiled grammar. (Required for
                disambiguation of thread local variables storing
                compiled texts.)
    """
    COMMENT_KEYWORD = "COMMENT__"
    WHITESPACE_KEYWORD = "WSP_RE__"
Eckhart Arnold's avatar
Eckhart Arnold committed
488
    RAW_WS_KEYWORD = "WHITESPACE__"
    WHITESPACE_PARSER_KEYWORD = "wsp__"
    DROP_WHITESPACE_PARSER_KEYWORD = "dwsp__"
    RESUME_RULES_KEYWORD = "resume_rules__"
    SKIP_RULES_SUFFIX = '_skip__'
    ERR_MSG_SUFFIX = '_err_msg__'
    RESERVED_SYMBOLS = {WHITESPACE_KEYWORD, RAW_WS_KEYWORD, COMMENT_KEYWORD,
                        RESUME_RULES_KEYWORD, ERR_MSG_SUFFIX}
    AST_ERROR = "Badly structured syntax tree. " \
                "Potentially due to erroneous AST transformation."
    PREFIX_TABLE = {'§': 'Required',
                    '&': 'Lookahead', '!': 'NegativeLookahead',
                    '-&': 'Lookbehind', '-!': 'NegativeLookbehind',
                    '::': 'Pop', ':': 'Retrieve'}
    REPEATABLE_DIRECTIVES = {'tokens'}


    def __init__(self, grammar_name="DSL", grammar_source=""):
        self.grammar_id = 0
        super(EBNFCompiler, self).__init__()  # calls the _reset()-method
        self.set_grammar_name(grammar_name, grammar_source)


    def _reset(self):
        super(EBNFCompiler, self)._reset()
        self._result = ''           # type: str
        self.re_flags = set()       # type: Set[str]
        self.rules = OrderedDict()  # type: OrderedDict[str, List[Node]]
        self.current_symbols = []   # type: List[Node]
        self.symbols = {}           # type: Dict[str, Node]
        self.variables = set()      # type: Set[str]
        self.recursive = set()      # type: Set[str]
        self.definitions = {}       # type: Dict[str, str]
        self.deferred_tasks = []    # type: List[Callable]
        self.root_symbol = ""       # type: str
        self.directives = EBNFDirectives()   # type: EBNFDirectives
        self.defined_directives = set()      # type: Set[str]
        self.consumed_custom_errors = set()  # type: Set[str]
        self.consumed_skip_rules = set()     # type: Set[str]
        self.grammar_id += 1


    @property
    def result(self) -> str:
        return self._result


    def set_grammar_name(self, grammar_name: str = "", grammar_source: str = ""):
        """
        Changes the grammar name and source.

        The grammar name and the source text are metadata that do not affect the
        compilation process. They are used to name and annotate the output.
        Returns `self`.
        """
        assert grammar_name == "" or re.match(r'\w+\Z', grammar_name)
        if not grammar_name and re.fullmatch(r'[\w/:\\]+', grammar_source):
            grammar_name = os.path.splitext(os.path.basename(grammar_source))[0]
        self.grammar_name = grammar_name
        self.grammar_source = load_if_file(grammar_source)
        return self


    # methods for generating skeleton code for preprocessor, transformer, and compiler

    def gen_preprocessor_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for a preprocessor-function for
        the previously compiled formal language.
        """
        name = self.grammar_name + "Preprocessor"
        return "def %s(text):\n    return text, lambda i: i\n" % name \
               + PREPROCESSOR_FACTORY.format(NAME=self.grammar_name)


    def gen_transformer_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for the AST-transformation for the
        previously compiled formal language.
        """
        if not self.rules:
            raise EBNFCompilerError('Compiler must be run before calling '
                                    '"gen_transformer_skeleton()"!')
        tt_name = self.grammar_name + '_AST_transformation_table'
        transtable = [tt_name + ' = {',
                      '    # AST Transformations for the ' + self.grammar_name + '-grammar']
        transtable.append('    "<": flatten_anonymous_nodes,')
        for name in self.rules:
            transformations = '[]'
            # rule = self.definitions[name]
            # if rule.startswith('Alternative'):
            #     transformations = '[replace_or_reduce]'
            # elif rule.startswith('Synonym'):
            #     transformations = '[reduce_single_child]'
            transtable.append('    "' + name + '": %s,' % transformations)
        # transtable.append('    ":Token": reduce_single_child,')
        transtable += ['    "*": replace_by_single_child', '}', '']
        transtable += [TRANSFORMER_FACTORY.format(NAME=self.grammar_name, ID=self.grammar_id)]
        return '\n'.join(transtable)


    def gen_compiler_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for a Compiler-class for the
        previously compiled formal language.
        """
        if not self.rules:
            raise EBNFCompilerError('Compiler has not been run before calling '
                                    '"gen_compiler_skeleton()"!')
        compiler = ['class ' + self.grammar_name + 'Compiler(Compiler):',
                    '    """Compiler for the abstract-syntax-tree of a '
                    + self.grammar_name + ' source file.',
                    '    """', '',
                    '    def __init__(self):',
                    '        super(' + self.grammar_name + 'Compiler, self).__init__()',
                    '',
                    '    def _reset(self):',
                    '        super()._reset()',
                    '        # initialize your variables here, not in the constructor!']
        for name in self.rules:
            method_name = visitor_name(name)
            if name == self.root_symbol:
                compiler += ['    def ' + method_name + '(self, node):',
                             '        return self.fallback_compiler(node)', '']
            else:
                compiler += ['    # def ' + method_name + '(self, node):',
                             '    #     return node', '']
        compiler += [COMPILER_FACTORY.format(NAME=self.grammar_name, ID=self.grammar_id)]
        return '\n'.join(compiler)

    def verify_transformation_table(self, transtable):
        """
        Checks for symbols that occur in the transformation-table but have
        never been defined in the grammar. Usually, this kind of
        inconsistency results from an error like a typo in the transformation
        table.
        """
        assert self._dirty_flag
        table_entries = set(expand_table(transtable).keys()) - {'*', '<', '>', '~'}
        symbols = self.rules.keys()
        messages = []
        for entry in table_entries:
            if entry not in symbols and not entry.startswith(":"):
                messages.append(Error(('Symbol "%s" is not defined in grammar %s but appears in '
                                       'the transformation table!') % (entry, self.grammar_name),
                                      0, Error.UNDEFINED_SYMBOL_IN_TRANSTABLE_WARNING))
        return messages

    def verify_compiler(self, compiler):
        """
        Checks for on_XXXX()-methods that occur in the compiler, although XXXX
        has never been defined in the grammar. Usually, this kind of
        inconsistency results from an error like a typo in the compiler-code.
        """
        pass  # TODO: add verification code here
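        # Possible implementation sketch (an assumption, not part of the module):
        # warn about visitor methods whose symbol never occurs in the grammar,
        # mirroring verify_transformation_table() above, e.g.:
        #
        #     for attr in dir(compiler):
        #         if attr.startswith('on_') and attr[3:] not in self.rules:
        #             ...  # report a stray or misspelled on_XXXX() method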


    def _check_rx(self, node: Node, rx: str) -> str:
        """
        Checks whether the string `rx` represents a valid regular
        expression. Makes sure that multiline regular expressions are
        prepended by the multiline-flag. Returns the regular expression string.
        """
        flags = self.re_flags | {'x'} if rx.find('\n') >= 0 else self.re_flags
        if flags:
            rx = "(?%s)%s" % ("".join(flags), rx)
        try:
            re.compile(rx)
        except Exception as re_error:
            self.tree.new_error(node, "malformed regular expression %s: %s" %
                                (repr(rx), str(re_error)))
        return rx


    def _extract_regex(self, node: Node) -> str:
        """Extracts regular expression string from regexp-Node."""
        value = node.content.strip("~")
        if value[0] + value[-1] in {'""', "''"}:
            value = escape_re(value[1:-1])
        elif value[0] + value[-1] == '//' and value != '//':
            value = self._check_rx(node, value[1:-1])
        return value


    def _gen_search_rule(self, nd: Node) -> ReprType:
        """Generates a search rule, which can be either a string for simple
        string search or a regular expression from the node's content. Returns
        an empty string in case the node is neither regexp nor literal.
        """
        if nd.tag_name == 'regexp':
            return unrepr("re.compile(r'%s')" % self._extract_regex(nd))
        elif nd.tag_name == 'literal':
            s = nd.content.strip()
            return s.strip('"') if s[0] == '"' else s.strip("'")
        return ''

    def _gen_search_list(self, nodes: Sequence[Node]) -> List[Union[unrepr, str]]:
        search_list = []  # type: List[Union[unrepr, str]]
        for child in nodes:
            rule = self._gen_search_rule(child)
            search_list.append(rule if rule else unrepr(child.content.strip()))
        return search_list


    def assemble_parser(self, definitions: List[Tuple[str, str]], root_node: Node) -> str:
        """
        Creates the Python code for the parser after compilation of
        the EBNF-Grammar
        """

        # execute deferred tasks, for example semantic checks that cannot
        # be done before the symbol table is complete

        for task in self.deferred_tasks:
            task()

        # provide for capturing of symbols that are variables, i.e. the
        # value of which will be retrieved at some point during the parsing process

        if self.variables:
            for i in range(len(definitions)):
                if definitions[i][0] in self.variables:
                    definitions[i] = (definitions[i][0], 'Capture(%s)' % definitions[i][1])

        # add special fields for Grammar class

        definitions.append((self.WHITESPACE_PARSER_KEYWORD,
                            'Whitespace(%s)' % self.WHITESPACE_KEYWORD))
        if DROP_WSPC in self.directives.drop:
            definitions.append((self.DROP_WHITESPACE_PARSER_KEYWORD,
                                'DropWhitespace(%s)' % self.WHITESPACE_KEYWORD))
        definitions.append((self.WHITESPACE_KEYWORD,
                            ("mixin_comment(whitespace=" + self.RAW_WS_KEYWORD
                             + ", comment=" + self.COMMENT_KEYWORD + ")")))
        definitions.append((self.RAW_WS_KEYWORD, "r'{}'".format(self.directives.whitespace)))
        definitions.append((self.COMMENT_KEYWORD, "r'{}'".format(self.directives.comment)))

        # prepare and add resume-rules

        resume_rules = dict()  # type: Dict[str, List[ReprType]]
        for symbol, raw_rules in self.directives.resume.items():
            refined_rules = []
            for rule in raw_rules:
                if isinstance(rule, unrepr) and rule.s.isidentifier():
                    try:
                        nd = self.rules[rule.s][0].children[1]
                        refined = self._gen_search_rule(nd)
                    except IndexError:
                        refined = ""
                    if refined:
                        refined_rules.append(refined)
                    else:
                        self.tree.new_error(nd, 'Symbol "%s" cannot be used in resume rule, since'
                                                ' it represents neither literal nor regexp!' % rule.s)
                else:
                    refined_rules.append(rule)
            resume_rules[symbol] = refined_rules
        definitions.append((self.RESUME_RULES_KEYWORD, repr(resume_rules)))

        # prepare and add customized error-messages

        for symbol, err_msgs in self.directives.error.items():
            custom_errors = []  # type: List[Tuple[ReprType, ReprType]]
            for search, message in err_msgs:
                if isinstance(search, unrepr) and search.s.isidentifier():
                    try:
                        nd = self.rules[search.s][0].children[1]
                        search = self._gen_search_rule(nd)
                    except IndexError:
                        search = ''
                custom_errors.append((search, message))
            definitions.append((symbol + self.ERR_MSG_SUFFIX, repr(custom_errors)))

        for symbol in self.directives.error.keys():
            if symbol not in self.consumed_custom_errors:
                def_node = self.rules[symbol][0]
                self.tree.new_error(
                    def_node, 'Customized error message for symbol "{}" will never be used, '
                    'because the mandatory marker "§" appears nowhere in its definiendum!'
                    .format(symbol), Error.UNUSED_ERROR_HANDLING_WARNING)

        # prepare and add skip-rules

        for symbol, skip in self.directives.skip.items():
            skip_rules = []  # type: List[ReprType]
            for search in skip:
                if isinstance(search, unrepr) and search.s.isidentifier():
                    try:
                        nd = self.rules[search.s][0].children[1]
                        search = self._gen_search_rule(nd)
                    except IndexError:
                        search = ''
                skip_rules.append(search)
            definitions.append((symbol + self.SKIP_RULES_SUFFIX, repr(skip_rules)))

        for symbol in self.directives.error.keys():
            if symbol not in self.consumed_skip_rules:
                def_node = self.rules[symbol][0]
                self.tree.new_error(
                    def_node, '"Skip-rules" for symbol "{}" will never be used, '
                    'because the mandatory marker "§" appears nowhere in its definiendum!'
                    .format(symbol), Error.UNUSED_ERROR_HANDLING_WARNING)

        # prepare parser class header and docstring and
        # add EBNF grammar to the doc string of the parser class

        article = 'an ' if self.grammar_name[0:1] in "AaEeIiOoUu" else 'a '  # what about 'hour', 'universe' etc.?
        show_source = get_config_value('add_grammar_source_to_parser_docstring')
        declarations = ['class ' + self.grammar_name
                        + 'Grammar(Grammar):',
                        'r"""Parser for ' + article + self.grammar_name
                        + ' source file'
                        + ('. Grammar:' if self.grammar_source and show_source else '.')]
        definitions.append(('parser_initialization__', '["upon instantiation"]'))
        if self.grammar_source:
            definitions.append(('source_hash__',
                                '"%s"' % md5(self.grammar_source, __version__)))
            declarations.append('')
            if show_source:
                declarations += [line for line in self.grammar_source.split('\n')]
            while declarations[-1].strip() == '':
                declarations = declarations[:-1]
        declarations.append('"""')

        # turn definitions into declarations in reverse order

        self.root_symbol = definitions[0][0] if definitions else ""
        definitions.reverse()
        declarations += [symbol + ' = Forward()'
                         for symbol in sorted(list(self.recursive))]
        for symbol, statement in definitions:
            if symbol in self.recursive:
                declarations += [symbol + '.set(' + statement + ')']
            else:
                declarations += [symbol + ' = ' + statement]

        # check for symbols used but never defined

        defined_symbols = set(self.rules.keys()) | self.RESERVED_SYMBOLS
        for symbol in self.symbols:
            if symbol not in defined_symbols:
                self.tree.new_error(self.symbols[symbol],
                                    "Missing definition for symbol '%s'" % symbol)
                # root_node.error_flag = True

        # check for unconnected rules

        defined_symbols.difference_update(self.RESERVED_SYMBOLS)

        def remove_connections(symbol):
            """Recursively removes all symbols which appear in the
            definiens of a particular symbol."""
            if symbol in defined_symbols:
                defined_symbols.remove(symbol)
                for related in self.rules[symbol][1:]:
                    remove_connections(str(related))

        remove_connections(self.root_symbol)
        for leftover in defined_symbols:
            self.tree.new_error(self.rules[leftover][0],
                                ('Rule "%s" is not connected to parser root "%s" !') %
                                (leftover, self.root_symbol), Error.WARNING)

        # set root_symbol parser and assemble python grammar definition

        if self.root_symbol and 'root__' not in self.rules:
            declarations.append('root__ = ' + self.root_symbol)
        declarations.append('')
        self._result = '\n    '.join(declarations) \
                       + GRAMMAR_FACTORY.format(NAME=self.grammar_name, ID=self.grammar_id)
        return self._result


    ## compilation methods

    def on_syntax(self, node: Node) -> str:
        definitions = []  # type: List[Tuple[str, str]]

        # drop the wrapping sequence node
        if len(node.children) == 1 and node.children[0].is_anonymous():
            node = node.children[0]

        # compile definitions and directives and collect definitions
        for nd in node.children:
            if nd.tag_name == "definition":
                definitions.append(self.compile(nd))
            else:
                assert nd.tag_name == "directive", nd.as_sxpr()
                self.compile(nd)
            # node.error_flag = max(node.error_flag, nd.error_flag)
        self.definitions.update(definitions)

        return self.assemble_parser(definitions, node)


    def on_definition(self, node: Node) -> Tuple[str, str]:
        rule = node.children[0].content
        if rule in self.rules:
            first = self.rules[rule][0]
            if not first.errors:
                self.tree.new_error(first, 'First definition of rule "%s" '
                                    'followed by illegal redefinitions.' % rule)
            self.tree.new_error(node, 'A rule "%s" has already been defined earlier.' % rule)
        elif rule in EBNFCompiler.RESERVED_SYMBOLS:
            self.tree.new_error(node, 'Symbol "%s" is a reserved symbol.' % rule)
        elif not sane_parser_name(rule):
            self.tree.new_error(node, 'Illegal symbol "%s". Symbols must not start or '
                                'end with a double underscore "__".' % rule)
        elif rule in self.directives.tokens:
            self.tree.new_error(node, 'Symbol "%s" has already been defined as '
                                'a preprocessor token.' % rule)
        elif keyword.iskeyword(rule):
            self.tree.new_error(node, 'Python keyword "%s" may not be used as a symbol. '
                                % rule + '(This may change in the future.)')
        try:
            self.current_symbols = [node]
            self.rules[rule] = self.current_symbols
            defn = self.compile(node.children[1])
            if rule in self.variables:
                defn = 'Capture(%s)' % defn
                self.variables.remove(rule)
            elif defn.find("(") < 0:
                # assume it's a synonym, like 'page = REGEX_PAGE_NR'
                defn = 'Synonym(%s)' % defn
        except TypeError as error:
            from traceback import extract_tb
            trace = str(extract_tb(error.__traceback__)[-1])
            errmsg = "%s (TypeError: %s; %s)\n%s" \
                     % (EBNFCompiler.AST_ERROR, str(error), trace, node.as_sxpr())
            self.tree.new_error(node, errmsg)
            rule, defn = rule + ':error', '"' + errmsg + '"'
        return rule, defn


    def on_directive(self, node: Node) -> str:
        key = node.children[0].content
        assert key not in self.directives.tokens

        if key not in self.REPEATABLE_DIRECTIVES and not key.endswith('_error'):
            if key in self.defined_directives:
                self.tree.new_error(node, 'Directive "%s" has already been defined earlier. '
                                    % key + 'Later definition will be ignored!',
                                    code=Error.REDEFINED_DIRECTIVE)
                return ""
            self.defined_directives.add(key)

        def check_argnum(n: int = 1):
            if len(node.children) > n + 1:
                self.tree.new_error(node, 'Directive "%s" can have at most %i values.' % (key, n))

        if key in {'comment', 'whitespace'}:
            check_argnum()
            if node.children[1].tag_name == "symbol":