# ebnf.py - EBNF -> Python-Parser compilation for DHParser
#
# Copyright 2016  by Eckhart Arnold (arnold@badw.de)
#                 Bavarian Academy of Sciences and Humanities (badw.de)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.  See the License for the specific language governing
# permissions and limitations under the License.


"""
Module ``ebnf`` provides a self-hosting parser for EBNF-Grammars as
well as an EBNF-compiler that compiles an EBNF-Grammar into a
DHParser-based Grammar class that can be executed to parse source text
conforming to this grammar into concrete syntax trees.
"""

from collections import OrderedDict
from functools import partial
import keyword
import os

from DHParser.compile import CompilerError, Compiler, compile_source, visitor_name
from DHParser.error import Error
from DHParser.parse import Grammar, mixin_comment, Forward, RegExp, Whitespace, \
    NegativeLookahead, Alternative, Series, Option, OneOrMore, ZeroOrMore, Token, \
    GrammarError
from DHParser.preprocess import nil_preprocessor, PreprocessorFunc
from DHParser.syntaxtree import Node, WHITESPACE_PTYPE, TOKEN_PTYPE
from DHParser.toolkit import load_if_file, escape_re, md5, sane_parser_name, re, expand_table, \
    GLOBALS, CONFIG_PRESET, get_config_value, unrepr, compile_python_object, typing
from DHParser.transform import TransformationFunc, traverse, remove_brackets, \
    reduce_single_child, replace_by_single_child, remove_expendables, \
    remove_tokens, flatten, forbid, assert_content
from DHParser.versionnumber import __version__
from typing import Callable, Dict, List, Set, Tuple, Sequence, Union, Optional, Any, cast


__all__ = ('get_ebnf_preprocessor',
           'get_ebnf_grammar',
           'get_ebnf_transformer',
           'get_ebnf_compiler',
           'EBNFGrammar',
           'EBNFTransform',
           'EBNFCompilerError',
           'EBNFCompiler',
           'grammar_changed',
           'compile_ebnf',
           'PreprocessorFactoryFunc',
           'ParserFactoryFunc',
           'TransformerFactoryFunc',
           'CompilerFactoryFunc')


########################################################################
#
# Presets
#
########################################################################

CONFIG_PRESET['add_grammar_source_to_parser_docstring'] = False
# CONFIG_PRESET['static_analysis'] = "early" # do a static analysis right
#                                            # after ebnf compilation
# already set in parse.py - config vars should probably be moved to
#                           a dedicated global module

########################################################################
#
# source code support
#
########################################################################


dhparserdir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))


DHPARSER_IMPORTS = '''
import collections
from functools import partial
import os
import sys

sys.path.append(r'{dhparserdir}')

try:
    import regex as re
except ImportError:
    import re
from DHParser import logging, is_filename, load_if_file, \\
    Grammar, Compiler, nil_preprocessor, PreprocessorToken, Whitespace, DropWhitespace, \\
    Lookbehind, Lookahead, Alternative, Pop, Token, DropToken, Synonym, AllOf, SomeOf, \\
    Unordered, Option, NegativeLookbehind, OneOrMore, RegExp, Retrieve, Series, Capture, \\
    ZeroOrMore, Forward, NegativeLookahead, Required, mixin_comment, compile_source, \\
    grammar_changed, last_value, counterpart, accumulate, PreprocessorFunc, is_empty, \\
    Node, TransformationFunc, TransformationDict, transformation_factory, traverse, \\
    remove_children_if, move_adjacent, normalize_whitespace, is_anonymous, matches_re, \\
    reduce_single_child, replace_by_single_child, replace_or_reduce, remove_whitespace, \\
    remove_expendables, remove_empty, remove_tokens, flatten, is_insignificant_whitespace, \\
    is_expendable, collapse, collapse_if, replace_content, WHITESPACE_PTYPE, TOKEN_PTYPE, \\
    remove_nodes, remove_content, remove_brackets, replace_parser, remove_anonymous_tokens, \\
    keep_children, is_one_of, not_one_of, has_content, apply_if, remove_first, remove_last, \\
    remove_anonymous_empty, keep_nodes, traverse_locally, strip, lstrip, rstrip, \\
    replace_content, replace_content_by, forbid, assert_content, remove_infix_operator, \\
    flatten_anonymous_nodes, error_on, recompile_grammar, GLOBALS
'''.format(dhparserdir=dhparserdir)


########################################################################
#
# EBNF scanning
#
########################################################################


def get_ebnf_preprocessor() -> PreprocessorFunc:
    return nil_preprocessor


########################################################################
#
# EBNF parsing
#
########################################################################

class EBNFGrammar(Grammar):
    r"""
    Parser for an EBNF source file, with this grammar:

    @ comment    = /#.*(?:\n|$)/                    # comments start with '#' and eat all chars up to and including '\n'
    @ whitespace = /\s*/                            # whitespace includes linefeed
    @ literalws  = right                            # trailing whitespace of literals will be ignored tacitly

    syntax     = [~//] { definition | directive } §EOF
    definition = symbol §"=" expression
    directive  = "@" §symbol "=" (regexp | literal | symbol) { "," (regexp | literal | symbol) }

    expression = term { "|" term }
    term       = { ["§"] factor }+                       # "§" means all following factors mandatory
    factor     = [flowmarker] [retrieveop] symbol !"="   # negative lookahead to be sure it's not a definition
               | [flowmarker] literal
               | [flowmarker] plaintext
               | [flowmarker] regexp
               | [flowmarker] whitespace
               | [flowmarker] oneormore
               | [flowmarker] group
               | [flowmarker] unordered
               | repetition
               | option

    flowmarker = "!"  | "&"                         # '!' negative lookahead, '&' positive lookahead
               | "-!" | "-&"                        # '-' negative lookbehind, '-&' positive lookbehind
    retrieveop = "::" | ":"                         # '::' pop, ':' retrieve

    group      = "(" §expression ")"
    unordered  = "<" §expression ">"                # elements of expression in arbitrary order
    oneormore  = "{" expression "}+"
    repetition = "{" §expression "}"
    option     = "[" §expression "]"

    symbol     = /(?!\d)\w+/~                       # e.g. expression, factor, parameter_list
    literal    = /"(?:[^"]|\\")*?"/~                # e.g. "(", '+', 'while'
               | /'(?:[^']|\\')*?'/~                # whitespace following literals will be ignored tacitly.
    plaintext  = /`(?:[^"]|\\")*?`/~                # like literal but does not eat whitespace
    regexp     = /\/(?:\\\/|[^\/])*?\//~            # e.g. /\w+/, ~/#.*(?:\n|$)/~
    whitespace = /~/~                               # insignificant whitespace

    EOF = !/./
    """
    expression = Forward()
    source_hash__ = "82a7c668f86b83f86515078e6c9093ed"
    static_analysis_pending__ = False
    parser_initialization__ = ["upon instantiation"]
    COMMENT__ = r'#.*(?:\n|$)'
    WHITESPACE__ = r'\s*'
    WSP_RE__ = mixin_comment(whitespace=WHITESPACE__, comment=COMMENT__)
    wsp__ = Whitespace(WSP_RE__)
    EOF = NegativeLookahead(RegExp('.'))
    whitespace = Series(RegExp('~'), wsp__)
    regexp = Series(RegExp('/(?:\\\\/|[^/])*?/'), wsp__)
    # plaintext = Series(RegExp('`(?:[^"]|\\\\")*?`'), wsp__)
    plaintext = RegExp('`(?:\\\\`|[^"])*?`')
    # literal = Alternative(Series(RegExp('"(?:[^"]|\\\\")*?"'), wsp__), Series(RegExp("'(?:[^']|\\\\')*?'"), wsp__))
    literal = Alternative(Series(RegExp('"(?:\\\\"|[^"])*?"'), wsp__),
                          Series(RegExp("'(?:\\\\'|[^'])*?'"), wsp__))
    symbol = Series(RegExp('(?!\\d)\\w+'), wsp__)
    option = Series(Series(Token("["), wsp__), expression, Series(Token("]"), wsp__), mandatory=1)
    repetition = Series(Series(Token("{"), wsp__), expression, Series(Token("}"), wsp__), mandatory=1)
    oneormore = Series(Series(Token("{"), wsp__), expression, Series(Token("}+"), wsp__))
    unordered = Series(Series(Token("<"), wsp__), expression, Series(Token(">"), wsp__), mandatory=1)
    group = Series(Series(Token("("), wsp__), expression, Series(Token(")"), wsp__), mandatory=1)
    retrieveop = Alternative(Series(Token("::"), wsp__), Series(Token(":"), wsp__))
    flowmarker = Alternative(Series(Token("!"), wsp__), Series(Token("&"), wsp__),
                             Series(Token("-!"), wsp__), Series(Token("-&"), wsp__))
    factor = Alternative(Series(Option(flowmarker), Option(retrieveop), symbol,
                                NegativeLookahead(Series(Token("="), wsp__))), Series(Option(flowmarker), literal),
                         Series(Option(flowmarker), plaintext), Series(Option(flowmarker), regexp),
                         Series(Option(flowmarker), whitespace), Series(Option(flowmarker), oneormore),
                         Series(Option(flowmarker), group), Series(Option(flowmarker), unordered), repetition, option)
    term = OneOrMore(Series(Option(Series(Token("§"), wsp__)), factor))
    expression.set(Series(term, ZeroOrMore(Series(Series(Token("|"), wsp__), term))))
    directive = Series(Series(Token("@"), wsp__), symbol, Series(Token("="), wsp__),
                       Alternative(regexp, literal, symbol),
                       ZeroOrMore(Series(Series(Token(","), wsp__), Alternative(regexp, literal, symbol))), mandatory=1)
    definition = Series(symbol, Series(Token("="), wsp__), expression, mandatory=1)
    syntax = Series(Option(Series(wsp__, RegExp(''))), ZeroOrMore(Alternative(definition, directive)), EOF, mandatory=2)
    root__ = syntax
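# Illustrative sketch, not part of DHParser's public API: the self-hosting
# EBNFGrammar above can be called directly on EBNF source text. The sample
# grammar and the helper name `_example_parse_ebnf` are made up for
# demonstration purposes only.
def _example_parse_ebnf() -> None:
    ebnf_source = ('greeting = "Hello, " name \n'
                   'name     = /\\w+/~ \n')
    # Grammar objects are callable; the result is the concrete syntax tree
    concrete_syntax_tree = EBNFGrammar()(ebnf_source)
    print(concrete_syntax_tree.as_sxpr())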


def grammar_changed(grammar_class, grammar_source: str) -> bool:
    """
    Returns ``True`` if ``grammar_class`` does not reflect the latest
    changes of ``grammar_source``

    Parameters:
        grammar_class:  the parser class representing the grammar
            or the file name of a compiler suite containing the grammar
        grammar_source:  File name or string representation of the
            EBNF code of the grammar

    Returns (bool):
        True, if the source text of the grammar is different from the
        source from which the grammar class was generated
    """
    grammar = load_if_file(grammar_source)
    chksum = md5(grammar, __version__)
    if isinstance(grammar_class, str):
        # grammar_class = load_compiler_suite(grammar_class)[1]
        with open(grammar_class, 'r', encoding='utf8') as f:
            pycode = f.read()
        m = re.search(r'class \w*\(Grammar\)', pycode)
        if m:
            m = re.search('    source_hash__ *= *"([a-z0-9]*)"',
                          pycode[m.span()[1]:])
            return not (m and m.groups() and m.groups()[-1] == chksum)
        else:
            return True
    else:
        return chksum != grammar_class.source_hash__
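# Illustrative sketch, not part of the module's API: grammar_changed() is
# typically used to decide whether a previously generated parser script must
# be rebuilt. The file names passed in are hypothetical; the helper below
# merely forwards them.
def _example_needs_recompilation(parser_script_py: str, grammar_ebnf: str) -> bool:
    # True if the script's source_hash__ no longer matches the EBNF source
    return grammar_changed(parser_script_py, grammar_ebnf)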


def get_ebnf_grammar() -> EBNFGrammar:
    try:
        grammar = GLOBALS.ebnf_grammar_singleton
        return grammar
    except AttributeError:
        GLOBALS.ebnf_grammar_singleton = EBNFGrammar()
        return GLOBALS.ebnf_grammar_singleton


########################################################################
#
# EBNF concrete to abstract syntax tree transformation and validation
#
########################################################################


EBNF_AST_transformation_table = {
    # AST Transformations for EBNF-grammar
    "<":
        remove_expendables,
    "syntax":
        [],  # otherwise '"*": replace_by_single_child' would be applied
    "directive, definition":
        [flatten, remove_tokens('@', '=', ',')],
    "expression":
        [replace_by_single_child, flatten, remove_tokens('|')],  # remove_infix_operator],
    "term":
        [replace_by_single_child, flatten],  # supports both idioms:
                                             # "{ factor }+" and "factor { factor }"
    "factor, flowmarker, retrieveop":
        replace_by_single_child,
    "group":
        [remove_brackets, replace_by_single_child],
    "unordered":
        remove_brackets,
    "oneormore, repetition, option":
        [reduce_single_child, remove_brackets,
         forbid('repetition', 'option', 'oneormore'), assert_content(r'(?!§)(?:.|\n)*')],
    "symbol, literal, regexp":
        reduce_single_child,
    (TOKEN_PTYPE, WHITESPACE_PTYPE):
        reduce_single_child,
    # "list_":
    #     [flatten, remove_infix_operator],
    "*":
        replace_by_single_child
}

def EBNFTransform() -> TransformationFunc:
    return partial(traverse, processing_table=EBNF_AST_transformation_table.copy())


def get_ebnf_transformer() -> TransformationFunc:
    try:
        transformer = GLOBALS.EBNF_transformer_singleton
    except AttributeError:
        GLOBALS.EBNF_transformer_singleton = EBNFTransform()
        transformer = GLOBALS.EBNF_transformer_singleton
    return transformer
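# Illustrative sketch, not part of the module's API: the usual two-step use of
# the accessors above. get_ebnf_grammar() parses EBNF source into a concrete
# syntax tree and get_ebnf_transformer() prunes it to an abstract syntax tree
# in place. The helper name `_example_ebnf_to_ast` is made up.
def _example_ebnf_to_ast(ebnf_source: str) -> Node:
    syntax_tree = get_ebnf_grammar()(ebnf_source)
    get_ebnf_transformer()(syntax_tree)
    return syntax_tree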


########################################################################
#
# EBNF abstract syntax tree to Python parser compilation
#
########################################################################

PreprocessorFactoryFunc = Callable[[], PreprocessorFunc]
ParserFactoryFunc = Callable[[], Grammar]
TransformerFactoryFunc = Callable[[], TransformationFunc]
CompilerFactoryFunc = Callable[[], Compiler]


PREPROCESSOR_FACTORY = '''
def get_preprocessor() -> PreprocessorFunc:
    return {NAME}Preprocessor
'''


GRAMMAR_FACTORY = '''
def get_grammar() -> {NAME}Grammar:
    global GLOBALS
    try:
        grammar = GLOBALS.{NAME}_{ID:08d}_grammar_singleton
    except AttributeError:
        GLOBALS.{NAME}_{ID:08d}_grammar_singleton = {NAME}Grammar()
        if hasattr(get_grammar, 'python_src__'):
            GLOBALS.{NAME}_{ID:08d}_grammar_singleton.python_src__ = get_grammar.python_src__
        grammar = GLOBALS.{NAME}_{ID:08d}_grammar_singleton
    return grammar
'''


TRANSFORMER_FACTORY = '''
def {NAME}Transform() -> TransformationDict:
    return partial(traverse, processing_table={NAME}_AST_transformation_table.copy())

def get_transformer() -> TransformationFunc:
    try:
        transformer = GLOBALS.{NAME}_{ID:08d}_transformer_singleton
    except AttributeError:
        GLOBALS.{NAME}_{ID:08d}_transformer_singleton = {NAME}Transform()
        transformer = GLOBALS.{NAME}_{ID:08d}_transformer_singleton
    return transformer
'''


COMPILER_FACTORY = '''
def get_compiler() -> {NAME}Compiler:
    try:
        compiler = GLOBALS.{NAME}_{ID:08d}_compiler_singleton
    except AttributeError:
        GLOBALS.{NAME}_{ID:08d}_compiler_singleton = {NAME}Compiler()
        compiler = GLOBALS.{NAME}_{ID:08d}_compiler_singleton
    return compiler
'''
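# Illustrative sketch: the *_FACTORY templates above are ordinary format
# strings that EBNFCompiler fills in with the grammar's name and a numeric id
# when it assembles a parser script. The grammar name 'Arithmetic' and the
# helper below are hypothetical.
def _example_render_factories(name: str = 'Arithmetic', uid: int = 1) -> str:
    return (GRAMMAR_FACTORY.format(NAME=name, ID=uid)
            + TRANSFORMER_FACTORY.format(NAME=name, ID=uid)
            + COMPILER_FACTORY.format(NAME=name, ID=uid))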


WHITESPACE_TYPES = {'horizontal': r'[\t ]*',  # default: horizontal
                    'linefeed': r'[ \t]*\n?(?!\s*\n)[ \t]*',
                    'vertical': r'\s*'}

DROP_TOKEN  = 'token'
DROP_WSPC   = 'whitespace'
DROP_VALUES = {DROP_TOKEN, DROP_WSPC}

# Representation of Python code or, rather, something that will be output as Python code
ReprType = Union[str, unrepr]


class EBNFDirectives:
    """
    A Record that keeps information about compiler directives
    during the compilation process.

    Attributes:
        whitespace:  the regular expression string for (insignificant)
                whitespace

        comment:  the regular expression string for comments

        literalws:  automatic whitespace eating next to literals. Can
                be either 'left', 'right', 'none', 'both'

        tokens:  set of the names of preprocessor tokens
        filter:  mapping of symbols to python filter functions that
                will be called on any retrieve / pop - operations on
                these symbols

        error:  mapping of symbols to tuples of match conditions and
                customized error messages. A match condition can be
                either a string or a regular expression. The first
                error message where the search condition matches will
                be displayed. An empty string '' as search condition
                always matches, so in case of multiple error messages,
                this condition should be placed at the end.

        skip:  mapping of symbols to a list of search expressions. A
                search expression can be either a string or a regular
                expression. The closest match is the point of reentry
                for the series-parser when a mandatory item failed to
                match the following text.

        resume:  mapping of symbols to a list of search expressions. A
                search expression can be either a string or a regular
                expression. The closest match is the point of reentry
                after a parsing error has occurred. Unlike the skip
                field, this configures resuming after the failing
                parser has returned.
    """
    __slots__ = ['whitespace', 'comment', 'literalws', 'tokens', 'filter', 'error', 'skip',
                 'resume', 'drop']
    def __init__(self):
        self.whitespace = WHITESPACE_TYPES['vertical']  # type: str
        self.comment = ''     # type: str
        self.literalws = {'right'}  # type: Collection[str]
        self.tokens = set()   # type: Collection[str]
        self.filter = dict()  # type: Dict[str, str]
        self.error = dict()   # type: Dict[str, List[Tuple[ReprType, ReprType]]]
        self.skip = dict()    # type: Dict[str, List[Union[unrepr, str]]]
        self.resume = dict()  # type: Dict[str, List[Union[unrepr, str]]]
        self.drop = set()     # type: Set[str]

    def __getitem__(self, key):
        return getattr(self, key)

    def __setitem__(self, key, value):
        assert hasattr(self, key)
        setattr(self, key, value)

    def keys(self):
        return self.__slots__


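# Illustrative sketch, not part of the API: EBNFDirectives acts as a small
# record whose fields can also be read and written dict-style, restricted to
# the attributes listed in __slots__. The values below are only examples.
def _example_directives_record() -> EBNFDirectives:
    directives = EBNFDirectives()
    directives['literalws'] = {'left'}      # equivalent to directives.literalws = {'left'}
    directives['whitespace'] = WHITESPACE_TYPES['horizontal']
    return directives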
class EBNFCompilerError(CompilerError):
    """Error raised by `EBNFCompiler` class. (Not compilation errors
    in the strict sense, see `CompilationError` in module ``dsl.py``)"""
    pass


class EBNFCompiler(Compiler):
    """
    Generates a Parser from an abstract syntax tree of a grammar specified
    in EBNF-Notation.

    Instances of this class must be called with the root-node of the
    abstract syntax tree from an EBNF-specification of a formal language.
    The returned value is the Python-source-code of a Grammar class for
    this language that can be used to parse texts in this language.
    See classes `parser.Compiler` and `parser.Grammar` for more information.

    Additionally, class EBNFCompiler provides helper methods to generate
    code-skeletons for a preprocessor, AST-transformation and full
    compilation of the formal language. These methods' names start with
    the prefix `gen_`.

    Attributes:
        current_symbols:  During compilation, a list containing the root
                node of the currently compiled definition as first element
                and then the nodes of the symbols that are referred to in
                the currently compiled definition.

        rules:  Dictionary that maps rule names to a list of Nodes that
                contain symbol-references in the definition of the rule.
                The first item in the list is the node of the rule-
                definition itself. Example:

                           `alternative = a | b`

                Now `[node.content for node in self.rules['alternative']]`
                yields `['alternative = a | b', 'a', 'b']`

        symbols:  A mapping of symbol names to their first usage (not
                their definition!) in the EBNF source.

        variables:  A set of symbol names that are used with the
                Pop or Retrieve operator. Because the values of these
                symbols need to be captured they are called variables.
                See `test_parser.TestPopRetrieve` for an example.

        recursive:  A set of symbols that are used recursively and
                therefore require a `Forward`-operator.

        definitions:  A dictionary of definitions. Unlike `rules`, this
                maps the symbols to their compiled definienda.

        deferred_tasks:  A list of callables that is filled during
                compilation, but that will be executed only after
                compilation has finished. Typically, it contains
                semantic checks that require information that
                is only available upon completion of compilation.

        root_symbol: The name of the root symbol.

        directives:  A record of all directives and their default values.

        defined_directives:  A set of all directives that have already been
                defined. With the exception of those directives contained
                in EBNFCompiler.REPEATABLE_DIRECTIVES, directives must only
                be defined once.

        consumed_custom_errors:  A set of symbols for which a custom error
                has been defined and(!) consumed during compilation. This
                allows adding a compiler error in those cases where (i) an
                error message has been defined but will never be used or (ii)
                an error message is accidentally used twice. For examples, see
                `test_ebnf.TestErrorCustomization`.

        consumed_skip_rules: The same as `consumed_custom_errors`, only for
                in-series-resume-rules (aka 'skip-rules') for Series-parsers.

        re_flags:  A set of regular expression flags to be added to all
                regular expressions found in the current parsing process

        grammar_name:  The name of the grammar to be compiled

        grammar_source:  The source code of the grammar to be compiled.

        grammar_id: a unique id for every compiled grammar. (Required for
                disambiguation of thread-local variables storing
                compiled texts.)
    """
    COMMENT_KEYWORD = "COMMENT__"
    WHITESPACE_KEYWORD = "WSP_RE__"
    RAW_WS_KEYWORD = "WHITESPACE__"
    WHITESPACE_PARSER_KEYWORD = "wsp__"
    DROP_WHITESPACE_PARSER_KEYWORD = "dwsp__"
    RESUME_RULES_KEYWORD = "resume_rules__"
    SKIP_RULES_SUFFIX = '_skip__'
    ERR_MSG_SUFFIX = '_err_msg__'
    RESERVED_SYMBOLS = {WHITESPACE_KEYWORD, RAW_WS_KEYWORD, COMMENT_KEYWORD,
                        RESUME_RULES_KEYWORD, ERR_MSG_SUFFIX}
    AST_ERROR = "Badly structured syntax tree. " \
                "Potentially due to erroneous AST transformation."
    PREFIX_TABLE = {'§': 'Required',
                    '&': 'Lookahead', '!': 'NegativeLookahead',
                    '-&': 'Lookbehind', '-!': 'NegativeLookbehind',
                    '::': 'Pop', ':': 'Retrieve'}
    REPEATABLE_DIRECTIVES = {'tokens'}


    def __init__(self, grammar_name="DSL", grammar_source=""):
        self.grammar_id = 0
        super(EBNFCompiler, self).__init__()  # calls the _reset()-method
        self.set_grammar_name(grammar_name, grammar_source)


    def _reset(self):
        super(EBNFCompiler, self)._reset()
        self._result = ''           # type: str
        self.re_flags = set()       # type: Set[str]
        self.rules = OrderedDict()  # type: OrderedDict[str, List[Node]]
        self.current_symbols = []   # type: List[Node]
        self.symbols = {}           # type: Dict[str, Node]
        self.variables = set()      # type: Set[str]
        self.recursive = set()      # type: Set[str]
        self.definitions = {}       # type: Dict[str, str]
        self.deferred_tasks = []    # type: List[Callable]
        self.root_symbol = ""       # type: str
        self.directives = EBNFDirectives()   # type: EBNFDirectives
        self.defined_directives = set()      # type: Set[str]
        self.consumed_custom_errors = set()  # type: Set[str]
        self.consumed_skip_rules = set()     # type: Set[str]
        self.grammar_id += 1


    @property
    def result(self) -> str:
        return self._result


    def set_grammar_name(self, grammar_name: str = "", grammar_source: str = ""):
        """
        Changes the grammar name and source.

        The grammar name and the source text are metadata that do not affect the
        compilation process. They are used to name and annotate the output.
        Returns `self`.
        """
        assert grammar_name == "" or re.match(r'\w+\Z', grammar_name)
        if not grammar_name and re.fullmatch(r'[\w/:\\]+', grammar_source):
            grammar_name = os.path.splitext(os.path.basename(grammar_source))[0]
        self.grammar_name = grammar_name or "NameUnknown"
        self.grammar_source = load_if_file(grammar_source)
        return self


    # methods for generating skeleton code for preprocessor, transformer, and compiler

    def gen_preprocessor_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for a preprocessor-function for
        the previously compiled formal language.
        """
        name = self.grammar_name + "Preprocessor"
        return "def %s(text):\n    return text, lambda i: i\n" % name \
               + PREPROCESSOR_FACTORY.format(NAME=self.grammar_name)


    def gen_transformer_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for the AST-transformation for the
        previously compiled formal language.
        """
        if not self.rules:
            raise EBNFCompilerError('Compiler must be run before calling '
                                    '"gen_transformer_Skeleton()"!')
        tt_name = self.grammar_name + '_AST_transformation_table'
        transtable = [tt_name + ' = {',
                      '    # AST Transformations for the ' + self.grammar_name + '-grammar']
        transtable.append('    "<": flatten_anonymous_nodes,')
        for name in self.rules:
            transformations = '[]'
            # rule = self.definitions[name]
            # if rule.startswith('Alternative'):
            #     transformations = '[replace_or_reduce]'
            # elif rule.startswith('Synonym'):
            #     transformations = '[reduce_single_child]'
            transtable.append('    "' + name + '": %s,' % transformations)
        # transtable.append('    ":Token": reduce_single_child,')
        transtable += ['    "*": replace_by_single_child', '}', '']
        transtable += [TRANSFORMER_FACTORY.format(NAME=self.grammar_name, ID=self.grammar_id)]
        return '\n'.join(transtable)


    def gen_compiler_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for a Compiler-class for the
        previously compiled formal language.
        """
        if not self.rules:
            raise EBNFCompilerError('Compiler has not been run before calling '
                                    '"gen_Compiler_Skeleton()"!')
        compiler = ['class ' + self.grammar_name + 'Compiler(Compiler):',
                    '    """Compiler for the abstract-syntax-tree of a '
                    + self.grammar_name + ' source file.',
                    '    """', '',
                    '    def __init__(self):',
                    '        super(' + self.grammar_name + 'Compiler, self).__init__()',
                    '',
                    '    def _reset(self):',
                    '        super()._reset()',
                    '        # initialize your variables here, not in the constructor!']
        for name in self.rules:
            method_name = visitor_name(name)
            if name == self.root_symbol:
                compiler += ['    def ' + method_name + '(self, node):',
                             '        return self.fallback_compiler(node)', '']
            else:
                compiler += ['    # def ' + method_name + '(self, node):',
                             '    #     return node', '']
        compiler += [COMPILER_FACTORY.format(NAME=self.grammar_name, ID=self.grammar_id)]
        return '\n'.join(compiler)

    def verify_transformation_table(self, transtable):
        """
        Checks for symbols that occur in the transformation-table but have
        never been defined in the grammar. Usually, this kind of
        inconsistency results from an error like a typo in the transformation
        table.
        """
        assert self._dirty_flag
        table_entries = set(expand_table(transtable).keys()) - {'*', '<', '>', '~'}
        symbols = self.rules.keys()
        messages = []
        for entry in table_entries:
            if entry not in symbols and not entry.startswith(":"):
                messages.append(Error(('Symbol "%s" is not defined in grammar %s but appears in '
                                       'the transformation table!') % (entry, self.grammar_name),
                                      0, Error.UNDEFINED_SYMBOL_IN_TRANSTABLE_WARNING))
        return messages

    def verify_compiler(self, compiler):
        """
        Checks for on_XXXX()-methods that occur in the compiler, although XXXX
        has never been defined in the grammar. Usually, this kind of
        inconsistency results from an error like a typo in the compiler-code.
        """
        pass  # TODO: add verification code here


    def _check_rx(self, node: Node, rx: str) -> str:
        """
        Checks whether the string `rx` represents a valid regular
        expression. Makes sure that multiline regular expressions are
        prefixed with the verbose-flag '(?x)'. Returns the regular expression string.
        """
        flags = self.re_flags | {'x'} if rx.find('\n') >= 0 else self.re_flags
        if flags:
            rx = "(?%s)%s" % ("".join(flags), rx)
        try:
            re.compile(rx)
        except Exception as re_error:
            self.tree.new_error(node, "malformed regular expression %s: %s" %
                                (repr(rx), str(re_error)))
        return rx


    def _extract_regex(self, node: Node) -> str:
        """Extracts regular expression string from regexp-Node."""
        value = node.content.strip("~")
        if value[0] + value[-1] in {'""', "''"}:
            value = escape_re(value[1:-1])
        elif value[0] + value[-1] == '//' and value != '//':
            value = self._check_rx(node, value[1:-1])
        return value


    def _gen_search_rule(self, nd: Node) -> ReprType:
        """Generates a search rule, which can be either a string for simple
        string search or a regular expression from the node's content. Returns
        an empty string in case the node is neither regexp nor literal.
        """
        if nd.tag_name == 'regexp':
            return unrepr("re.compile(r'%s')" % self._extract_regex(nd))
        elif nd.tag_name == 'literal':
            s = nd.content.strip()
            return s.strip('"') if s[0] == '"' else s.strip("'")
        return ''

    def _gen_search_list(self, nodes: Sequence[Node]) -> List[Union[unrepr, str]]:
        search_list = []  # type: List[Union[unrepr, str]]
        for child in nodes:
            rule = self._gen_search_rule(child)
            search_list.append(rule if rule else unrepr(child.content.strip()))
        return search_list


    def assemble_parser(self, definitions: List[Tuple[str, str]], root_node: Node) -> str:
        """
        Creates the Python code for the parser after compilation of
        the EBNF-Grammar
        """

        # execute deferred tasks, for example semantic checks that cannot
        # be done before the symbol table is complete

        for task in self.deferred_tasks:
            task()

        # provide for capturing of symbols that are variables, i.e. the
        # value of which will be retrieved at some point during the parsing process

        if self.variables:
            for i in range(len(definitions)):
                if definitions[i][0] in self.variables:
                    definitions[i] = (definitions[i][0], 'Capture(%s)' % definitions[i][1])

        # add special fields for Grammar class

        definitions.append((self.WHITESPACE_PARSER_KEYWORD,
                            'Whitespace(%s)' % self.WHITESPACE_KEYWORD))
        if DROP_WSPC in self.directives.drop:
            definitions.append((self.DROP_WHITESPACE_PARSER_KEYWORD,
                                'DropWhitespace(%s)' % self.WHITESPACE_KEYWORD))
        definitions.append((self.WHITESPACE_KEYWORD,
                            ("mixin_comment(whitespace=" + self.RAW_WS_KEYWORD
                             + ", comment=" + self.COMMENT_KEYWORD + ")")))
        definitions.append((self.RAW_WS_KEYWORD, "r'{}'".format(self.directives.whitespace)))
        definitions.append((self.COMMENT_KEYWORD, "r'{}'".format(self.directives.comment)))

        # prepare and add resume-rules

        resume_rules = dict()  # type: Dict[str, List[ReprType]]
        for symbol, raw_rules in self.directives.resume.items():
            refined_rules = []
            for rule in raw_rules:
                if isinstance(rule, unrepr) and rule.s.isidentifier():
                    try:
                        nd = self.rules[rule.s][0].children[1]
                        refined = self._gen_search_rule(nd)
                    except IndexError:
                        refined = ""
                    if refined:
                        refined_rules.append(refined)
                    else:
                        self.tree.new_error(nd, 'Symbol "%s" cannot be used in resume rule, since'
                                                ' it represents neither literal nor regexp!' % rule.s)
                else:
                    refined_rules.append(rule)
            resume_rules[symbol] = refined_rules
        definitions.append((self.RESUME_RULES_KEYWORD, repr(resume_rules)))

        # prepare and add customized error-messages

        for symbol, err_msgs in self.directives.error.items():
            custom_errors = []  # type: List[Tuple[ReprType, ReprType]]
            for search, message in err_msgs:
                if isinstance(search, unrepr) and search.s.isidentifier():
                    try:
                        nd = self.rules[search.s][0].children[1]
                        search = self._gen_search_rule(nd)
                    except IndexError:
                        search = ''
                custom_errors.append((search, message))
            definitions.append((symbol + self.ERR_MSG_SUFFIX, repr(custom_errors)))

        for symbol in self.directives.error.keys():
            if symbol not in self.consumed_custom_errors:
                def_node = self.rules[symbol][0]
                self.tree.new_error(
                    def_node, 'Customized error message for symbol "{}" will never be used, '
                    'because the mandatory marker "§" appears nowhere in its definiendum!'
                    .format(symbol), Error.UNUSED_ERROR_HANDLING_WARNING)

        # prepare and add skip-rules

        for symbol, skip in self.directives.skip.items():
            skip_rules = []  # type: List[ReprType]
            for search in skip:
                if isinstance(search, unrepr) and search.s.isidentifier():
                    try:
                        nd = self.rules[search.s][0].children[1]
                        search = self._gen_search_rule(nd)
                    except IndexError:
                        search = ''
                skip_rules.append(search)
            definitions.append((symbol + self.SKIP_RULES_SUFFIX, repr(skip_rules)))

        for symbol in self.directives.skip.keys():
            if symbol not in self.consumed_skip_rules:
                def_node = self.rules[symbol][0]
                self.tree.new_error(
                    def_node, '"Skip-rules" for symbol "{}" will never be used, '
                    'because the mandatory marker "§" appears nowhere in its definiendum!'
                    .format(symbol), Error.UNUSED_ERROR_HANDLING_WARNING)

        # prepare parser class header and docstring and
        # add EBNF grammar to the doc string of the parser class

        article = 'an ' if self.grammar_name[0:1] in "AaEeIiOoUu" else 'a '  # what about 'hour', 'universe' etc.?
        show_source = get_config_value('add_grammar_source_to_parser_docstring')
        declarations = ['class ' + self.grammar_name
                        + 'Grammar(Grammar):',
                        'r"""Parser for ' + article + self.grammar_name
                        + ' source file'
                        + ('. Grammar:' if self.grammar_source and show_source else '.')]
        definitions.append(('parser_initialization__', '["upon instantiation"]'))
        definitions.append(('static_analysis_pending__', 'True'))
        if self.grammar_source:
            definitions.append(('source_hash__',
                                '"%s"' % md5(self.grammar_source, __version__)))
            declarations.append('')
            if show_source:
                declarations += [line for line in self.grammar_source.split('\n')]
            while declarations[-1].strip() == '':
                declarations = declarations[:-1]
        declarations.append('"""')

        # turn definitions into declarations in reverse order

        self.root_symbol = definitions[0][0] if definitions else ""
        definitions.reverse()
        declarations += [symbol + ' = Forward()'
                         for symbol in sorted(list(self.recursive))]
        for symbol, statement in definitions:
            if symbol in self.recursive:
                declarations += [symbol + '.set(' + statement + ')']
            else:
                declarations += [symbol + ' = ' + statement]

        # check for symbols used but never defined

        defined_symbols = set(self.rules.keys()) | self.RESERVED_SYMBOLS
        for symbol in self.symbols:
            if symbol not in defined_symbols:
                self.tree.new_error(self.symbols[symbol],
                                    "Missing definition for symbol '%s'" % symbol)
                # root_node.error_flag = True

        # check for unconnected rules

        defined_symbols.difference_update(self.RESERVED_SYMBOLS)

        def remove_connections(symbol):
            """Recursively removes all symbols which appear in the
            definiens of a particular symbol."""
            if symbol in defined_symbols:
                defined_symbols.remove(symbol)
                for related in self.rules[symbol][1:]:
                    remove_connections(str(related))

        remove_connections(self.root_symbol)
        for leftover in defined_symbols:
            self.tree.new_error(self.rules[leftover][0],
                                ('Rule "%s" is not connected to parser root "%s" !') %
                                (leftover, self.root_symbol), Error.WARNING)

        # set root_symbol parser and assemble python grammar definition

        if self.root_symbol and 'root__' not in self.rules:
            declarations.append('root__ = ' + self.root_symbol)
        declarations.append('')
        self._result = '\n    '.join(declarations) \
                       + GRAMMAR_FACTORY.format(NAME=self.grammar_name, ID=self.grammar_id)
        return self._result


    ## compilation methods

    def on_syntax(self, node: Node) -> str:
        definitions = []  # type: List[Tuple[str, str]]

        # drop the wrapping sequence node
        if len(node.children) == 1 and node.children[0].is_anonymous():
            node = node.children[0]

        # compile definitions and directives and collect definitions
        for nd in node.children:
            if nd.tag_name == "definition":
                definitions.append(self.compile(nd))
            else:
                assert nd.tag_name == "directive", nd.as_sxpr()
                self.compile(nd)
            # node.error_flag = max(node.error_flag, nd.error_flag)
        self.definitions.update(definitions)

        grammar_python_src = self.assemble_parser(definitions, node)
        if get_config_value('static_analysis') == 'early':
            try:
                grammar_class = compile_python_object(DHPARSER_IMPORTS + grammar_python_src,
                                                      self.grammar_name)
                _ = grammar_class()
                grammar_python_src = grammar_python_src.replace(
                    'static_analysis_pending__ = True', 'static_analysis_pending__ = False', 1)
            except NameError:
                pass  # undefined names in the grammar have already been caught and reported
            except GrammarError as error:
                for sym, prs, err in error.errors:
                    symdef_node = self.rules[sym][0]
                    err.pos = self.rules[sym][0].pos
                    self.tree.add_error(symdef_node, err)
        return grammar_python_src


    def on_definition(self, node: Node) -> Tuple[str, str]:
        rule = node.children[0].content
        if rule in self.rules:
            first = self.rules[rule][0]
            if not first.errors:
                self.tree.new_error(first, 'First definition of rule "%s" '
                                    'followed by illegal redefinitions.' % rule)
            self.tree.new_error(node, 'A rule "%s" has already been defined earlier.' % rule)
        elif rule in EBNFCompiler.RESERVED_SYMBOLS:
            self.tree.new_error(node, 'Symbol "%s" is a reserved symbol.' % rule)
        elif not sane_parser_name(rule):
            self.tree.new_error(node, 'Illegal symbol "%s". Symbols must not start or '
                                'end with a double underscore "__".' % rule)
        elif rule in self.directives.tokens:
            self.tree.new_error(node, 'Symbol "%s" has already been defined as '
                                'a preprocessor token.' % rule)
        elif keyword.iskeyword(rule):
            self.tree.new_error(node, 'Python keyword "%s" may not be used as a symbol. '
                                % rule + '(This may change in the future.)')
        try:
            self.current_symbols = [node]
            self.rules[rule] = self.current_symbols
            defn = self.compile(node.children[1])
            if rule in self.variables:
                defn = 'Capture(%s)' % defn
                self.variables.remove(rule)
            elif defn.find("(") < 0:
                # assume it's a synonym, like 'page = REGEX_PAGE_NR'
                defn = 'Synonym(%s)' % defn
        except TypeError as error: