# ebnf.py - EBNF -> Python-Parser compilation for DHParser
#
# Copyright 2016  by Eckhart Arnold (arnold@badw.de)
#                 Bavarian Academy of Sciences and Humanities (badw.de)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.  See the License for the specific language governing
# permissions and limitations under the License.


"""
Module ``ebnf`` provides a self-hosting parser for EBNF-Grammars as
well as an EBNF-compiler that compiles an EBNF-Grammar into a
DHParser-based Grammar class that can be executed to parse source text
conforming to this grammar into concrete syntax trees.
"""


from collections import OrderedDict
from functools import partial
import keyword
import os
from typing import Callable, Dict, List, Set, Tuple, Sequence, Union, Optional, Any, cast

from DHParser.compile import CompilerError, Compiler, compile_source, visitor_name
from DHParser.error import Error
from DHParser.parse import Grammar, mixin_comment, Forward, RegExp, DropWhitespace, \
    NegativeLookahead, Alternative, Series, Option, OneOrMore, ZeroOrMore, Token, \
    GrammarError
from DHParser.preprocess import nil_preprocessor, PreprocessorFunc
from DHParser.syntaxtree import Node, WHITESPACE_PTYPE, TOKEN_PTYPE
from DHParser.toolkit import load_if_file, escape_re, md5, sane_parser_name, re, expand_table, \
    GLOBALS, get_config_value, unrepr, compile_python_object
from DHParser.transform import TransformationFunc, traverse, remove_brackets, \
    reduce_single_child, replace_by_single_child, remove_whitespace, remove_empty, \
    remove_tokens, flatten, forbid, assert_content
from DHParser.versionnumber import __version__



__all__ = ('get_ebnf_preprocessor',
           'get_ebnf_grammar',
           'get_ebnf_transformer',
           'get_ebnf_compiler',
           'EBNFGrammar',
           'EBNFTransform',
           'EBNFCompilerError',
           'EBNFCompiler',
           'grammar_changed',
           'compile_ebnf',
           'PreprocessorFactoryFunc',
           'ParserFactoryFunc',
           'TransformerFactoryFunc',
           'CompilerFactoryFunc')
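
# A rough usage sketch of the factory functions exported by this module (the
# EBNF snippet is a made-up example and the exact shape of compile_source()'s
# return value may differ between DHParser versions):
#
#     result, messages, ast = compile_source(
#         'word = /\w+/~ ', get_ebnf_preprocessor(), get_ebnf_grammar(),
#         get_ebnf_transformer(), get_ebnf_compiler())
#     # ``result`` should then be the Python source code of a Grammar class
#     # for the given EBNF grammar.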


########################################################################
#
# source code support
#
########################################################################


dhparserdir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))


DHPARSER_IMPORTS = '''
import collections
from functools import partial
import os
import sys

sys.path.append(r'{dhparserdir}')

try:
    import regex as re
except ImportError:
    import re
from DHParser import logging, is_filename, load_if_file, \\
    Grammar, Compiler, nil_preprocessor, PreprocessorToken, Whitespace, DropWhitespace, \\
    Lookbehind, Lookahead, Alternative, Pop, Token, DropToken, Synonym, AllOf, SomeOf, \\
    Unordered, Option, NegativeLookbehind, OneOrMore, RegExp, Retrieve, Series, Capture, \\
    ZeroOrMore, Forward, NegativeLookahead, Required, mixin_comment, compile_source, \\
    grammar_changed, last_value, counterpart, accumulate, PreprocessorFunc, is_empty, \\
    Node, TransformationFunc, TransformationDict, transformation_factory, traverse, \\
    remove_children_if, move_adjacent, normalize_whitespace, is_anonymous, matches_re, \\
    reduce_single_child, replace_by_single_child, replace_or_reduce, remove_whitespace, \\
    remove_empty, remove_tokens, flatten, is_insignificant_whitespace, \\
    collapse, collapse_if, replace_content, WHITESPACE_PTYPE, TOKEN_PTYPE, \\
    remove_nodes, remove_content, remove_brackets, change_tag_name, remove_anonymous_tokens, \\
    keep_children, is_one_of, not_one_of, has_content, apply_if, remove_first, remove_last, \\
    remove_anonymous_empty, keep_nodes, traverse_locally, strip, lstrip, rstrip, \\
    replace_content, replace_content_by, forbid, assert_content, remove_infix_operator, \\
    error_on, recompile_grammar, left_associative, lean_left, GLOBALS
'''.format(dhparserdir=dhparserdir)
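
# DHPARSER_IMPORTS is prepended to generated parser code, e.g. when the freshly
# compiled grammar is test-instantiated during early static analysis in
# ``EBNFCompiler.on_syntax()`` further below.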


########################################################################
#
# EBNF scanning
#
########################################################################


def get_ebnf_preprocessor() -> PreprocessorFunc:
    return nil_preprocessor


########################################################################
#
# EBNF parsing
#
########################################################################


class EBNFGrammar(Grammar):
    r"""
    Parser for an EBNF source file, with this grammar:

    @ comment    = /#.*(?:\n|$)/                    # comments start with '#' and eat all chars up to and including '\n'
    @ whitespace = /\s*/                            # whitespace includes linefeed
    @ literalws  = right                            # trailing whitespace of literals will be ignored tacitly

    syntax     = [~//] { definition | directive } §EOF
    definition = symbol §"=" expression
    directive  = "@" §symbol "=" (regexp | literal | symbol) { "," (regexp | literal | symbol) }

    expression = term { "|" term }
    term       = { ["§"] factor }+                       # "§" means all following factors mandatory
    factor     = [flowmarker] [retrieveop] symbol !"="   # negative lookahead to be sure it's not a definition
               | [flowmarker] literal
               | [flowmarker] plaintext
               | [flowmarker] regexp
               | [flowmarker] whitespace
               | [flowmarker] oneormore
               | [flowmarker] group
               | [flowmarker] unordered
               | repetition
               | option

    flowmarker = "!"  | "&"                         # '!' negative lookahead, '&' positive lookahead
               | "-!" | "-&"                        # '-' negative lookbehind, '-&' positive lookbehind
    retrieveop = "::" | ":"                         # '::' pop, ':' retrieve

    group      = "(" §expression ")"
    unordered  = "<" §expression ">"                # elements of expression in arbitrary order
    oneormore  = "{" expression "}+"
    repetition = "{" §expression "}"
    option     = "[" §expression "]"

    symbol     = /(?!\d)\w+/~                       # e.g. expression, factor, parameter_list
    literal    = /"(?:[^"]|\\")*?"/~                # e.g. "(", '+', 'while'
               | /'(?:[^']|\\')*?'/~                # whitespace following literals will be ignored tacitly.
    plaintext  = /`(?:[^"]|\\")*?`/~                # like literal but does not eat whitespace
    regexp     = /\/(?:\\\/|[^\/])*?\//~            # e.g. /\w+/, ~/#.*(?:\n|$)/~
    whitespace = /~/~                               # insignificant whitespace

    EOF = !/./
    """
    expression = Forward()
    source_hash__ = "82a7c668f86b83f86515078e6c9093ed"
    static_analysis_pending__ = []
    parser_initialization__ = ["upon instantiation"]
    COMMENT__ = r'#.*(?:\n|$)'
    WHITESPACE__ = r'\s*'
    WSP_RE__ = mixin_comment(whitespace=WHITESPACE__, comment=COMMENT__)
    wsp__ = DropWhitespace(WSP_RE__)
    EOF = NegativeLookahead(RegExp('.'))
    whitespace = Series(RegExp('~'), wsp__)
    regexp = Series(RegExp('/(?:(?<!\\\\)\\\\(?:/)|[^/])*?/'), wsp__)
    plaintext = Series(RegExp('`(?:(?<!\\\\)\\\\`|[^"])*?`'), wsp__)
    literal = Alternative(Series(RegExp('"(?:(?<!\\\\)\\\\"|[^"])*?"'), wsp__),
                          Series(RegExp("'(?:(?<!\\\\)\\\\'|[^'])*?'"), wsp__))
    symbol = Series(RegExp('(?!\\d)\\w+'), wsp__)
    option = Series(Series(Token("["), wsp__), expression, Series(Token("]"), wsp__), mandatory=1)
    repetition = Series(Series(Token("{"), wsp__), expression, Series(Token("}"), wsp__), mandatory=1)
    oneormore = Series(Series(Token("{"), wsp__), expression, Series(Token("}+"), wsp__))
    unordered = Series(Series(Token("<"), wsp__), expression, Series(Token(">"), wsp__), mandatory=1)
    group = Series(Series(Token("("), wsp__), expression, Series(Token(")"), wsp__), mandatory=1)
    retrieveop = Alternative(Series(Token("::"), wsp__), Series(Token(":"), wsp__))
    flowmarker = Alternative(Series(Token("!"), wsp__), Series(Token("&"), wsp__),
                             Series(Token("-!"), wsp__), Series(Token("-&"), wsp__))
    factor = Alternative(Series(Option(flowmarker), Option(retrieveop), symbol,
                                NegativeLookahead(Series(Token("="), wsp__))), Series(Option(flowmarker), literal),
                         Series(Option(flowmarker), plaintext), Series(Option(flowmarker), regexp),
                         Series(Option(flowmarker), whitespace), Series(Option(flowmarker), oneormore),
                         Series(Option(flowmarker), group), Series(Option(flowmarker), unordered), repetition, option)
    term = OneOrMore(Series(Option(Series(Token("§"), wsp__)), factor))
    expression.set(Series(term, ZeroOrMore(Series(Series(Token("|"), wsp__), term))))
    directive = Series(Series(Token("@"), wsp__), symbol, Series(Token("="), wsp__),
                       Alternative(regexp, literal, symbol),
                       ZeroOrMore(Series(Series(Token(","), wsp__), Alternative(regexp, literal, symbol))), mandatory=1)
    definition = Series(symbol, Series(Token("="), wsp__), expression, mandatory=1)
    syntax = Series(Option(Series(wsp__, RegExp(''))), ZeroOrMore(Alternative(definition, directive)), EOF, mandatory=2)
    root__ = syntax
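
# Example (sketch): a Grammar object is callable with source text and returns a
# concrete syntax tree as a DHParser ``Node``; the EBNF snippet is made up:
#
#     cst = get_ebnf_grammar()('word = /\w+/~ ')
#     print(cst.as_sxpr())    # S-expression rendering of the syntax tree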


def grammar_changed(grammar_class, grammar_source: str) -> bool:
    """
    Returns ``True`` if ``grammar_class`` does not reflect the latest
    changes of ``grammar_source``.

    Parameters:
        grammar_class:  the parser class representing the grammar
            or the file name of a compiler suite containing the grammar
        grammar_source:  File name or string representation of the
            EBNF code of the grammar

    Returns (bool):
        True, if the source text of the grammar is different from the
        source from which the grammar class was generated
    """
    grammar = load_if_file(grammar_source)
    chksum = md5(grammar, __version__)
    if isinstance(grammar_class, str):
        # grammar_class = load_compiler_suite(grammar_class)[1]
        with open(grammar_class, 'r', encoding='utf8') as f:
            pycode = f.read()
        m = re.search(r'class \w*\(Grammar\)', pycode)
        if m:
            m = re.search('    source_hash__ *= *"([a-z0-9]*)"',
                          pycode[m.span()[1]:])
            return not (m and m.groups() and m.groups()[-1] == chksum)
        else:
            return True
    else:
        return chksum != grammar_class.source_hash__
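
# Example (sketch, hypothetical names): regenerate the parser only when the
# EBNF source has changed since the parser class was generated:
#
#     if grammar_changed(ArithmeticGrammar, 'Arithmetic.ebnf'):
#         ...  # recompile the parser from 'Arithmetic.ebnf'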


def get_ebnf_grammar() -> EBNFGrammar:
    try:
        grammar = GLOBALS.ebnf_grammar_singleton
        return grammar
    except AttributeError:
        GLOBALS.ebnf_grammar_singleton = EBNFGrammar()
        return GLOBALS.ebnf_grammar_singleton


########################################################################
#
# EBNF concrete to abstract syntax tree transformation and validation
#
########################################################################


EBNF_AST_transformation_table = {
    # AST Transformations for EBNF-grammar
    "<":
        [remove_empty],  # remove_whitespace
    "syntax":
        [],  # otherwise '"*": replace_by_single_child' would be applied
    "directive, definition":
        [flatten, remove_tokens('@', '=', ',')],
    "expression":
        [replace_by_single_child, flatten, remove_tokens('|')],  # remove_infix_operator],
    "term":
        [replace_by_single_child, flatten],  # supports both idioms:
                                             # "{ factor }+" and "factor { factor }"
    "factor, flowmarker, retrieveop":
        replace_by_single_child,
    "group":
        [remove_brackets, replace_by_single_child],
    "unordered":
        remove_brackets,
    "oneormore, repetition, option":
        [reduce_single_child, remove_brackets,
         forbid('repetition', 'option', 'oneormore'), assert_content(r'(?!§)(?:.|\n)*')],
    "symbol, literal, regexp":
        reduce_single_child,
    (TOKEN_PTYPE, WHITESPACE_PTYPE):
        reduce_single_child,
    "*":
        replace_by_single_child
}


def EBNFTransform() -> TransformationFunc:
    return partial(traverse, processing_table=EBNF_AST_transformation_table.copy())


def get_ebnf_transformer() -> TransformationFunc:
    try:
        transformer = GLOBALS.EBNF_transformer_singleton
    except AttributeError:
        GLOBALS.EBNF_transformer_singleton = EBNFTransform()
        transformer = GLOBALS.EBNF_transformer_singleton
    return transformer
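
# Example (sketch): the transformation function works on the concrete syntax
# tree in place, yielding the abstract syntax tree that EBNFCompiler expects:
#
#     cst = get_ebnf_grammar()('word = /\w+/~ ')
#     get_ebnf_transformer()(cst)    # cst has now been reduced to the AST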


########################################################################
#
# EBNF abstract syntax tree to Python parser compilation
#
########################################################################


PreprocessorFactoryFunc = Callable[[], PreprocessorFunc]
ParserFactoryFunc = Callable[[], Grammar]
TransformerFactoryFunc = Callable[[], TransformationFunc]
CompilerFactoryFunc = Callable[[], Compiler]

PREPROCESSOR_FACTORY = '''
def get_preprocessor() -> PreprocessorFunc:
    return {NAME}Preprocessor
'''


GRAMMAR_FACTORY = '''
def get_grammar() -> {NAME}Grammar:
    """Returns a thread/process-exclusive {NAME}Grammar-singleton."""
    try:
        grammar = GLOBALS.{NAME}_{ID:08d}_grammar_singleton
    except AttributeError:
        GLOBALS.{NAME}_{ID:08d}_grammar_singleton = {NAME}Grammar()
        if hasattr(get_grammar, 'python_src__'):
            GLOBALS.{NAME}_{ID:08d}_grammar_singleton.python_src__ = get_grammar.python_src__
        grammar = GLOBALS.{NAME}_{ID:08d}_grammar_singleton
    return grammar
'''


TRANSFORMER_FACTORY = '''
def Create{NAME}Transformer() -> TransformationFunc:
    """Creates a transformation function that does not share state with other
    threads or processes."""
    return partial(traverse, processing_table={NAME}_AST_transformation_table.copy())

def get_transformer() -> TransformationFunc:
    """Returns a thread/process-exclusive transformation function."""
    try:
        transformer = GLOBALS.{NAME}_{ID:08d}_transformer_singleton
    except AttributeError:
        GLOBALS.{NAME}_{ID:08d}_transformer_singleton = Create{NAME}Transformer()
        transformer = GLOBALS.{NAME}_{ID:08d}_transformer_singleton
    return transformer
'''


COMPILER_FACTORY = '''
def get_compiler() -> {NAME}Compiler:
    """Returns a thread/process-exclusive {NAME}Compiler-singleton."""
    try:
        compiler = GLOBALS.{NAME}_{ID:08d}_compiler_singleton
    except AttributeError:
        GLOBALS.{NAME}_{ID:08d}_compiler_singleton = {NAME}Compiler()
        compiler = GLOBALS.{NAME}_{ID:08d}_compiler_singleton
    return compiler
'''


WHITESPACE_TYPES = {'horizontal': r'[\t ]*',  # default: horizontal
                    'linefeed': r'[ \t]*\n?(?!\s*\n)[ \t]*',
                    'vertical': r'\s*'}
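
# A rough illustration of the 'linefeed' whitespace model (informal sketch,
# probing the regular expression directly):
#
#     re.fullmatch(WHITESPACE_TYPES['linefeed'], ' \n ')    # matches: a single linefeed is allowed
#     re.match(WHITESPACE_TYPES['linefeed'], ' \n\n ')      # None: whitespace must not cross a blank line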

DROP_TOKEN  = 'token'
DROP_WSPC   = 'whitespace'
DROP_VALUES = {DROP_TOKEN, DROP_WSPC}

# Representation of Python code or, rather, something that will be output as Python code
ReprType = Union[str, unrepr]


class EBNFDirectives:
    """
    A Record that keeps information about compiler directives
    during the compilation process.

    Attributes:
        whitespace:  the regular expression string for (insignificant)
                whitespace

        comment:  the regular expression string for comments

        literalws:  automatic whitespace eating next to literals. Can
                be either 'left', 'right', 'none', 'both'

        tokens:  set of the names of preprocessor tokens
        filter:  mapping of symbols to python filter functions that
                will be called on any retrieve / pop - operations on
                these symbols

        error:  mapping of symbols to tuples of match conditions and
                customized error messages. A match condition can be
                either a string or a regular expression. The first
                error message where the search condition matches will
                be displayed. An empty string '' as search condition
                always matches, so in case of multiple error messages,
                this condition should be placed at the end.

        skip:  mapping of symbols to a list of search expressions. A
                search expression can be either a string or a regular
                expression. The closest match is the point of reentry
                for the series-parser when a mandatory item failed to
                match the following text.

        resume:  mapping of symbols to a list of search expressions. A
                search expression can be either a string or a regular
                expression. The closest match is the point of reentry
                after a parsing error has occurred. Other than the
                skip field, this configures resuming after the failing
                parser has returned.
    """
    __slots__ = ['whitespace', 'comment', 'literalws', 'tokens', 'filter', 'error', 'skip',
                 'resume', 'drop']

    def __init__(self):
        self.whitespace = WHITESPACE_TYPES['vertical']  # type: str
        self.comment = ''     # type: str
        self.literalws = {'right'}  # type: Collection[str]
        self.tokens = set()   # type: Collection[str]
        self.filter = dict()  # type: Dict[str, str]
        self.error = dict()   # type: Dict[str, List[Tuple[ReprType, ReprType]]]
        self.skip = dict()    # type: Dict[str, List[Union[unrepr, str]]]
        self.resume = dict()  # type: Dict[str, List[Union[unrepr, str]]]
        self.drop = set()     # type: Set[str]

    def __getitem__(self, key):
        return getattr(self, key)

    def __setitem__(self, key, value):
        assert hasattr(self, key)
        setattr(self, key, value)

    def keys(self):
        # EBNFDirectives uses __slots__, so instances have no __dict__
        return self.__slots__


class EBNFCompilerError(CompilerError):
    """Error raised by `EBNFCompiler` class. (Not compilation errors
    in the strict sense, see `CompilationError` in module ``dsl.py``)"""
    pass


class EBNFCompiler(Compiler):
    """
    Generates a Parser from an abstract syntax tree of a grammar specified
    in EBNF-Notation.

    Instances of this class must be called with the root-node of the
    abstract syntax tree from an EBNF-specification of a formal language.
    The returned value is the Python-source-code of a Grammar class for
    this language that can be used to parse texts in this language.
    See classes `parser.Compiler` and `parser.Grammar` for more information.

    Additionally, class EBNFCompiler provides helper methods to generate
    code-skeletons for a preprocessor, AST-transformation and full
        compilation of the formal language. These methods' names start with
    the prefix `gen_`.

    Attributes:
        current_symbols:  During compilation, a list containing the root
                node of the currently compiled definition as first element
                and then the nodes of the symbols that are referred to in
                the currently compiled definition.

        rules:  Dictionary that maps rule names to a list of Nodes that
                contain symbol-references in the definition of the rule.
                The first item in the list is the node of the rule-
                definition itself. Example:

                           `alternative = a | b`

                Now `[node.content for node in self.rules['alternative']]`
                yields `['alternative = a | b', 'a', 'b']`

        symbols:  A mapping of symbol names to their first usage (not
                their definition!) in the EBNF source.

        variables:  A set of symbols names that are used with the
                Pop or Retrieve operator. Because the values of these
                symbols need to be captured they are called variables.
                See `test_parser.TestPopRetrieve` for an example.

        recursive:  A set of symbols that are used recursively and
                therefore require a `Forward`-operator.

        definitions:  A dictionary of definitions. Other than `rules`
                this maps the symbols to their compiled definienda.

        deferred_tasks:  A list of callables that is filled during
                compilation, but that will be executed only after
                compilation has finished. Typically, it contains
                semantic checks that require information that
                is only available upon completion of compilation.

        root_symbol: The name of the root symbol.

        directives:  A record of all directives and their default values.

        defined_directives:  A set of all directives that have already been
                defined. With the exception of those directives contained
                in EBNFCompiler.REPEATABLE_DIRECTIVES, directives must only
                be defined once.

        consumed_custom_errors:  A set of symbols for which a custom error
                has been defined and(!) consumed during compilation. This
                makes it possible to add a compiler error in those cases where (i) an
                error message has been defined but will never be used or (ii)
                an error message is accidentally used twice. For examples, see
                `test_ebnf.TestErrorCustomization`.

        consumed_skip_rules: The same as `consumed_custom_errors` only for
                in-series-resume-rules (aka 'skip-rules') for Series-parsers.

        re_flags:  A set of regular expression flags to be added to all
                regular expressions found in the current parsing process.

        grammar_name:  The name of the grammar to be compiled.

        grammar_source:  The source code of the grammar to be compiled.

        grammar_id: a unique id for every compiled grammar. (Required for
                disambiguation of thread-local variables storing
                compiled texts.)
    """
    COMMENT_KEYWORD = "COMMENT__"
    WHITESPACE_KEYWORD = "WSP_RE__"
    RAW_WS_KEYWORD = "WHITESPACE__"
    WHITESPACE_PARSER_KEYWORD = "wsp__"
    DROP_WHITESPACE_PARSER_KEYWORD = "dwsp__"
    RESUME_RULES_KEYWORD = "resume_rules__"
    SKIP_RULES_SUFFIX = '_skip__'
    ERR_MSG_SUFFIX = '_err_msg__'
    RESERVED_SYMBOLS = {WHITESPACE_KEYWORD, RAW_WS_KEYWORD, COMMENT_KEYWORD,
                        RESUME_RULES_KEYWORD, ERR_MSG_SUFFIX}
    AST_ERROR = "Badly structured syntax tree. " \
                "Potentially due to erroneous AST transformation."
    PREFIX_TABLE = {'§': 'Required',
                    '&': 'Lookahead', '!': 'NegativeLookahead',
                    '-&': 'Lookbehind', '-!': 'NegativeLookbehind',
                    '::': 'Pop', ':': 'Retrieve'}
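    # e.g. a factor prefixed with '!' in the EBNF source is rendered with the
    # ``NegativeLookahead`` parser class in the generated Python code.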
    REPEATABLE_DIRECTIVES = {'tokens'}


    def __init__(self, grammar_name="DSL", grammar_source=""):
        self.grammar_id = 0
        super(EBNFCompiler, self).__init__()  # calls the _reset()-method
        self.set_grammar_name(grammar_name, grammar_source)


    def _reset(self):
        super(EBNFCompiler, self)._reset()
        self._result = ''           # type: str
        self.re_flags = set()       # type: Set[str]
        self.rules = OrderedDict()  # type: OrderedDict[str, List[Node]]
        self.current_symbols = []   # type: List[Node]
        self.symbols = {}           # type: Dict[str, Node]
        self.variables = set()      # type: Set[str]
        self.recursive = set()      # type: Set[str]
        self.definitions = {}       # type: Dict[str, str]
        self.deferred_tasks = []    # type: List[Callable]
        self.root_symbol = ""       # type: str
        self.directives = EBNFDirectives()   # type: EBNFDirectives
        self.defined_directives = set()      # type: Set[str]
        self.consumed_custom_errors = set()  # type: Set[str]
        self.consumed_skip_rules = set()     # type: Set[str]
        self.grammar_id += 1


    @property
    def result(self) -> str:
        return self._result


    def set_grammar_name(self, grammar_name: str = "", grammar_source: str = ""):
        """
        Changes the grammar name and source.

        The grammar name and the source text are metadata that do not affect the
        compilation process. They are used to name and annotate the output.
        Returns `self`.
        """
        assert grammar_name == "" or re.match(r'\w+\Z', grammar_name)
        if not grammar_name and re.fullmatch(r'[\w/:\\]+', grammar_source):
            grammar_name = os.path.splitext(os.path.basename(grammar_source))[0]
        self.grammar_name = grammar_name or "NameUnknown"
        self.grammar_source = load_if_file(grammar_source)
        return self


    # methods for generating skeleton code for preprocessor, transformer, and compiler

    def gen_preprocessor_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for a preprocessor-function for
        the previously compiled formal language.
        """
        name = self.grammar_name + "Preprocessor"
        return "def %s(text):\n    return text, lambda i: i\n" % name \
               + PREPROCESSOR_FACTORY.format(NAME=self.grammar_name)


    def gen_transformer_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for the AST-transformation for the
        previously compiled formal language.
        """
        if not self.rules:
            raise EBNFCompilerError('Compiler must be run before calling '
                                    '"gen_transformer_skeleton()"!')
        tt_name = self.grammar_name + '_AST_transformation_table'
        transtable = [tt_name + ' = {',
                      '    # AST Transformations for the ' + self.grammar_name + '-grammar']
        transtable.append('    "<": flatten,')
        for name in self.rules:
            transformations = '[]'
            # rule = self.definitions[name]
            # if rule.startswith('Alternative'):
            #     transformations = '[replace_or_reduce]'
            # elif rule.startswith('Synonym'):
            #     transformations = '[reduce_single_child]'
            transtable.append('    "' + name + '": %s,' % transformations)
        # transtable.append('    ":Token": reduce_single_child,')
        transtable += ['    "*": replace_by_single_child', '}', '']
        transtable += [TRANSFORMER_FACTORY.format(NAME=self.grammar_name, ID=self.grammar_id)]
        return '\n'.join(transtable)


    def gen_compiler_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for a Compiler-class for the
        previously compiled formal language.
        """
        if not self.rules:
            raise EBNFCompilerError('Compiler has not been run before calling '
                                    '"gen_compiler_skeleton()"!')
        compiler = ['class ' + self.grammar_name + 'Compiler(Compiler):',
                    '    """Compiler for the abstract-syntax-tree of a '
                    + self.grammar_name + ' source file.',
                    '    """', '',
                    '    def __init__(self):',
                    '        super(' + self.grammar_name + 'Compiler, self).__init__()',
                    '',
                    '    def _reset(self):',
                    '        super()._reset()',
                    '        # initialize your variables here, not in the constructor!']
        for name in self.rules:
            method_name = visitor_name(name)
            if name == self.root_symbol:
                compiler += ['    def ' + method_name + '(self, node):',
                             '        return self.fallback_compiler(node)', '']
            else:
                compiler += ['    # def ' + method_name + '(self, node):',
                             '    #     return node', '']
        compiler += [COMPILER_FACTORY.format(NAME=self.grammar_name, ID=self.grammar_id)]
        return '\n'.join(compiler)

    def verify_transformation_table(self, transtable):
        """
        Checks for symbols that occur in the transformation-table but have
        never been defined in the grammar. Usually, this kind of
        inconsistency results from an error like a typo in the transformation
        table.
        """
        assert self._dirty_flag
        table_entries = set(expand_table(transtable).keys()) - {'*', '<', '>', '~'}
        symbols = self.rules.keys()
        messages = []
        for entry in table_entries:
            if entry not in symbols and not entry.startswith(":"):
                messages.append(Error(('Symbol "%s" is not defined in grammar %s but appears in '
                                       'the transformation table!') % (entry, self.grammar_name),
                                      0, Error.UNDEFINED_SYMBOL_IN_TRANSTABLE_WARNING))
        return messages

    def verify_compiler(self, compiler):
        """
        Checks for on_XXXX()-methods that occur in the compiler, although XXXX
        has never been defined in the grammar. Usually, this kind of
        inconsistency results from an error like a typo in the compiler-code.
        """
        pass  # TODO: add verification code here


    def _check_rx(self, node: Node, rx: str) -> str:
        """
        Checks whether the string `rx` represents a valid regular
        expression. Makes sure that multiline regular expressions are
        prefixed with the verbose-flag '(?x)'. Returns the regular expression string.
        """
        flags = self.re_flags | {'x'} if rx.find('\n') >= 0 else self.re_flags
        if flags:
            rx = "(?%s)%s" % ("".join(flags), rx)
        try:
            re.compile(rx)
        except Exception as re_error:
            self.tree.new_error(node, "malformed regular expression %s: %s" %
                                (repr(rx), str(re_error)))
        return rx
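
    # For instance (sketch): a regular expression that contains a line break
    # gets the verbose-flag prepended, e.g. 'first\n  second' becomes
    # '(?x)first\n  second'.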


    def _extract_regex(self, node: Node) -> str:
        """Extracts regular expression string from regexp-Node."""
        value = node.content.strip("~")
        if value[0] + value[-1] in {'""', "''"}:
            value = escape_re(value[1:-1])
        elif value[0] + value[-1] == '//' and value != '//':
            value = self._check_rx(node, value[1:-1])
        return value
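
    # For example (sketch): a regexp-node '/\w+/' yields '\w+', while a quoted
    # literal such as '"."' is escaped for literal matching and yields '\.'.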


    def _gen_search_rule(self, nd: Node) -> ReprType:
        """Generates a search rule, which can be either a string for simple
        string search or a regular expression from the node's content. Returns
        an empty string in case the node is neither regexp nor literal.
        """
        if nd.tag_name == 'regexp':
            return unrepr("re.compile(r'%s')" % self._extract_regex(nd))
        elif nd.tag_name == 'literal':
            s = nd.content.strip()
            return s.strip('"') if s[0] == '"' else s.strip("'")
        return ''

    def _gen_search_list(self, nodes: Sequence[Node]) -> List[Union[unrepr, str]]:
        search_list = []  # type: List[Union[unrepr, str]]
        for child in nodes:
            rule = self._gen_search_rule(child)
            search_list.append(rule if rule else unrepr(child.content.strip()))
        return search_list
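
    # Sketch: literal- and regexp-children yield proper search rules (strings
    # or compiled regular expressions); any other child contributes its
    # stripped text content wrapped in ``unrepr``.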


    def assemble_parser(self, definitions: List[Tuple[str, str]], root_node: Node) -> str:
        """
        Creates the Python code for the parser after compilation of
        the EBNF-Grammar.
        """

        # execute deferred tasks, for example semantic checks that cannot
        # be done before the symbol table is complete

        for task in self.deferred_tasks:
            task()

        # provide for capturing of symbols that are variables, i.e. symbols
        # whose value will be retrieved at some point during the parsing process

        if self.variables:
            for i in range(len(definitions)):
                if definitions[i][0] in self.variables:
                    definitions[i] = (definitions[i][0], 'Capture(%s)' % definitions[i][1])

        # add special fields for Grammar class

        if DROP_WSPC in self.directives.drop:
            definitions.append((self.DROP_WHITESPACE_PARSER_KEYWORD,
                                'DropWhitespace(%s)' % self.WHITESPACE_KEYWORD))
        else:
            definitions.append((self.WHITESPACE_PARSER_KEYWORD,
                                'Whitespace(%s)' % self.WHITESPACE_KEYWORD))
        definitions.append((self.WHITESPACE_KEYWORD,
                            ("mixin_comment(whitespace=" + self.RAW_WS_KEYWORD
                             + ", comment=" + self.COMMENT_KEYWORD + ")")))
        definitions.append((self.RAW_WS_KEYWORD, "r'{}'".format(self.directives.whitespace)))
        definitions.append((self.COMMENT_KEYWORD, "r'{}'".format(self.directives.comment)))

        # prepare and add resume-rules

        resume_rules = dict()  # type: Dict[str, List[ReprType]]
        for symbol, raw_rules in self.directives.resume.items():
            refined_rules = []
            for rule in raw_rules:
                if isinstance(rule, unrepr) and rule.s.isidentifier():
                    try:
                        nd = self.rules[rule.s][0].children[1]
                        refined = self._gen_search_rule(nd)
                    except IndexError:
                        refined = ""
                    if refined:
                        refined_rules.append(refined)
                    else:
                        self.tree.new_error(self.rules[rule.s][0],
                                            'Symbol "%s" cannot be used in resume rule, since'
                                            ' it represents neither literal nor regexp!' % rule.s)
                else:
                    refined_rules.append(rule)
            resume_rules[symbol] = refined_rules
        definitions.append((self.RESUME_RULES_KEYWORD, repr(resume_rules)))

        # prepare and add customized error-messages

        for symbol, err_msgs in self.directives.error.items():
            custom_errors = []  # type: List[Tuple[ReprType, ReprType]]
            for search, message in err_msgs:
                if isinstance(search, unrepr) and search.s.isidentifier():
                    try:
                        nd = self.rules[search.s][0].children[1]
                        search = self._gen_search_rule(nd)
                    except IndexError:
                        search = ''
                custom_errors.append((search, message))
            definitions.append((symbol + self.ERR_MSG_SUFFIX, repr(custom_errors)))

        for symbol in self.directives.error.keys():
            if symbol not in self.consumed_custom_errors:
                def_node = self.rules[symbol][0]
                self.tree.new_error(
                    def_node, 'Customized error message for symbol "{}" will never be used, '
                    'because the mandatory marker "§" appears nowhere in its definiendum!'
                    .format(symbol), Error.UNUSED_ERROR_HANDLING_WARNING)

        # prepare and add skip-rules

        for symbol, skip in self.directives.skip.items():
            skip_rules = []  # type: List[ReprType]
            for search in skip:
                if isinstance(search, unrepr) and search.s.isidentifier():
                    try:
                        nd = self.rules[search.s][0].children[1]
                        search = self._gen_search_rule(nd)
                    except IndexError:
                        search = ''
                skip_rules.append(search)
            definitions.append((symbol + self.SKIP_RULES_SUFFIX, repr(skip_rules)))

        for symbol in self.directives.error.keys():
            if symbol not in self.consumed_skip_rules:
                def_node = self.rules[symbol][0]
                self.tree.new_error(
                    def_node, '"Skip-rules" for symbol "{}" will never be used, '
                    'because the mandatory marker "§" appears nowhere in its definiendum!'
                    .format(symbol), Error.UNUSED_ERROR_HANDLING_WARNING)

        # prepare parser class header and docstring and
        # add EBNF grammar to the doc string of the parser class

        article = 'an ' if self.grammar_name[0:1] in "AaEeIiOoUu" else 'a '  # what about 'hour', 'universe' etc.?
        show_source = get_config_value('add_grammar_source_to_parser_docstring')
        declarations = ['class ' + self.grammar_name
                        + 'Grammar(Grammar):',
                        'r"""Parser for ' + article + self.grammar_name
                        + ' source file'
                        + ('. Grammar:' if self.grammar_source and show_source else '.')]
        definitions.append(('parser_initialization__', '["upon instantiation"]'))
        definitions.append(('static_analysis_pending__', '[True]'))
        if self.grammar_source:
            definitions.append(('source_hash__',
                                '"%s"' % md5(self.grammar_source, __version__)))
            declarations.append('')
            if show_source:
                declarations += [line for line in self.grammar_source.split('\n')]
            while declarations[-1].strip() == '':
                declarations = declarations[:-1]
        declarations.append('"""')

        # turn definitions into declarations in reverse order

        self.root_symbol = definitions[0][0] if definitions else ""
        definitions.reverse()
        declarations += [symbol + ' = Forward()'
                         for symbol in sorted(list(self.recursive))]
        for symbol, statement in definitions:
            if symbol in self.recursive:
                declarations += [symbol + '.set(' + statement + ')']
            else:
                declarations += [symbol + ' = ' + statement]

        # check for symbols used but never defined

        defined_symbols = set(self.rules.keys()) | self.RESERVED_SYMBOLS
        for symbol in self.symbols:
            if symbol not in defined_symbols:
                self.tree.new_error(self.symbols[symbol],
                                    "Missing definition for symbol '%s'" % symbol)
                # root_node.error_flag = True

        # check for unconnected rules

        defined_symbols.difference_update(self.RESERVED_SYMBOLS)

        def remove_connections(symbol):
            """Recursively removes all symbols which appear in the
            definiens of a particular symbol."""
            if symbol in defined_symbols:
                defined_symbols.remove(symbol)
                for related in self.rules[symbol][1:]:
                    remove_connections(str(related))

        remove_connections(self.root_symbol)
        for leftover in defined_symbols:
            self.tree.new_error(self.rules[leftover][0],
                                ('Rule "%s" is not connected to parser root "%s"!') %
                                (leftover, self.root_symbol), Error.WARNING)

        # set root_symbol parser and assemble python grammar definition

        if self.root_symbol and 'root__' not in self.rules:
            declarations.append('root__ = ' + self.root_symbol)
        declarations.append('')
        self._result = '\n    '.join(declarations) \
                       + GRAMMAR_FACTORY.format(NAME=self.grammar_name, ID=self.grammar_id)
        return self._result


    ## compilation methods

    def on_syntax(self, node: Node) -> str:
        definitions = []  # type: List[Tuple[str, str]]

        # drop the wrapping sequence node
        if len(node.children) == 1 and node.children[0].is_anonymous():
            node = node.children[0]

        # compile definitions and directives and collect definitions
        for nd in node.children:
            if nd.tag_name == "definition":
                definitions.append(self.compile(nd))
            else:
                assert nd.tag_name == "directive", nd.as_sxpr()
                self.compile(nd)
            # node.error_flag = max(node.error_flag, nd.error_flag)
        self.definitions.update(definitions)

        grammar_python_src = self.assemble_parser(definitions, node)
        if get_config_value('static_analysis') == 'early':
            try:
                grammar_class = compile_python_object(DHPARSER_IMPORTS + grammar_python_src,
                                                      self.grammar_name)
                _ = grammar_class()
                grammar_python_src = grammar_python_src.replace(
                    'static_analysis_pending__ = [True]', 'static_analysis_pending__ = []', 1)
            except NameError:
                pass  # undefined names in the grammar have already been caught and reported
            except GrammarError as error:
                for sym, prs, err in error.errors:
                    symdef_node = self.rules[sym][0]
                    err.pos = self.rules[sym][0].pos
                    self.tree.add_error(symdef_node, err)
        return grammar_python_src


    def on_definition(self, node: Node) -> Tuple[str, str]:
        rule = node.children[0].content
        if rule in self.rules:
            first = self.rules[rule][0]
            if not id(first) in self.tree.error_nodes:
                self.tree.new_error(first, 'First definition of rule "%s" '
                                    'followed by illegal redefinitions.' % rule)
            self.tree.new_error(node, 'A rule "%s" has already been defined earlier.' % rule)
        elif rule in EBNFCompiler.RESERVED_SYMBOLS:
            self.tree.new_error(node, 'Symbol "%s" is a reserved symbol.' % rule)
        elif not sane_parser_name(rule):
            self.tree.new_error(node, 'Illegal symbol "%s". Symbols must not start or '
                                'end with a double underscore "__".' % rule)
        elif rule in self.directives.tokens:
            self.tree.new_error(node, 'Symbol "%s" has already been defined as '
                                'a preprocessor token.' % rule)
        elif keyword.iskeyword(rule):
            self.tree.new_error(node, 'Python keyword "%s" may not be used as a symbol. '
                                % rule + '(This may change in the future.)')
        try:
            self.current_symbols = [node]
            self.rules[rule] = self.current_symbols
            defn = self.compile(node.children[1])
            if rule in self.variables:
                defn = 'Capture(%s)' % defn
                self.variables.remove(rule)
            elif defn.find("(") < 0:
                # assume it's a synonym, like 'page = REGEX_PAGE_NR'
                defn = 'Synonym(%s)' % defn
        except TypeError as error:
            from traceback import extract_tb
            trace = str(extract_tb(error.__traceback__)[-1])
            errmsg = "%s (TypeError: %s; %s)\n%s" \
                     % (EBNFCompiler.AST_ERROR, str(error), trace, node.as_sxpr())
            self.tree.new_error(node, errmsg)
            rule, defn = rule + ':error', '"' + errmsg + '"'
        return rule, defn
