# ebnf.py - EBNF -> Python-Parser compilation for DHParser
#
# Copyright 2016  by Eckhart Arnold (arnold@badw.de)
#                 Bavarian Academy of Sciences and Humanities (badw.de)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.  See the License for the specific language governing
# permissions and limitations under the License.


"""
Module ``ebnf`` provides a self-hosting parser for EBNF-Grammars as
well as an EBNF-compiler that compiles an EBNF-Grammar into a
DHParser-based Grammar class that can be executed to parse source text
conforming to this grammar into concrete syntax trees.
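
A rough usage sketch (not a doctest): the example below assumes only the
factory functions defined in this module and DHParser's ``compile_source()``
with its (source, preprocessor, parser, transformer, compiler) calling
convention; real projects will usually go through higher-level convenience
wrappers, and exact signatures may differ between DHParser versions::

    from DHParser.compile import compile_source
    from DHParser.ebnf import (get_ebnf_preprocessor, get_ebnf_grammar,
                               get_ebnf_transformer, get_ebnf_compiler)

    ebnf_src = '''
        document = { word }
        word     = /[A-Za-z]+/~
    '''
    python_src, messages, ast = compile_source(
        ebnf_src, get_ebnf_preprocessor(), get_ebnf_grammar(),
        get_ebnf_transformer(), get_ebnf_compiler('WordList', ebnf_src))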
"""


from collections import OrderedDict
from functools import partial
import keyword
import os

from DHParser.compile import CompilerError, Compiler, compile_source, visitor_name
from DHParser.error import Error
from DHParser.parse import Grammar, mixin_comment, Forward, RegExp, Whitespace, \
    NegativeLookahead, Alternative, Series, Option, OneOrMore, ZeroOrMore, Token, \
    GrammarError
from DHParser.preprocess import nil_preprocessor, PreprocessorFunc
from DHParser.syntaxtree import Node, WHITESPACE_PTYPE, TOKEN_PTYPE
from DHParser.toolkit import load_if_file, escape_re, md5, sane_parser_name, re, expand_table, \
    GLOBALS, CONFIG_PRESET, get_config_value, unrepr, compile_python_object, typing
from DHParser.transform import TransformationFunc, traverse, remove_brackets, \
    reduce_single_child, replace_by_single_child, remove_expendables, \
    remove_tokens, flatten, forbid, assert_content
from DHParser.versionnumber import __version__
from typing import Callable, Dict, List, Set, Tuple, Sequence, Union, Optional, Any, cast


__all__ = ('get_ebnf_preprocessor',
           'get_ebnf_grammar',
           'get_ebnf_transformer',
           'get_ebnf_compiler',
           'EBNFGrammar',
           'EBNFTransform',
           'EBNFCompilerError',
           'EBNFCompiler',
           'grammar_changed',
           'compile_ebnf',
           'PreprocessorFactoryFunc',
           'ParserFactoryFunc',
           'TransformerFactoryFunc',
           'CompilerFactoryFunc')


########################################################################
#
# Presets
#
########################################################################

CONFIG_PRESET['add_grammar_source_to_parser_docstring'] = False
CONFIG_PRESET['early_static_analysis'] = True  # do a static analysis right after ebnf compilation


########################################################################
#
# source code support
#
########################################################################


dhparserdir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))


DHPARSER_IMPORTS = '''
import collections
from functools import partial
import os
import sys

sys.path.append(r'{dhparserdir}')

try:
    import regex as re
except ImportError:
    import re
from DHParser import logging, is_filename, load_if_file, \\
    Grammar, Compiler, nil_preprocessor, PreprocessorToken, Whitespace, DropWhitespace, \\
    Lookbehind, Lookahead, Alternative, Pop, Token, DropToken, Synonym, AllOf, SomeOf, \\
    Unordered, Option, NegativeLookbehind, OneOrMore, RegExp, Retrieve, Series, Capture, \\
    ZeroOrMore, Forward, NegativeLookahead, Required, mixin_comment, compile_source, \\
    grammar_changed, last_value, counterpart, accumulate, PreprocessorFunc, is_empty, \\
    Node, TransformationFunc, TransformationDict, transformation_factory, traverse, \\
    remove_children_if, move_adjacent, normalize_whitespace, is_anonymous, matches_re, \\
    reduce_single_child, replace_by_single_child, replace_or_reduce, remove_whitespace, \\
    remove_expendables, remove_empty, remove_tokens, flatten, is_insignificant_whitespace, \\
    is_expendable, collapse, collapse_if, replace_content, WHITESPACE_PTYPE, TOKEN_PTYPE, \\
    remove_nodes, remove_content, remove_brackets, replace_parser, remove_anonymous_tokens, \\
    keep_children, is_one_of, not_one_of, has_content, apply_if, remove_first, remove_last, \\
    remove_anonymous_empty, keep_nodes, traverse_locally, strip, lstrip, rstrip, \\
    replace_content, replace_content_by, forbid, assert_content, remove_infix_operator, \\
    flatten_anonymous_nodes, error_on, recompile_grammar, GLOBALS
'''.format(dhparserdir=dhparserdir)


########################################################################
#
# EBNF scanning
#
########################################################################


def get_ebnf_preprocessor() -> PreprocessorFunc:
    return nil_preprocessor


########################################################################
#
# EBNF parsing
#
########################################################################


class EBNFGrammar(Grammar):
    r"""
    Parser for an EBNF source file, with this grammar:

    @ comment    = /#.*(?:\n|$)/                    # comments start with '#' and eat all chars up to and including '\n'
    @ whitespace = /\s*/                            # whitespace includes linefeed
    @ literalws  = right                            # trailing whitespace of literals will be ignored tacitly

    syntax     = [~//] { definition | directive } §EOF
    definition = symbol §"=" expression
    directive  = "@" §symbol "=" (regexp | literal | symbol) { "," (regexp | literal | symbol) }

    expression = term { "|" term }
    term       = { ["§"] factor }+                       # "§" means all following factors mandatory
    factor     = [flowmarker] [retrieveop] symbol !"="   # negative lookahead to be sure it's not a definition
               | [flowmarker] literal
               | [flowmarker] plaintext
               | [flowmarker] regexp
               | [flowmarker] whitespace
               | [flowmarker] oneormore
               | [flowmarker] group
               | [flowmarker] unordered
               | repetition
               | option

    flowmarker = "!"  | "&"                         # '!' negative lookahead, '&' positive lookahead
               | "-!" | "-&"                        # '-' negative lookbehind, '-&' positive lookbehind
    retrieveop = "::" | ":"                         # '::' pop, ':' retrieve

    group      = "(" §expression ")"
    unordered  = "<" §expression ">"                # elements of expression in arbitrary order
    oneormore  = "{" expression "}+"
    repetition = "{" §expression "}"
    option     = "[" §expression "]"

    symbol     = /(?!\d)\w+/~                       # e.g. expression, factor, parameter_list
    literal    = /"(?:[^"]|\\")*?"/~                # e.g. "(", '+', 'while'
               | /'(?:[^']|\\')*?'/~                # whitespace following literals will be ignored tacitly.
    plaintext  = /`(?:[^"]|\\")*?`/~                # like literal but does not eat whitespace
    regexp     = /\/(?:\\\/|[^\/])*?\//~            # e.g. /\w+/, ~/#.*(?:\n|$)/~
    whitespace = /~/~                               # insignificant whitespace

    EOF = !/./
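
    Rough usage sketch (Grammar instances are callable with the source text
    to be parsed; this is an illustration, not a doctest):

        ebnf_parser = get_ebnf_grammar()
        syntax_tree = ebnf_parser('document = { word }  word = /[A-Za-z]+/~ ')
        if not syntax_tree.errors:
            print(syntax_tree.as_sxpr())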
    """
    expression = Forward()
    source_hash__ = "82a7c668f86b83f86515078e6c9093ed"
    parser_initialization__ = ["upon instantiation"]
    COMMENT__ = r'#.*(?:\n|$)'
    WHITESPACE__ = r'\s*'
    WSP_RE__ = mixin_comment(whitespace=WHITESPACE__, comment=COMMENT__)
    wsp__ = Whitespace(WSP_RE__)
    EOF = NegativeLookahead(RegExp('.'))
    whitespace = Series(RegExp('~'), wsp__)
    regexp = Series(RegExp('/(?:\\\\/|[^/])*?/'), wsp__)
    plaintext = Series(RegExp('`(?:[^"]|\\\\")*?`'), wsp__)
    literal = Alternative(Series(RegExp('"(?:[^"]|\\\\")*?"'), wsp__), Series(RegExp("'(?:[^']|\\\\')*?'"), wsp__))
    symbol = Series(RegExp('(?!\\d)\\w+'), wsp__)
    option = Series(Series(Token("["), wsp__), expression, Series(Token("]"), wsp__), mandatory=1)
    repetition = Series(Series(Token("{"), wsp__), expression, Series(Token("}"), wsp__), mandatory=1)
    oneormore = Series(Series(Token("{"), wsp__), expression, Series(Token("}+"), wsp__))
    unordered = Series(Series(Token("<"), wsp__), expression, Series(Token(">"), wsp__), mandatory=1)
    group = Series(Series(Token("("), wsp__), expression, Series(Token(")"), wsp__), mandatory=1)
    retrieveop = Alternative(Series(Token("::"), wsp__), Series(Token(":"), wsp__))
    flowmarker = Alternative(Series(Token("!"), wsp__), Series(Token("&"), wsp__),
                             Series(Token("-!"), wsp__), Series(Token("-&"), wsp__))
    factor = Alternative(Series(Option(flowmarker), Option(retrieveop), symbol,
                                NegativeLookahead(Series(Token("="), wsp__))), Series(Option(flowmarker), literal),
                         Series(Option(flowmarker), plaintext), Series(Option(flowmarker), regexp),
                         Series(Option(flowmarker), whitespace), Series(Option(flowmarker), oneormore),
                         Series(Option(flowmarker), group), Series(Option(flowmarker), unordered), repetition, option)
    term = OneOrMore(Series(Option(Series(Token("§"), wsp__)), factor))
    expression.set(Series(term, ZeroOrMore(Series(Series(Token("|"), wsp__), term))))
    directive = Series(Series(Token("@"), wsp__), symbol, Series(Token("="), wsp__),
                       Alternative(regexp, literal, symbol),
                       ZeroOrMore(Series(Series(Token(","), wsp__), Alternative(regexp, literal, symbol))), mandatory=1)
    definition = Series(symbol, Series(Token("="), wsp__), expression, mandatory=1)
    syntax = Series(Option(Series(wsp__, RegExp(''))), ZeroOrMore(Alternative(definition, directive)), EOF, mandatory=2)
    root__ = syntax


def grammar_changed(grammar_class, grammar_source: str) -> bool:
    """
    Returns ``True`` if ``grammar_class`` does not reflect the latest
    changes of ``grammar_source``

    Parameters:
        grammar_class:  the parser class representing the grammar
            or the file name of a compiler suite containing the grammar
        grammar_source:  File name or string representation of the
            EBNF code of the grammar

    Returns (bool):
        True, if the source text of the grammar is different from the
        source from which the grammar class was generated
    """
    grammar = load_if_file(grammar_source)
    chksum = md5(grammar, __version__)
    if isinstance(grammar_class, str):
        # grammar_class = load_compiler_suite(grammar_class)[1]
        with open(grammar_class, 'r', encoding='utf8') as f:
            pycode = f.read()
        m = re.search(r'class \w*\(Grammar\)', pycode)
        if m:
            m = re.search('    source_hash__ *= *"([a-z0-9]*)"',
                          pycode[m.span()[1]:])
            return not (m and m.groups() and m.groups()[-1] == chksum)
        else:
            return True
    else:
        return chksum != grammar_class.source_hash__
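
# A typical use of grammar_changed() is to decide whether a generated parser
# module is out of date with respect to its EBNF source and therefore needs
# to be regenerated. Illustrative sketch only; `regenerate_parser` stands in
# for whatever recompilation routine the surrounding project provides:
#
#     if grammar_changed(ArithmeticGrammar, 'Arithmetic.ebnf'):
#         regenerate_parser('Arithmetic.ebnf')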


def get_ebnf_grammar() -> EBNFGrammar:
    try:
        grammar = GLOBALS.ebnf_grammar_singleton
        return grammar
    except AttributeError:
        GLOBALS.ebnf_grammar_singleton = EBNFGrammar()
        return GLOBALS.ebnf_grammar_singleton


########################################################################
#
# EBNF concrete to abstract syntax tree transformation and validation
#
########################################################################


EBNF_AST_transformation_table = {
    # AST Transformations for EBNF-grammar
    "<":
        remove_expendables,
    "syntax":
        [],  # otherwise '"*": replace_by_single_child' would be applied
    "directive, definition":
        [flatten, remove_tokens('@', '=', ',')],
    "expression":
        [replace_by_single_child, flatten, remove_tokens('|')],  # remove_infix_operator],
    "term":
        [replace_by_single_child, flatten],  # supports both idioms:
                                             # "{ factor }+" and "factor { factor }"
    "factor, flowmarker, retrieveop":
        replace_by_single_child,
    "group":
        [remove_brackets, replace_by_single_child],
    "unordered":
        remove_brackets,
    "oneormore, repetition, option":
        [reduce_single_child, remove_brackets,
         forbid('repetition', 'option', 'oneormore'), assert_content(r'(?!§)(?:.|\n)*')],
    "symbol, literal, regexp":
        reduce_single_child,
    (TOKEN_PTYPE, WHITESPACE_PTYPE):
        reduce_single_child,
    # "list_":
    #     [flatten, remove_infix_operator],
    "*":
        replace_by_single_child
}


def EBNFTransform() -> TransformationFunc:
    return partial(traverse, processing_table=EBNF_AST_transformation_table.copy())


def get_ebnf_transformer() -> TransformationFunc:
    try:
        transformer = GLOBALS.EBNF_transformer_singleton
    except AttributeError:
        GLOBALS.EBNF_transformer_singleton = EBNFTransform()
        transformer = GLOBALS.EBNF_transformer_singleton
    return transformer
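
# Sketch of how the pieces above fit together (EBNF source -> concrete
# syntax tree -> abstract syntax tree); illustration only, the AST
# transformation modifies the tree in place:
#
#     syntax_tree = get_ebnf_grammar()('term = factor { "*" factor } ')
#     get_ebnf_transformer()(syntax_tree)
#     print(syntax_tree.as_sxpr())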


########################################################################
#
# EBNF abstract syntax tree to Python parser compilation
#
########################################################################


PreprocessorFactoryFunc = Callable[[], PreprocessorFunc]
ParserFactoryFunc = Callable[[], Grammar]
TransformerFactoryFunc = Callable[[], TransformationFunc]
CompilerFactoryFunc = Callable[[], Compiler]

PREPROCESSOR_FACTORY = '''
def get_preprocessor() -> PreprocessorFunc:
    return {NAME}Preprocessor
'''


GRAMMAR_FACTORY = '''
def get_grammar() -> {NAME}Grammar:
    global GLOBALS
    try:
        grammar = GLOBALS.{NAME}_{ID:08d}_grammar_singleton
    except AttributeError:
        GLOBALS.{NAME}_{ID:08d}_grammar_singleton = {NAME}Grammar()
        if hasattr(get_grammar, 'python_src__'):
            GLOBALS.{NAME}_{ID:08d}_grammar_singleton.python_src__ = get_grammar.python_src__
        grammar = GLOBALS.{NAME}_{ID:08d}_grammar_singleton
    return grammar
'''


TRANSFORMER_FACTORY = '''
def {NAME}Transform() -> TransformationDict:
    return partial(traverse, processing_table={NAME}_AST_transformation_table.copy())

def get_transformer() -> TransformationFunc:
    try:
        transformer = GLOBALS.{NAME}_{ID:08d}_transformer_singleton
    except AttributeError:
        GLOBALS.{NAME}_{ID:08d}_transformer_singleton = {NAME}Transform()
        transformer = GLOBALS.{NAME}_{ID:08d}_transformer_singleton
    return transformer
'''


COMPILER_FACTORY = '''
def get_compiler() -> {NAME}Compiler:
    try:
        compiler = GLOBALS.{NAME}_{ID:08d}_compiler_singleton
    except AttributeError:
        GLOBALS.{NAME}_{ID:08d}_compiler_singleton = {NAME}Compiler()
        compiler = GLOBALS.{NAME}_{ID:08d}_compiler_singleton
    return compiler
'''


WHITESPACE_TYPES = {'horizontal': r'[\t ]*',  # default: horizontal
                    'linefeed': r'[ \t]*\n?(?!\s*\n)[ \t]*',
                    'vertical': r'\s*'}

DROP_TOKEN  = 'token'
DROP_WSPC   = 'whitespace'
DROP_VALUES = {DROP_TOKEN, DROP_WSPC}

# Representation of Python code or, rather, something that will be output as Python code
ReprType = Union[str, unrepr]


class EBNFDirectives:
    """
    A Record that keeps information about compiler directives
    during the compilation process.

    Attributes:
        whitespace:  the regular expression string for (insignificant)
                whitespace

        comment:  the regular expression string for comments

        literalws:  automatic whitespace eating next to literals. Can
                be either 'left', 'right', 'none', 'both'

        tokens:  set of the names of preprocessor tokens
        filter:  mapping of symbols to python filter functions that
                will be called on any retrieve / pop - operations on
                these symbols

        error:  mapping of symbols to tuples of match conditions and
                customized error messages. A match condition can be
                either a string or a regular expression. The first
                error message where the search condition matches will
                be displayed. An empty string '' as search condition
                always matches, so in case of multiple error messages,
                this condition should be placed at the end.

        skip:  mapping of symbols to a list of search expressions. A
                search expression can be either a string or a regular
                expression. The closest match is the point of reentry
                for the series-parser when a mandatory item failed to
                match the following text.

        resume:  mapping of symbols to a list of search expressions. A
                search expression can be either a string or a regular
                expression. The closest match is the point of reentry
                after a parsing error has occurred. Other than the
                skip field, this configures resuming after the failing
                parser has returned.
    """
    __slots__ = ['whitespace', 'comment', 'literalws', 'tokens', 'filter', 'error', 'skip',
                 'resume', 'drop']

    def __init__(self):
        self.whitespace = WHITESPACE_TYPES['vertical']  # type: str
        self.comment = ''     # type: str
        self.literalws = {'right'}  # type: Collection[str]
        self.tokens = set()   # type: Collection[str]
        self.filter = dict()  # type: Dict[str, str]
        self.error = dict()   # type: Dict[str, List[Tuple[ReprType, ReprType]]]
        self.skip = dict()    # type: Dict[str, List[Union[unrepr, str]]]
        self.resume = dict()  # type: Dict[str, List[Union[unrepr, str]]]
        self.drop = set()     # type: Set[str]

    def __getitem__(self, key):
        return getattr(self, key)

    def __setitem__(self, key, value):
        assert hasattr(self, key)
        setattr(self, key, value)

    def keys(self):
        return self.__slots__  # note: a __slots__ class has no instance __dict__
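
# During compilation, EBNFDirectives is accessed both attribute-style and
# dict-style (via __getitem__/__setitem__ above). Illustration only:
#
#     directives = EBNFDirectives()
#     directives['whitespace'] = WHITESPACE_TYPES['horizontal']
#     assert directives.whitespace == WHITESPACE_TYPES['horizontal']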


class EBNFCompilerError(CompilerError):
    """Error raised by `EBNFCompiler` class. (Not compilation errors
    in the strict sense, see `CompilationError` in module ``dsl.py``)"""
    pass


class EBNFCompiler(Compiler):
    """
    Generates a Parser from an abstract syntax tree of a grammar specified
    in EBNF-Notation.

    Instances of this class must be called with the root-node of the
    abstract syntax tree from an EBNF-specification of a formal language.
    The returned value is the Python-source-code of a Grammar class for
    this language that can be used to parse texts in this language.
    See classes `parser.Compiler` and `parser.Grammar` for more information.

    Additionally, class EBNFCompiler provides helper methods to generate
    code-skeletons for a preprocessor, AST-transformation and full
    compilation of the formal language. These methods' names start with
    the prefix `gen_`.

    Attributes:
        current_symbols:  During compilation, a list containing the root
                node of the currently compiled definition as first element
                and then the nodes of the symbols that are referred to in
                the currently compiled definition.

        rules:  Dictionary that maps rule names to a list of Nodes that
                contain symbol-references in the definition of the rule.
                The first item in the list is the node of the rule-
                definition itself. Example:

                           `alternative = a | b`

                Now `[node.content for node in self.rules['alternative']]`
                yields `['alternative = a | b', 'a', 'b']`

        symbols:  A mapping of symbol names to their first usage (not
                their definition!) in the EBNF source.

        variables:  A set of symbol names that are used with the
                Pop or Retrieve operator. Because the values of these
                symbols need to be captured they are called variables.
                See `test_parser.TestPopRetrieve` for an example.

        recursive:  A set of symbols that are used recursively and
                therefore require a `Forward`-operator.

        definitions:  A dictionary of definitions. Other than `rules`
                this maps the symbols to their compiled definienda.

        deferred_tasks:  A list of callables that is filled during
                compilation, but that will be executed only after
                compilation has finished. Typically, it contains
                semantic checks that require information that
                is only available upon completion of compilation.

        root_symbol: The name of the root symbol.

        directives:  A record of all directives and their default values.

        defined_directives:  A set of all directives that have already been
                defined. With the exception of those directives contained
                in EBNFCompiler.REPEATABLE_DIRECTIVES, directives must only
                be defined once.

        consumed_custom_errors:  A set of symbols for which a custom error
                has been defined and(!) consumed during compilation. This
                makes it possible to add a compiler error in those cases
                where (i) an error message has been defined but will never
                be used or (ii) an error message is accidentally used twice.
                For examples, see
                `test_ebnf.TestErrorCustomization`.

        consumed_skip_rules: The same as `consumed_custom_errors` only for
                in-series-resume-rules (aka 'skip-rules') for Series-parsers.

        re_flags:  A set of regular expression flags to be added to all
                regular expressions found in the current parsing process

        grammar_name:  The name of the grammar to be compiled

        grammar_source:  The source code of the grammar to be compiled.

        grammar_id: a unique id for every compiled grammar. (Required for
                disambiguation of thread-local variables storing
                compiled texts.)
    """
    COMMENT_KEYWORD = "COMMENT__"
    WHITESPACE_KEYWORD = "WSP_RE__"
    RAW_WS_KEYWORD = "WHITESPACE__"
    WHITESPACE_PARSER_KEYWORD = "wsp__"
    DROP_WHITESPACE_PARSER_KEYWORD = "dwsp__"
    RESUME_RULES_KEYWORD = "resume_rules__"
    SKIP_RULES_SUFFIX = '_skip__'
    ERR_MSG_SUFFIX = '_err_msg__'
    RESERVED_SYMBOLS = {WHITESPACE_KEYWORD, RAW_WS_KEYWORD, COMMENT_KEYWORD,
                        RESUME_RULES_KEYWORD, ERR_MSG_SUFFIX}
    AST_ERROR = "Badly structured syntax tree. " \
                "Potentially due to erroneous AST transformation."
    PREFIX_TABLE = {'§': 'Required',
                    '&': 'Lookahead', '!': 'NegativeLookahead',
                    '-&': 'Lookbehind', '-!': 'NegativeLookbehind',
                    '::': 'Pop', ':': 'Retrieve'}
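    # For example, the EBNF fragment '!"," word' is compiled (roughly) into
    # Series(NegativeLookahead(Token(",")), word) by way of this table.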
    REPEATABLE_DIRECTIVES = {'tokens'}


    def __init__(self, grammar_name="DSL", grammar_source=""):
        self.grammar_id = 0
        super(EBNFCompiler, self).__init__()  # calls the _reset()-method
        self.set_grammar_name(grammar_name, grammar_source)


    def _reset(self):
        super(EBNFCompiler, self)._reset()
        self._result = ''           # type: str
        self.re_flags = set()       # type: Set[str]
        self.rules = OrderedDict()  # type: OrderedDict[str, List[Node]]
        self.current_symbols = []   # type: List[Node]
        self.symbols = {}           # type: Dict[str, Node]
        self.variables = set()      # type: Set[str]
        self.recursive = set()      # type: Set[str]
        self.definitions = {}       # type: Dict[str, str]
        self.deferred_tasks = []    # type: List[Callable]
        self.root_symbol = ""       # type: str
        self.directives = EBNFDirectives()   # type: EBNFDirectives
        self.defined_directives = set()      # type: Set[str]
        self.consumed_custom_errors = set()  # type: Set[str]
        self.consumed_skip_rules = set()     # type: Set[str]
        self.grammar_id += 1


    @property
    def result(self) -> str:
        return self._result


    def set_grammar_name(self, grammar_name: str = "", grammar_source: str = ""):
        """
        Changes the grammar name and source.

        The grammar name and the source text are metadata that do not affect the
        compilation process. They are used to name and annotate the output.
        Returns `self`.
        """
        assert grammar_name == "" or re.match(r'\w+\Z', grammar_name)
        if not grammar_name and re.fullmatch(r'[\w/:\\]+', grammar_source):
            grammar_name = os.path.splitext(os.path.basename(grammar_source))[0]
        self.grammar_name = grammar_name
        self.grammar_source = load_if_file(grammar_source)
        return self


    # methods for generating skeleton code for preprocessor, transformer, and compiler

    def gen_preprocessor_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for a preprocessor-function for
        the previously compiled formal language.
        """
        name = self.grammar_name + "Preprocessor"
        return "def %s(text):\n    return text, lambda i: i\n" % name \
               + PREPROCESSOR_FACTORY.format(NAME=self.grammar_name)


    def gen_transformer_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for the AST-transformation for the
        previously compiled formal language.
        """
        if not self.rules:
            raise EBNFCompilerError('Compiler must be run before calling '
                                    '"gen_transformer_Skeleton()"!')
        tt_name = self.grammar_name + '_AST_transformation_table'
        transtable = [tt_name + ' = {',
                      '    # AST Transformations for the ' + self.grammar_name + '-grammar']
        transtable.append('    "<": flatten_anonymous_nodes,')
        for name in self.rules:
            transformations = '[]'
            # rule = self.definitions[name]
            # if rule.startswith('Alternative'):
            #     transformations = '[replace_or_reduce]'
            # elif rule.startswith('Synonym'):
            #     transformations = '[reduce_single_child]'
            transtable.append('    "' + name + '": %s,' % transformations)
        # transtable.append('    ":Token": reduce_single_child,')
        transtable += ['    "*": replace_by_single_child', '}', '']
        transtable += [TRANSFORMER_FACTORY.format(NAME=self.grammar_name, ID=self.grammar_id)]
        return '\n'.join(transtable)


    def gen_compiler_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for a Compiler-class for the
        previously compiled formal language.
        """
        if not self.rules:
            raise EBNFCompilerError('Compiler has not been run before calling '
                                    '"gen_Compiler_Skeleton()"!')
        compiler = ['class ' + self.grammar_name + 'Compiler(Compiler):',
                    '    """Compiler for the abstract-syntax-tree of a '
                    + self.grammar_name + ' source file.',
                    '    """', '',
                    '    def __init__(self):',
                    '        super(' + self.grammar_name + 'Compiler, self).__init__()',
                    '',
                    '    def _reset(self):',
                    '        super()._reset()',
                    '        # initialize your variables here, not in the constructor!']
        for name in self.rules:
            method_name = visitor_name(name)
            if name == self.root_symbol:
                compiler += ['    def ' + method_name + '(self, node):',
                             '        return self.fallback_compiler(node)', '']
            else:
                compiler += ['    # def ' + method_name + '(self, node):',
                             '    #     return node', '']
        compiler += [COMPILER_FACTORY.format(NAME=self.grammar_name, ID=self.grammar_id)]
        return '\n'.join(compiler)

    def verify_transformation_table(self, transtable):
        """
        Checks for symbols that occur in the transformation-table but have
        never been defined in the grammar. Usually, this kind of
        inconsistency results from an error like a typo in the transformation
        table.
        """
        assert self._dirty_flag
        table_entries = set(expand_table(transtable).keys()) - {'*', '<', '>', '~'}
        symbols = self.rules.keys()
        messages = []
        for entry in table_entries:
            if entry not in symbols and not entry.startswith(":"):
                messages.append(Error(('Symbol "%s" is not defined in grammar %s but appears in '
                                       'the transformation table!') % (entry, self.grammar_name),
                                      0, Error.UNDEFINED_SYMBOL_IN_TRANSTABLE_WARNING))
        return messages

    def verify_compiler(self, compiler):
        """
        Checks for on_XXXX()-methods that occur in the compiler, although XXXX
        has never been defined in the grammar. Usually, this kind of
        inconsistency results from an error like a typo in the compiler-code.
        """
        pass  # TODO: add verification code here


    def _check_rx(self, node: Node, rx: str) -> str:
        """
        Checks whether the string `rx` represents a valid regular
        expression. Makes sure that regular expressions spanning several
        lines are prepended by the verbose-flag 'x'. Returns the regular
        expression string.
        """
        flags = self.re_flags | {'x'} if rx.find('\n') >= 0 else self.re_flags
        if flags:
            rx = "(?%s)%s" % ("".join(flags), rx)
        try:
            re.compile(rx)
        except Exception as re_error:
            self.tree.new_error(node, "malformed regular expression %s: %s" %
                                (repr(rx), str(re_error)))
        return rx


    def _extract_regex(self, node: Node) -> str:
        """Extracts regular expression string from regexp-Node."""
        value = node.content.strip("~")
        if value[0] + value[-1] in {'""', "''"}:
            value = escape_re(value[1:-1])
        elif value[0] + value[-1] == '//' and value != '//':
            value = self._check_rx(node, value[1:-1])
        return value


    def _gen_search_rule(self, nd: Node) -> ReprType:
        """Generates a search rule, which can be either a string for simple
        string search or a regular expression from the node's content. Returns
        an empty string in case the node is neither regexp nor literal.
        """
        if nd.tag_name == 'regexp':
            return unrepr("re.compile(r'%s')" % self._extract_regex(nd))
        elif nd.tag_name == 'literal':
            s = nd.content.strip()
            return s.strip('"') if s[0] == '"' else s.strip("'")
        return ''

    def _gen_search_list(self, nodes: Sequence[Node]) -> List[Union[unrepr, str]]:
        search_list = []  # type: List[Union[unrepr, str]]
        for child in nodes:
            rule = self._gen_search_rule(child)
            search_list.append(rule if rule else unrepr(child.content.strip()))
        return search_list


    def assemble_parser(self, definitions: List[Tuple[str, str]], root_node: Node) -> str:
        """
        Creates the Python code for the parser after compilation of
        the EBNF-Grammar
        """

        # execute deferred tasks, for example semantic checks that cannot
        # be done before the symbol table is complete

        for task in self.deferred_tasks:
            task()

        # provide for capturing of symbols that are variables, i.e. the
        # value of which will be retrieved at some point during the parsing process

        if self.variables:
            for i in range(len(definitions)):
                if definitions[i][0] in self.variables:
                    definitions[i] = (definitions[i][0], 'Capture(%s)' % definitions[i][1])

        # add special fields for Grammar class

        definitions.append((self.WHITESPACE_PARSER_KEYWORD,
                            'Whitespace(%s)' % self.WHITESPACE_KEYWORD))
        if DROP_WSPC in self.directives.drop:
            definitions.append((self.DROP_WHITESPACE_PARSER_KEYWORD,
                                'DropWhitespace(%s)' % self.WHITESPACE_KEYWORD))
        definitions.append((self.WHITESPACE_KEYWORD,
                            ("mixin_comment(whitespace=" + self.RAW_WS_KEYWORD
                             + ", comment=" + self.COMMENT_KEYWORD + ")")))
        definitions.append((self.RAW_WS_KEYWORD, "r'{}'".format(self.directives.whitespace)))
        definitions.append((self.COMMENT_KEYWORD, "r'{}'".format(self.directives.comment)))

        # prepare and add resume-rules

        resume_rules = dict()  # type: Dict[str, List[ReprType]]
        for symbol, raw_rules in self.directives.resume.items():
            refined_rules = []
            for rule in raw_rules:
                if isinstance(rule, unrepr) and rule.s.isidentifier():
                    try:
                        nd = self.rules[rule.s][0].children[1]
                        refined = self._gen_search_rule(nd)
                    except IndexError:
                        refined = ""
                    if refined:
                        refined_rules.append(refined)
                    else:
                        self.tree.new_error(nd, 'Symbol "%s" cannot be used in resume rule, since'
                                                ' it represents neither literal nor regexp!' % rule.s)
                else:
                    refined_rules.append(rule)
            resume_rules[symbol] = refined_rules
        definitions.append((self.RESUME_RULES_KEYWORD, repr(resume_rules)))

        # prepare and add customized error-messages

        for symbol, err_msgs in self.directives.error.items():
            custom_errors = []  # type: List[Tuple[ReprType, ReprType]]
            for search, message in err_msgs:
                if isinstance(search, unrepr) and search.s.isidentifier():
                    try:
                        nd = self.rules[search.s][0].children[1]
                        search = self._gen_search_rule(nd)
                    except IndexError:
                        search = ''
                custom_errors.append((search, message))
            definitions.append((symbol + self.ERR_MSG_SUFFIX, repr(custom_errors)))

        for symbol in self.directives.error.keys():
            if symbol not in self.consumed_custom_errors:
                def_node = self.rules[symbol][0]
                self.tree.new_error(
                    def_node, 'Customized error message for symbol "{}" will never be used, '
                    'because the mandatory marker "§" appears nowhere in its definiendum!'
                    .format(symbol), Error.UNUSED_ERROR_HANDLING_WARNING)

        # prepare and add skip-rules

        for symbol, skip in self.directives.skip.items():
            skip_rules = []  # type: List[ReprType]
            for search in skip:
                if isinstance(search, unrepr) and search.s.isidentifier():
                    try:
                        nd = self.rules[search.s][0].children[1]
                        search = self._gen_search_rule(nd)
                    except IndexError:
                        search = ''
                skip_rules.append(search)
            definitions.append((symbol + self.SKIP_RULES_SUFFIX, repr(skip_rules)))

        for symbol in self.directives.skip.keys():
            if symbol not in self.consumed_skip_rules:
                def_node = self.rules[symbol][0]
                self.tree.new_error(
                    def_node, '"Skip-rules" for symbol "{}" will never be used, '
                    'because the mandatory marker "§" appears nowhere in its definiendum!'
                    .format(symbol), Error.UNUSED_ERROR_HANDLING_WARNING)

        # prepare parser class header and docstring and
        # add EBNF grammar to the doc string of the parser class

        article = 'an ' if self.grammar_name[0:1] in "AaEeIiOoUu" else 'a '  # what about 'hour', 'universe' etc.?
        show_source = get_config_value('add_grammar_source_to_parser_docstring')
        declarations = ['class ' + self.grammar_name
                        + 'Grammar(Grammar):',
                        'r"""Parser for ' + article + self.grammar_name
                        + ' source file'
                        + ('. Grammar:' if self.grammar_source and show_source else '.')]
        definitions.append(('parser_initialization__', '["upon instantiation"]'))
        definitions.append(('static_analysis_pending__', 'True'))
        if self.grammar_source:
            definitions.append(('source_hash__',
                                '"%s"' % md5(self.grammar_source, __version__)))
            declarations.append('')
            if show_source:
                declarations += [line for line in self.grammar_source.split('\n')]
            while declarations[-1].strip() == '':
                declarations = declarations[:-1]
        declarations.append('"""')

        # turn definitions into declarations in reverse order

        self.root_symbol = definitions[0][0] if definitions else ""
        definitions.reverse()
        declarations += [symbol + ' = Forward()'
                         for symbol in sorted(list(self.recursive))]
        for symbol, statement in definitions:
            if symbol in self.recursive:
                declarations += [symbol + '.set(' + statement + ')']
            else:
                declarations += [symbol + ' = ' + statement]

        # check for symbols used but never defined

        defined_symbols = set(self.rules.keys()) | self.RESERVED_SYMBOLS
        for symbol in self.symbols:
            if symbol not in defined_symbols:
                self.tree.new_error(self.symbols[symbol],
                                    "Missing definition for symbol '%s'" % symbol)
                # root_node.error_flag = True

        # check for unconnected rules

        defined_symbols.difference_update(self.RESERVED_SYMBOLS)

        def remove_connections(symbol):
            """Recursively removes all symbols which appear in the
            definiens of a particular symbol."""
            if symbol in defined_symbols:
                defined_symbols.remove(symbol)
                for related in self.rules[symbol][1:]:
                    remove_connections(str(related))

        remove_connections(self.root_symbol)
        for leftover in defined_symbols:
            self.tree.new_error(self.rules[leftover][0],
                                ('Rule "%s" is not connected to parser root "%s" !') %
                                (leftover, self.root_symbol), Error.WARNING)

        # set root_symbol parser and assemble python grammar definition

        if self.root_symbol and 'root__' not in self.rules:
            declarations.append('root__ = ' + self.root_symbol)
899
        declarations.append('')
        self._result = '\n    '.join(declarations) \
                       + GRAMMAR_FACTORY.format(NAME=self.grammar_name, ID=self.grammar_id)
        return self._result


    ## compilation methods

    def on_syntax(self, node: Node) -> str:
        definitions = []  # type: List[Tuple[str, str]]

        # drop the wrapping sequence node
        if len(node.children) == 1 and node.children[0].is_anonymous():
            node = node.children[0]

        # compile definitions and directives and collect definitions
        for nd in node.children:
            if nd.tag_name == "definition":
                definitions.append(self.compile(nd))
            else:
                assert nd.tag_name == "directive", nd.as_sxpr()
                self.compile(nd)
            # node.error_flag = max(node.error_flag, nd.error_flag)
        self.definitions.update(definitions)

        grammar_python_src = self.assemble_parser(definitions, node)
        if get_config_value('early_static_analysis'):
            grammar_class = compile_python_object(DHPARSER_IMPORTS + grammar_python_src, self.grammar_name)
            try:
                _ = grammar_class()
            except GrammarError as error:
                for sym, prs, err in error.errors:
                    symdef_node = self.rules[sym][0]
                    err.pos = self.rules[sym][0].pos
                    self.tree.add_error(symdef_node, err)
        return grammar_python_src


    def on_definition(self, node: Node) -> Tuple[str, str]:
        rule = node.children[0].content
        if rule in self.rules:
            first = self.rules[rule][0]
            if not first.errors:
                self.tree.new_error(first, 'First definition of rule "%s" '
                                    'followed by illegal redefinitions.' % rule)
            self.tree.new_error(node, 'A rule "%s" has already been defined earlier.' % rule)
        elif rule in EBNFCompiler.RESERVED_SYMBOLS:
            self.tree.new_error(node, 'Symbol "%s" is a reserved symbol.' % rule)
        elif not sane_parser_name(rule):
            self.tree.new_error(node, 'Illegal symbol "%s". Symbols must not start or '
                                'end with a double underscore "__".' % rule)
        elif rule in self.directives.tokens:
            self.tree.new_error(node, 'Symbol "%s" has already been defined as '
                                'a preprocessor token.' % rule)
        elif keyword.iskeyword(rule):
            self.tree.new_error(node, 'Python keyword "%s" may not be used as a symbol. '
                                % rule + '(This may change in the future.)')
        try:
            self.current_symbols = [node]
            self.rules[rule] = self.current_symbols
            defn = self.compile(node.children[1])
            if rule in self.variables:
                defn = 'Capture(%s)' % defn
                self.variables.remove(rule)
            elif defn.find("(") < 0:
                # assume it's a synonym, like 'page = REGEX_PAGE_NR'
                defn = 'Synonym(%s)' % defn
        except TypeError as error:
            from traceback import extract_tb
            trace = str(extract_tb(error.__traceback__)[-1])
            errmsg = "%s (TypeError: %s; %s)\n%s" \
                     % (EBNFCompiler.AST_ERROR, str(error), trace, node.as_sxpr())
            self.tree.new_error(node, errmsg)
            rule, defn = rule + ':error', '"' + errmsg + '"'
        return rule, defn


    def on_directive(self, node: Node) -> str:
        key = node.children[0].content
        assert key not in self.directives.tokens

        if key not in self.REPEATABLE_DIRECTIVES and not key.endswith('_error'):
            if key in self.defined_directives:
                self.tree.new_error(node, 'Directive "%s" has already been defined earlier. '
                                    % key + 'Later definition will be ignored!',
                                    code=Error.REDEFINED_DIRECTIVE)
                return ""
            self.defined_directives.add(key)