The name of the initial branch for new projects is now "main" instead of "master". Existing projects remain unchanged. More information: https://doku.lrz.de/display/PUBLIC/GitLab

ebnf.py 58.6 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16
# ebnf.py - EBNF -> Python-Parser compilation for DHParser
#
# Copyright 2016  by Eckhart Arnold (arnold@badw.de)
#                 Bavarian Academy of Sciences and Humanities (badw.de)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.  See the License for the specific language governing
# permissions and limitations under the License.
17 18


19 20 21 22 23
"""
Module ``ebnf`` provides a self-hosting parser for EBNF-Grammars as
well as an EBNF-compiler that compiles an EBNF-Grammar into a
DHParser based Grammar class that can be executed to parse source text
conforming to this grammar into contrete syntax trees.
24 25
"""

26

27
from collections import OrderedDict
28
from functools import partial
eckhart's avatar
eckhart committed
29 30
import keyword
import os
Eckhart Arnold's avatar
Eckhart Arnold committed
31
from typing import Callable, Dict, List, Set, Tuple, Sequence, Union, Optional, Any, cast
32

eckhart's avatar
eckhart committed
33
from DHParser.compile import CompilerError, Compiler, compile_source, visitor_name
34
from DHParser.error import Error
35
from DHParser.parse import Grammar, mixin_comment, Forward, RegExp, Whitespace, \
36 37
    NegativeLookahead, Alternative, Series, Option, OneOrMore, ZeroOrMore, Token, \
    GrammarError
38
from DHParser.preprocess import nil_preprocessor, PreprocessorFunc
eckhart's avatar
eckhart committed
39
from DHParser.syntaxtree import Node, WHITESPACE_PTYPE, TOKEN_PTYPE
eckhart's avatar
eckhart committed
40
from DHParser.toolkit import load_if_file, escape_re, md5, sane_parser_name, re, expand_table, \
Eckhart Arnold's avatar
Eckhart Arnold committed
41
    GLOBALS, get_config_value, unrepr, compile_python_object
eckhart's avatar
eckhart committed
42
from DHParser.transform import TransformationFunc, traverse, remove_brackets, \
Eckhart Arnold's avatar
Eckhart Arnold committed
43
    reduce_single_child, replace_by_single_child, remove_whitespace, remove_empty, \
eckhart's avatar
eckhart committed
44
    remove_tokens, flatten, forbid, assert_content
45
from DHParser.versionnumber import __version__
Eckhart Arnold's avatar
Eckhart Arnold committed
46

eckhart's avatar
eckhart committed
47

48

49
# Public API of this module: factory functions for the EBNF preprocessor,
# grammar, transformer and compiler, plus the supporting classes and
# type aliases for compiled-grammar factory functions.
__all__ = ('get_ebnf_preprocessor',
           'get_ebnf_grammar',
           'get_ebnf_transformer',
           'get_ebnf_compiler',
           'EBNFGrammar',
           'EBNFTransform',
           'EBNFCompilerError',
           'EBNFCompiler',
           'grammar_changed',
           'compile_ebnf',
           'PreprocessorFactoryFunc',
           'ParserFactoryFunc',
           'TransformerFactoryFunc',
           'CompilerFactoryFunc')
63 64


65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95
########################################################################
#
# source code support
#
########################################################################


dhparserdir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))


DHPARSER_IMPORTS = '''
import collections
from functools import partial
import os
import sys

sys.path.append(r'{dhparserdir}')

try:
    import regex as re
except ImportError:
    import re
from DHParser import logging, is_filename, load_if_file, \\
    Grammar, Compiler, nil_preprocessor, PreprocessorToken, Whitespace, DropWhitespace, \\
    Lookbehind, Lookahead, Alternative, Pop, Token, DropToken, Synonym, AllOf, SomeOf, \\
    Unordered, Option, NegativeLookbehind, OneOrMore, RegExp, Retrieve, Series, Capture, \\
    ZeroOrMore, Forward, NegativeLookahead, Required, mixin_comment, compile_source, \\
    grammar_changed, last_value, counterpart, accumulate, PreprocessorFunc, is_empty, \\
    Node, TransformationFunc, TransformationDict, transformation_factory, traverse, \\
    remove_children_if, move_adjacent, normalize_whitespace, is_anonymous, matches_re, \\
    reduce_single_child, replace_by_single_child, replace_or_reduce, remove_whitespace, \\
Eckhart Arnold's avatar
Eckhart Arnold committed
96 97 98
    remove_empty, remove_tokens, flatten, is_insignificant_whitespace, \\
    collapse, collapse_if, replace_content, WHITESPACE_PTYPE, TOKEN_PTYPE, \\
    remove_nodes, remove_content, remove_brackets, change_tag_name, remove_anonymous_tokens, \\
99 100 101
    keep_children, is_one_of, not_one_of, has_content, apply_if, remove_first, remove_last, \\
    remove_anonymous_empty, keep_nodes, traverse_locally, strip, lstrip, rstrip, \\
    replace_content, replace_content_by, forbid, assert_content, remove_infix_operator, \\
102
    error_on, recompile_grammar, left_associative, swing_left, GLOBALS
103
'''.format(dhparserdir=dhparserdir)
104 105


Eckhart Arnold's avatar
Eckhart Arnold committed
106 107 108 109 110 111 112
########################################################################
#
# EBNF scanning
#
########################################################################


113 114
def get_ebnf_preprocessor() -> PreprocessorFunc:
    """Returns the preprocessor for EBNF sources.  This is always
    ``nil_preprocessor``, i.e. EBNF sources are passed through unaltered."""
    return nil_preprocessor
Eckhart Arnold's avatar
Eckhart Arnold committed
115 116 117 118 119 120 121 122


########################################################################
#
# EBNF parsing
#
########################################################################

123

di68kap's avatar
di68kap committed
124
class EBNFGrammar(Grammar):
    r"""
    Parser for an EBNF source file, with this grammar:

    @ comment    = /#.*(?:\n|$)/                    # comments start with '#' and eat all chars up to and including '\n'
    @ whitespace = /\s*/                            # whitespace includes linefeed
    @ literalws  = right                            # trailing whitespace of literals will be ignored tacitly

    syntax     = [~//] { definition | directive } §EOF
    definition = symbol §"=" expression
    directive  = "@" §symbol "=" (regexp | literal | symbol) { "," (regexp | literal | symbol) }

    expression = term { "|" term }
    term       = { ["§"] factor }+                       # "§" means all following factors mandatory
    factor     = [flowmarker] [retrieveop] symbol !"="   # negative lookahead to be sure it's not a definition
               | [flowmarker] literal
               | [flowmarker] plaintext
               | [flowmarker] regexp
               | [flowmarker] whitespace
               | [flowmarker] oneormore
               | [flowmarker] group
               | [flowmarker] unordered
               | repetition
               | option

    flowmarker = "!"  | "&"                         # '!' negative lookahead, '&' positive lookahead
               | "-!" | "-&"                        # '-' negative lookbehind, '-&' positive lookbehind
    retrieveop = "::" | ":"                         # '::' pop, ':' retrieve

    group      = "(" §expression ")"
    unordered  = "<" §expression ">"                # elements of expression in arbitrary order
    oneormore  = "{" expression "}+"
    repetition = "{" §expression "}"
    option     = "[" §expression "]"

    symbol     = /(?!\d)\w+/~                       # e.g. expression, factor, parameter_list
    literal    = /"(?:[^"]|\\")*?"/~                # e.g. "(", '+', 'while'
               | /'(?:[^']|\\')*?'/~                # whitespace following literals will be ignored tacitly.
    plaintext  = /`(?:[^"]|\\")*?`/~                # like literal but does not eat whitespace
    regexp     = /\/(?:\\\/|[^\/])*?\//~            # e.g. /\w+/, ~/#.*(?:\n|$)/~
    whitespace = /~/~                               # insignificant whitespace

    EOF = !/./
    """
    expression = Forward()  # 'expression' is referenced before it is defined below
    # md5-checksum of the EBNF source above; compared by grammar_changed()
    # to detect whether this class is out of date with respect to its source
    source_hash__ = "82a7c668f86b83f86515078e6c9093ed"
    static_analysis_pending__ = []
    parser_initialization__ = ["upon instantiation"]
    COMMENT__ = r'#.*(?:\n|$)'
    WHITESPACE__ = r'\s*'
    WSP_RE__ = mixin_comment(whitespace=WHITESPACE__, comment=COMMENT__)
    wsp__ = Whitespace(WSP_RE__)
    EOF = NegativeLookahead(RegExp('.'))
    whitespace = Series(RegExp('~'), wsp__)
    regexp = Series(RegExp('/(?:(?<!\\\\)\\\\(?:/)|[^/])*?/'), wsp__)
    plaintext = Series(RegExp('`(?:(?<!\\\\)\\\\`|[^"])*?`'), wsp__)
    literal = Alternative(Series(RegExp('"(?:(?<!\\\\)\\\\"|[^"])*?"'), wsp__),
                          Series(RegExp("'(?:(?<!\\\\)\\\\'|[^'])*?'"), wsp__))
    symbol = Series(RegExp('(?!\\d)\\w+'), wsp__)
    option = Series(Series(Token("["), wsp__), expression, Series(Token("]"), wsp__), mandatory=1)
    repetition = Series(Series(Token("{"), wsp__), expression, Series(Token("}"), wsp__), mandatory=1)
    oneormore = Series(Series(Token("{"), wsp__), expression, Series(Token("}+"), wsp__))
    unordered = Series(Series(Token("<"), wsp__), expression, Series(Token(">"), wsp__), mandatory=1)
    group = Series(Series(Token("("), wsp__), expression, Series(Token(")"), wsp__), mandatory=1)
    retrieveop = Alternative(Series(Token("::"), wsp__), Series(Token(":"), wsp__))
    flowmarker = Alternative(Series(Token("!"), wsp__), Series(Token("&"), wsp__),
                             Series(Token("-!"), wsp__), Series(Token("-&"), wsp__))
    factor = Alternative(Series(Option(flowmarker), Option(retrieveop), symbol,
                                NegativeLookahead(Series(Token("="), wsp__))), Series(Option(flowmarker), literal),
                         Series(Option(flowmarker), plaintext), Series(Option(flowmarker), regexp),
                         Series(Option(flowmarker), whitespace), Series(Option(flowmarker), oneormore),
                         Series(Option(flowmarker), group), Series(Option(flowmarker), unordered), repetition, option)
    term = OneOrMore(Series(Option(Series(Token("§"), wsp__)), factor))
    expression.set(Series(term, ZeroOrMore(Series(Series(Token("|"), wsp__), term))))
    directive = Series(Series(Token("@"), wsp__), symbol, Series(Token("="), wsp__),
                       Alternative(regexp, literal, symbol),
                       ZeroOrMore(Series(Series(Token(","), wsp__), Alternative(regexp, literal, symbol))), mandatory=1)
    definition = Series(symbol, Series(Token("="), wsp__), expression, mandatory=1)
    syntax = Series(Option(Series(wsp__, RegExp(''))), ZeroOrMore(Alternative(definition, directive)), EOF, mandatory=2)
    root__ = syntax


206
def grammar_changed(grammar_class, grammar_source: str) -> bool:
    """
    Returns ``True`` if ``grammar_class`` does not reflect the latest
    changes of ``grammar_source``

    Parameters:
        grammar_class:  the parser class representing the grammar
            or the file name of a compiler suite containing the grammar
        grammar_source:  File name or string representation of the
            EBNF code of the grammar

    Returns (bool):
        True, if the source text of the grammar is different from the
        source from which the grammar class was generated
    """
    grammar = load_if_file(grammar_source)
    # the checksum includes the DHParser version, so a version bump also
    # counts as a change
    chksum = md5(grammar, __version__)
    if isinstance(grammar_class, str):
        # grammar_class is a file name: scan the generated python code for
        # the Grammar-subclass and its embedded source_hash__ value
        # grammar_class = load_compiler_suite(grammar_class)[1]
        with open(grammar_class, 'r', encoding='utf8') as f:
            pycode = f.read()
        m = re.search(r'class \w*\(Grammar\)', pycode)
        if m:
            # search only the text after the class header for the hash
            m = re.search('    source_hash__ *= *"([a-z0-9]*)"',
                          pycode[m.span()[1]:])
            return not (m and m.groups() and m.groups()[-1] == chksum)
        else:
            # no Grammar-subclass found at all: treat as changed
            return True
    else:
        return chksum != grammar_class.source_hash__


238
def get_ebnf_grammar() -> EBNFGrammar:
    """Returns a cached singleton instance of ``EBNFGrammar``, creating
    and storing it in the ``GLOBALS`` object on first use."""
    if not hasattr(GLOBALS, 'ebnf_grammar_singleton'):
        GLOBALS.ebnf_grammar_singleton = EBNFGrammar()
    return GLOBALS.ebnf_grammar_singleton
Eckhart Arnold's avatar
Eckhart Arnold committed
245 246 247 248 249 250 251 252 253


########################################################################
#
# EBNF concrete to abstract syntax tree transformation and validation
#
########################################################################


254
# Transformation table that maps node-(tag-)names of the concrete syntax
# tree of an EBNF source to the transformations which turn it into an
# abstract syntax tree (applied via ``traverse``, see EBNFTransform below).
EBNF_AST_transformation_table = {
    # AST Transformations for EBNF-grammar
    "<":
        [remove_whitespace, remove_empty],
    "syntax":
        [],  # otherwise '"*": replace_by_single_child' would be applied
    "directive, definition":
        [flatten, remove_tokens('@', '=', ',')],
    "expression":
        [replace_by_single_child, flatten, remove_tokens('|')],  # remove_infix_operator],
    "term":
        [replace_by_single_child, flatten],  # supports both idioms:
                                             # "{ factor }+" and "factor { factor }"
    "factor, flowmarker, retrieveop":
        replace_by_single_child,
    "group":
        [remove_brackets, replace_by_single_child],
    "unordered":
        remove_brackets,
    "oneormore, repetition, option":
        [reduce_single_child, remove_brackets,
         forbid('repetition', 'option', 'oneormore'), assert_content(r'(?!§)(?:.|\n)*')],
    "symbol, literal, regexp":
        reduce_single_child,
    (TOKEN_PTYPE, WHITESPACE_PTYPE):
        reduce_single_child,
    # "list_":
    #     [flatten, remove_infix_operator],
    "*":
        replace_by_single_child
}

286

Eckhart Arnold's avatar
Eckhart Arnold committed
287
def EBNFTransform() -> TransformationFunc:
    """Creates a fresh CST-to-AST transformation function for EBNF
    syntax trees, based on a copy of ``EBNF_AST_transformation_table``."""
    table_copy = EBNF_AST_transformation_table.copy()
    return partial(traverse, processing_table=table_copy)
di68kap's avatar
di68kap committed
289

eckhart's avatar
eckhart committed
290

291
def get_ebnf_transformer() -> TransformationFunc:
    """Returns a cached singleton EBNF-transformer, creating and storing
    it in the ``GLOBALS`` object on first use."""
    if not hasattr(GLOBALS, 'EBNF_transformer_singleton'):
        GLOBALS.EBNF_transformer_singleton = EBNFTransform()
    return GLOBALS.EBNF_transformer_singleton
Eckhart Arnold's avatar
Eckhart Arnold committed
298 299 300 301 302 303 304 305


########################################################################
#
# EBNF abstract syntax tree to Python parser compilation
#
########################################################################

306

307
# Type aliases for the zero-argument factory functions that a compiled
# grammar module exposes (cf. the *_FACTORY code templates in this module).
PreprocessorFactoryFunc = Callable[[], PreprocessorFunc]
ParserFactoryFunc = Callable[[], Grammar]
TransformerFactoryFunc = Callable[[], TransformationFunc]
CompilerFactoryFunc = Callable[[], Compiler]

312 313 314
PREPROCESSOR_FACTORY = '''
def get_preprocessor() -> PreprocessorFunc:
    return {NAME}Preprocessor
315 316 317 318
'''


GRAMMAR_FACTORY = '''
319
def get_grammar() -> {NAME}Grammar:
eckhart's avatar
eckhart committed
320
    global GLOBALS
321
    try:
322
        grammar = GLOBALS.{NAME}_{ID:08d}_grammar_singleton
323
    except AttributeError:
324
        GLOBALS.{NAME}_{ID:08d}_grammar_singleton = {NAME}Grammar()
eckhart's avatar
eckhart committed
325
        if hasattr(get_grammar, 'python_src__'):
326 327
            GLOBALS.{NAME}_{ID:08d}_grammar_singleton.python_src__ = get_grammar.python_src__
        grammar = GLOBALS.{NAME}_{ID:08d}_grammar_singleton
328
    return grammar
329 330 331 332
'''


TRANSFORMER_FACTORY = '''
333
def {NAME}Transform() -> TransformationFunc:
334 335
    return partial(traverse, processing_table={NAME}_AST_transformation_table.copy())

336
def get_transformer() -> TransformationFunc:
337
    try:
338
        transformer = GLOBALS.{NAME}_{ID:08d}_transformer_singleton
339
    except AttributeError:
340 341
        GLOBALS.{NAME}_{ID:08d}_transformer_singleton = {NAME}Transform()
        transformer = GLOBALS.{NAME}_{ID:08d}_transformer_singleton
342
    return transformer
343 344 345 346
'''


COMPILER_FACTORY = '''
eckhart's avatar
eckhart committed
347
def get_compiler() -> {NAME}Compiler:
348
    try:
349
        compiler = GLOBALS.{NAME}_{ID:08d}_compiler_singleton
350
    except AttributeError:
351 352
        GLOBALS.{NAME}_{ID:08d}_compiler_singleton = {NAME}Compiler()
        compiler = GLOBALS.{NAME}_{ID:08d}_compiler_singleton
353
    return compiler
354 355
'''

Eckhart Arnold's avatar
Eckhart Arnold committed
356

357 358 359 360
# Predefined whitespace models, keyed by the names that can be used in an
# EBNF source (cf. EBNFDirectives.whitespace, which defaults to 'vertical').
WHITESPACE_TYPES = {'horizontal': r'[\t ]*',  # default: horizontal
                    'linefeed': r'[ \t]*\n?(?!\s*\n)[ \t]*',
                    'vertical': r'\s*'}

# Admissible values of the 'drop' directive-field (cf. EBNFDirectives.drop)
# — presumably selecting which node types are dropped from the tree; confirm
# against the directive-handling code further down in this file.
DROP_TOKEN  = 'token'
DROP_WSPC   = 'whitespace'
DROP_VALUES = {DROP_TOKEN, DROP_WSPC}

# Representation of Python code or, rather, something that will be output as Python code
ReprType = Union[str, unrepr]
eckhart's avatar
eckhart committed
367 368


369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395
class EBNFDirectives:
    """
    A record that keeps information about compiler directives
    during the compilation process.

    Attributes:
        whitespace:  the regular expression string for (insignificant)
                whitespace

        comment:  the regular expression string for comments

        literalws:  automatic whitespace eating next to literals. Can
                be either 'left', 'right', 'none', 'both'

        tokens:  set of the names of preprocessor tokens
        filter:  mapping of symbols to python filter functions that
                will be called on any retrieve / pop - operations on
                these symbols

        error:  mapping of symbols to tuples of match conditions and
                customized error messages. A match condition can be
                either a string or a regular expression. The first
                error message where the search condition matches will
                be displayed. An empty string '' as search condition
                always matches, so in case of multiple error messages,
                this condition should be placed at the end.

        skip:  mapping of symbols to a list of search expressions. A
                search expression can be either a string or a regular
                expression. The closest match is the point of reentry
                for the series-parser when a mandatory item failed to
                match the following text.

        resume:  mapping of symbols to a list of search expressions. A
                search expression can be either a string or a regular
                expression. The closest match is the point of reentry
                after a parsing error has occurred. Other than the
                skip field, this configures resuming after the
                failing parser has returned.

        drop:  set of names of node types whose results shall be
                dropped (cf. DROP_VALUES, i.e. 'token', 'whitespace').
    """
    __slots__ = ['whitespace', 'comment', 'literalws', 'tokens', 'filter', 'error', 'skip',
                 'resume', 'drop']

    def __init__(self):
        self.whitespace = WHITESPACE_TYPES['vertical']  # type: str
        self.comment = ''     # type: str
        self.literalws = {'right'}  # type: Collection[str]
        self.tokens = set()   # type: Collection[str]
        self.filter = dict()  # type: Dict[str, str]
        self.error = dict()   # type: Dict[str, List[Tuple[ReprType, ReprType]]]
        self.skip = dict()    # type: Dict[str, List[Union[unrepr, str]]]
        self.resume = dict()  # type: Dict[str, List[Union[unrepr, str]]]
        self.drop = set()     # type: Set[str]

    def __getitem__(self, key):
        # allows dict-like read access: directives['comment']
        return getattr(self, key)

    def __setitem__(self, key, value):
        # dict-like write access; only already existing fields may be set
        assert hasattr(self, key)
        setattr(self, key, value)

    def keys(self):
        # BUG FIX: because this class defines ``__slots__``, its instances
        # have no ``__dict__``, so the former ``self.__dict__.keys()``
        # raised an AttributeError.  The slot names are the record's keys.
        return self.__slots__


eckhart's avatar
eckhart committed
434
class EBNFCompilerError(CompilerError):
    """Error raised by the `EBNFCompiler` class, e.g. when one of its
    code-generation methods is called before the compiler has been run.
    (These are not compilation errors in the strict sense; for those see
    `CompilationError` in module ``dsl.py``.)"""
    pass


440
class EBNFCompiler(Compiler):
441 442
    """
    Generates a Parser from an abstract syntax tree of a grammar specified
443
    in EBNF-Notation.
444 445 446 447 448 449 450

    Instances of this class must be called with the root-node of the
    abstract syntax tree from an EBNF-specification of a formal language.
    The returned value is the Python-source-code of a Grammar class for
    this language that can be used to parse texts in this language.
    See classes `parser.Compiler` and `parser.Grammar` for more information.

451
    Additionally, class EBNFCompiler provides helper methods to generate
452 453 454 455 456
    code-skeletons for a preprocessor, AST-transformation and full
    compilation of the formal language. These method's names start with
    the prefix `gen_`.

    Attributes:
457
        current_symbols:  During compilation, a list containing the root
458 459 460 461
                node of the currently compiled definition as first element
                and then the nodes of the symbols that are referred to in
                the currently compiled definition.

462
        rules:  Dictionary that maps rule names to a list of Nodes that
463 464 465 466 467 468
                contain symbol-references in the definition of the rule.
                The first item in the list is the node of the rule-
                definition itself. Example:

                           `alternative = a | b`

469
                Now `[node.content for node in self.rules['alternative']]`
470 471
                yields `['alternative = a | b', 'a', 'b']`

472
        symbols:  A mapping of symbol names to their first usage (not
473 474
                their definition!) in the EBNF source.

475
        variables:  A set of symbols names that are used with the
476 477 478 479
                Pop or Retrieve operator. Because the values of these
                symbols need to be captured they are called variables.
                See `test_parser.TestPopRetrieve` for an example.

480
        recursive:  A set of symbols that are used recursively and
481 482
                therefore require a `Forward`-operator.

483
        definitions:  A dictionary of definitions. Other than `rules`
484 485
                this maps the symbols to their compiled definienda.

486
        deferred_tasks:  A list of callables that is filled during
487 488 489 490 491
                compilation, but that will be executed only after
                compilation has finished. Typically, it contains
                semantic checks that require information that
                is only available upon completion of compilation.

492
        root_symbol: The name of the root symbol.
493

494 495 496 497 498 499 500 501 502 503 504 505
        directives:  A record of all directives and their default values.

        defined_directives:  A set of all directives that have already been
                defined. With the exception of those directives contained
                in EBNFCompiler.REPEATABLE_DIRECTIVES, directives must only
                be defined once.

        consumed_custom_errors:  A set of symbols for which a custom error
                has been defined and(!) consumed during compilation. This
                allows to add a compiler error in those cases where (i) an
                error message has been defined but will never be used or (ii)
                an error message is accidentally used twice. For examples, see
506 507 508 509
                `test_ebnf.TestErrorCustomization`.

        consumed_skip_rules: The same as `consumed_custom_errors` only for
                in-series-resume-rules (aka 'skip-rules') for Series-parsers.
510 511 512

        re_flags:  A set of regular expression flags to be added to all
                regular expressions found in the current parsing process
513

eckhart's avatar
eckhart committed
514 515 516 517
        grammar_name:  The name of the grammar to be compiled

        grammar_source:  The source code of the grammar to be compiled.

518 519 520
        grammar_id: a unique id for every compiled grammar. (Required for
                disambiguation of of thread local variables storing
                compiled texts.)
521 522
    """
    COMMENT_KEYWORD = "COMMENT__"
523
    WHITESPACE_KEYWORD = "WSP_RE__"
Eckhart Arnold's avatar
Eckhart Arnold committed
524
    RAW_WS_KEYWORD = "WHITESPACE__"
525
    WHITESPACE_PARSER_KEYWORD = "wsp__"
526
    DROP_WHITESPACE_PARSER_KEYWORD = "dwsp__"
527
    RESUME_RULES_KEYWORD = "resume_rules__"
528
    SKIP_RULES_SUFFIX = '_skip__'
eckhart's avatar
eckhart committed
529 530 531
    ERR_MSG_SUFFIX = '_err_msg__'
    RESERVED_SYMBOLS = {WHITESPACE_KEYWORD, RAW_WS_KEYWORD, COMMENT_KEYWORD,
                        RESUME_RULES_KEYWORD, ERR_MSG_SUFFIX}
532
    AST_ERROR = "Badly structured syntax tree. " \
Eckhart Arnold's avatar
Eckhart Arnold committed
533
                "Potentially due to erroneous AST transformation."
534 535 536 537
    PREFIX_TABLE = {'§': 'Required',
                    '&': 'Lookahead', '!': 'NegativeLookahead',
                    '-&': 'Lookbehind', '-!': 'NegativeLookbehind',
                    '::': 'Pop', ':': 'Retrieve'}
538
    REPEATABLE_DIRECTIVES = {'tokens'}
539

540

eckhart's avatar
eckhart committed
541
    def __init__(self, grammar_name="DSL", grammar_source=""):
        """Creates an EBNF-compiler for the grammar named ``grammar_name``
        with EBNF-source ``grammar_source`` (file name or source text)."""
        # grammar_id disambiguates the GLOBALS-singletons of different
        # compiled grammars (see the {ID:08d}-slots of the *_FACTORY
        # templates); it is incremented by every _reset()
        self.grammar_id = 0
        super(EBNFCompiler, self).__init__()  # calls the _reset()-method
        self.set_grammar_name(grammar_name, grammar_source)
545

546

547
    def _reset(self):
        """Clears all compilation state before a new compiler run.
        The meaning of the individual attributes is documented in the
        class docstring."""
        super(EBNFCompiler, self)._reset()
        self._result = ''           # type: str
        self.re_flags = set()       # type: Set[str]
        self.rules = OrderedDict()  # type: OrderedDict[str, List[Node]]
        self.current_symbols = []   # type: List[Node]
        self.symbols = {}           # type: Dict[str, Node]
        self.variables = set()      # type: Set[str]
        self.recursive = set()      # type: Set[str]
        self.definitions = {}       # type: Dict[str, str]
        self.deferred_tasks = []    # type: List[Callable]
        self.root_symbol = ""       # type: str
        self.directives = EBNFDirectives()   # type: EBNFDirectives
        self.defined_directives = set()      # type: Set[str]
        self.consumed_custom_errors = set()  # type: Set[str]
        self.consumed_skip_rules = set()     # type: Set[str]
        # each compiler run gets a fresh id for its GLOBALS-singletons
        self.grammar_id += 1

565

Eckhart Arnold's avatar
Eckhart Arnold committed
566
    @property
    def result(self) -> str:
        """The result of the last compiler run: the Python source code of
        the generated grammar class ('' before the first run)."""
        return self._result

eckhart's avatar
eckhart committed
570 571 572 573 574 575 576 577 578 579 580 581

    def set_grammar_name(self, grammar_name: str = "", grammar_source: str = ""):
        """
        Changes the grammar name and source.

        The grammar name and the source text are metadata that do not
        affect the compilation process. They are merely used to name and
        annotate the output. Returns `self`.
        """
        assert grammar_name == "" or re.match(r'\w+\Z', grammar_name)
        if not grammar_name:
            # no name given: derive one from the source, if it looks like
            # a file path
            if re.fullmatch(r'[\w/:\\]+', grammar_source):
                base = os.path.basename(grammar_source)
                grammar_name = os.path.splitext(base)[0]
        self.grammar_name = grammar_name if grammar_name else "NameUnknown"
        self.grammar_source = load_if_file(grammar_source)
        return self


587
    # methods for generating skeleton code for preprocessor, transformer, and compiler
588

589
    def gen_preprocessor_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for a preprocessor-function for
        the previously compiled formal language.
        """
        name = self.grammar_name + "Preprocessor"
        # identity preprocessor: yields the unchanged text plus a trivial
        # position mapping
        stub = "def %s(text):\n    return text, lambda i: i\n" % name
        factory = PREPROCESSOR_FACTORY.format(NAME=self.grammar_name)
        return stub + factory
597

598

599
    def gen_transformer_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for the AST-transformation for the
        previously compiled formal language.
        """
        if not self.rules:
            raise EBNFCompilerError('Compiler must be run before calling '
                                    '"gen_transformer_Skeleton()"!')
        tt_name = self.grammar_name + '_AST_transformation_table'
        # header of the transformation table
        lines = [tt_name + ' = {',
                 '    # AST Transformations for the ' + self.grammar_name + '-grammar',
                 '    "<": flatten,']
        # one (empty) entry per grammar rule, in definition order
        lines.extend('    "' + rule_name + '": [],' for rule_name in self.rules)
        # catch-all entry and closing brace
        lines.extend(['    "*": replace_by_single_child', '}', ''])
        lines.append(TRANSFORMER_FACTORY.format(NAME=self.grammar_name, ID=self.grammar_id))
        return '\n'.join(lines)

624

625
    def gen_compiler_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for a Compiler-class for the
        previously compiled formal language: a class header, constructor and
        ``_reset``-stub, one visitor-method per grammar rule (only the root
        symbol's method is active, the rest are commented out) and the
        compiler-factory.

        Raises:
            EBNFCompilerError: if the compiler has not been run, yet, i.e.
                if the rule-table is still empty.
        """
        if not self.rules:
            # fixed: report this method's actual name, not "gen_Compiler_Skeleton"
            raise EBNFCompilerError('Compiler has not been run before calling '
                                    '"gen_compiler_skeleton()"!')
        compiler = ['class ' + self.grammar_name + 'Compiler(Compiler):',
                    '    """Compiler for the abstract-syntax-tree of a '
                    + self.grammar_name + ' source file.',
                    '    """', '',
                    '    def __init__(self):',
                    '        super(' + self.grammar_name + 'Compiler, self).__init__()',
                    '',
                    '    def _reset(self):',
                    '        super()._reset()',
                    '        # initialize your variables here, not in the constructor!']
        for name in self.rules:
            method_name = visitor_name(name)
            if name == self.root_symbol:
                # working visitor-method for the root symbol ...
                compiler += ['    def ' + method_name + '(self, node):',
                             '        return self.fallback_compiler(node)', '']
            else:
                # ... and commented-out stubs for all other symbols
                compiler += ['    # def ' + method_name + '(self, node):',
                             '    #     return node', '']
        compiler += [COMPILER_FACTORY.format(NAME=self.grammar_name, ID=self.grammar_id)]
        return '\n'.join(compiler)
653

654
    def verify_transformation_table(self, transtable):
        """
        Checks for symbols that occur in the transformation-table but have
        never been defined in the grammar. Usually, this kind of
        inconsistency results from an error like a typo in the transformation
        table. Returns a list of warning-``Error``-objects, one per
        unknown symbol.
        """
        assert self._dirty_flag
        wildcards = {'*', '<', '>', '~'}
        known_symbols = self.rules.keys()
        suspicious = set(expand_table(transtable).keys()) - wildcards
        return [Error(('Symbol "%s" is not defined in grammar %s but appears in '
                       'the transformation table!') % (entry, self.grammar_name),
                      0, Error.UNDEFINED_SYMBOL_IN_TRANSTABLE_WARNING)
                for entry in suspicious
                if entry not in known_symbols and not entry.startswith(":")]

672 673 674 675 676 677 678
    def verify_compiler(self, compiler):
        """
        Checks for on_XXXX()-methods that occur in the compiler, although XXXX
        has never been defined in the grammar. Usually, this kind of
        inconsistency results from an error like a typo in the compiler-code.
        Presently a no-op that always returns ``None``.
        """
        # TODO: add verification code here
679

680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702

    def _check_rx(self, node: Node, rx: str) -> str:
        """
        Checks whether the string ``rx`` represents a valid regular
        expression. A regular expression that spans several lines is
        prefixed with the verbose-flag ('x'), so that insignificant
        whitespace and line breaks are tolerated. Any compilation failure
        is reported as an error on ``node``. Returns the (possibly
        flag-prefixed) regular expression string.
        """
        flags = self.re_flags
        if '\n' in rx:
            flags = flags | {'x'}
        if flags:
            rx = "(?%s)%s" % ("".join(flags), rx)
        try:
            re.compile(rx)
        except Exception as re_error:
            self.tree.new_error(node, "malformed regular expression %s: %s" %
                                (repr(rx), str(re_error)))
        return rx


    def _extract_regex(self, node: Node) -> str:
        """Extracts the regular-expression string from a regexp-Node,
        stripping optional whitespace-markers ('~') and the surrounding
        delimiters (quotes for literals, slashes for regexps)."""
        value = node.content.strip("~")
        delimiters = value[0] + value[-1]
        if delimiters == '""' or delimiters == "''":
            # quoted literal: escape any re-metacharacters in its content
            value = escape_re(value[1:-1])
        elif delimiters == '//' and value != '//':
            # regular expression proper: verify that it compiles
            value = self._check_rx(node, value[1:-1])
        return value


eckhart's avatar
eckhart committed
708
    def _gen_search_rule(self, nd: Node) -> ReprType:
        """Generates a search rule from the node's content, which can be
        either a compiled regular expression (for regexp-nodes) or a plain
        string for simple string search (for literal-nodes). Returns
        an empty string in case the node is neither regexp nor literal.
        """
        tag = nd.tag_name
        if tag == 'regexp':
            return unrepr("re.compile(r'%s')" % self._extract_regex(nd))
        if tag == 'literal':
            s = nd.content.strip()
            if s[0] == '"':
                return s.strip('"')
            return s.strip("'")
        return ''

720 721 722 723 724 725 726
    def _gen_search_list(self, nodes: Sequence[Node]) -> List[Union[unrepr, str]]:
        """Maps each node onto a search rule. Nodes that yield neither a
        regexp nor a literal fall back to their stripped raw content,
        wrapped in ``unrepr``."""
        return [self._gen_search_rule(child) or unrepr(child.content.strip())
                for child in nodes]

727

728 729 730 731 732
    def assemble_parser(self, definitions: List[Tuple[str, str]], root_node: Node) -> str:
        """
        Creates the Python code for the parser after compilation of
        the EBNF-Grammar
        """
733 734 735 736 737 738 739 740 741 742

        # execute deferred tasks, for example semantic checks that cannot
        # be done before the symbol table is complete

        for task in self.deferred_tasks:
            task()

        # provide for capturing of symbols that are variables, i.e. the
        # value of will be retrieved at some point during the parsing process

743 744 745
        if self.variables:
            for i in range(len(definitions)):
                if definitions[i][0] in self.variables:
746
                    definitions[i] = (definitions[i][0], 'Capture(%s)' % definitions[i][1])
747

748 749
        # add special fields for Grammar class

750 751
        definitions.append((self.WHITESPACE_PARSER_KEYWORD,
                            'Whitespace(%s)' % self.WHITESPACE_KEYWORD))
752 753 754
        if DROP_WSPC in self.directives.drop:
            definitions.append((self.DROP_WHITESPACE_PARSER_KEYWORD,
                                'DropWhitespace(%s)' % self.WHITESPACE_KEYWORD))
755
        definitions.append((self.WHITESPACE_KEYWORD,
eckhart's avatar
eckhart committed
756 757
                            ("mixin_comment(whitespace=" + self.RAW_WS_KEYWORD
                             + ", comment=" + self.COMMENT_KEYWORD + ")")))
758 759
        definitions.append((self.RAW_WS_KEYWORD, "r'{}'".format(self.directives.whitespace)))
        definitions.append((self.COMMENT_KEYWORD, "r'{}'".format(self.directives.comment)))
760 761 762

        # prepare and add resume-rules

eckhart's avatar
eckhart committed
763
        resume_rules = dict()  # type: Dict[str, List[ReprType]]
764
        for symbol, raw_rules in self.directives.resume.items():
765 766 767 768 769
            refined_rules = []
            for rule in raw_rules:
                if isinstance(rule, unrepr) and rule.s.isidentifier():
                    try:
                        nd = self.rules[rule.s][0].children[1]
770
                        refined = self._gen_search_rule(nd)
771 772 773 774 775 776 777 778 779 780 781
                    except IndexError:
                        refined = ""
                    if refined:
                        refined_rules.append(refined)
                    else:
                        self.tree.new_error(nd, 'Symbol "%s" cannot be used in resume rule, since'
                                                ' it represents neither literal nor regexp!')
                else:
                    refined_rules.append(rule)
            resume_rules[symbol] = refined_rules
        definitions.append((self.RESUME_RULES_KEYWORD, repr(resume_rules)))
782

eckhart's avatar
eckhart committed
783 784
        # prepare and add customized error-messages

785
        for symbol, err_msgs in self.directives.error.items():
eckhart's avatar
eckhart committed
786
            custom_errors = []  # type: List[Tuple[ReprType, ReprType]]
eckhart's avatar
eckhart committed
787 788 789 790 791 792 793 794 795 796
            for search, message in err_msgs:
                if isinstance(search, unrepr) and search.s.isidentifier():
                    try:
                        nd = self.rules[search.s][0].children[1]
                        search = self._gen_search_rule(nd)
                    except IndexError:
                        search = ''
                custom_errors.append((search, message))
            definitions.append((symbol + self.ERR_MSG_SUFFIX, repr(custom_errors)))

797 798 799 800 801 802
        for symbol in self.directives.error.keys():
            if symbol not in self.consumed_custom_errors:
                def_node = self.rules[symbol][0]
                self.tree.new_error(
                    def_node, 'Customized error message for symbol "{}" will never be used, '
                    'because the mandatory marker "§" appears nowhere in its definiendum!'
803 804 805 806 807
                    .format(symbol), Error.UNUSED_ERROR_HANDLING_WARNING)

        # prepare and add skip-rules

        for symbol, skip in self.directives.skip.items():
Eckhart Arnold's avatar
Eckhart Arnold committed
808
            skip_rules = []  # type: List[ReprType]
809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825
            for search in skip:
                if isinstance(search, unrepr) and search.s.isidentifier():
                    try:
                        nd = self.rules[search.s][0].children[1]
                        search = self._gen_search_rule(nd)
                    except IndexError:
                        search = ''
                skip_rules.append(search)
            definitions.append((symbol + self.SKIP_RULES_SUFFIX, repr(skip_rules)))

        for symbol in self.directives.error.keys():
            if symbol not in self.consumed_skip_rules:
                def_node = self.rules[symbol][0]
                self.tree.new_error(
                    def_node, '"Skip-rules" for symbol "{}" will never be used, '
                    'because the mandatory marker "§" appears nowhere in its definiendum!'
                    .format(symbol), Error.UNUSED_ERROR_HANDLING_WARNING)
826

827 828
        # prepare parser class header and docstring and
        # add EBNF grammar to the doc string of the parser class
829

830
        article = 'an ' if self.grammar_name[0:1] in "AaEeIiOoUu" else 'a '  # what about 'hour', 'universe' etc.?
831
        show_source = get_config_value('add_grammar_source_to_parser_docstring')
eckhart's avatar
eckhart committed
832 833 834 835
        declarations = ['class ' + self.grammar_name
                        + 'Grammar(Grammar):',
                        'r"""Parser for ' + article + self.grammar_name
                        + ' source file'
836
                        + ('. Grammar:' if self.grammar_source and show_source else '.')]
eckhart's avatar
eckhart committed
837
        definitions.append(('parser_initialization__', '["upon instantiation"]'))
838
        definitions.append(('static_analysis_pending__', '[True]'))
839
        if self.grammar_source:
840
            definitions.append(('source_hash__',
841
                                '"%s"' % md5(self.grammar_source, __version__)))
842
            declarations.append('')
843 844
            if show_source:
                declarations += [line for line in self.grammar_source.split('\n')]
845 846 847 848 849
            while declarations[-1].strip() == '':
                declarations = declarations[:-1]
        declarations.append('"""')

        # turn definitions into declarations in reverse order
850

851
        self.root_symbol = definitions[0][0] if definitions else ""
852 853 854 855 856 857 858 859
        definitions.reverse()
        declarations += [symbol + ' = Forward()'
                         for symbol in sorted(list(self.recursive))]
        for symbol, statement in definitions:
            if symbol in self.recursive:
                declarations += [symbol + '.set(' + statement + ')']
            else:
                declarations += [symbol + ' = ' + statement]
860 861 862 863 864 865

        # check for symbols used but never defined

        defined_symbols = set(self.rules.keys()) | self.RESERVED_SYMBOLS
        for symbol in self.symbols:
            if symbol not in defined_symbols:
eckhart's avatar
eckhart committed
866
                self.tree.new_error(self.symbols[symbol],
eckhart's avatar
eckhart committed
867
                                    "Missing definition for symbol '%s'" % symbol)
868
                # root_node.error_flag = True
869 870 871

        # check for unconnected rules

Eckhart Arnold's avatar
Eckhart Arnold committed
872 873 874
        defined_symbols.difference_update(self.RESERVED_SYMBOLS)

        def remove_connections(symbol):
875
            """Recursively removes all symbols which appear in the
eckhart's avatar
eckhart committed
876
            definiens of a particular symbol."""
Eckhart Arnold's avatar
Eckhart Arnold committed
877 878 879 880 881 882 883
            if symbol in defined_symbols:
                defined_symbols.remove(symbol)
                for related in self.rules[symbol][1:]:
                    remove_connections(str(related))

        remove_connections(self.root_symbol)
        for leftover in defined_symbols:
eckhart's avatar
eckhart committed
884 885 886
            self.tree.new_error(self.rules[leftover][0],
                                ('Rule "%s" is not connected to parser root "%s" !') %
                                (leftover, self.root_symbol), Error.WARNING)
887

888
        # set root_symbol parser and assemble python grammar definition
889

890 891
        if self.root_symbol and 'root__' not in self.rules:
            declarations.append('root__ = ' + self.root_symbol)
892
        declarations.append('')