Starting from 2021-07-01, all LRZ GitLab users will be required to explicitly accept the GitLab Terms of Service. Please see the detailed information at https://doku.lrz.de/display/PUBLIC/GitLab and make sure that your projects conform to the requirements.

ebnf.py 36.2 KB
Newer Older
1
"""ebnf.py - EBNF -> Python-Parser compilation for DHParser
2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18

Copyright 2016  by Eckhart Arnold (arnold@badw.de)
                Bavarian Academy of Sciences and Humanities (badw.de)

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied.  See the License for the specific language governing
permissions and limitations under the License.
"""

19
import keyword
20
from collections import OrderedDict
21
from functools import partial
22

23 24 25 26
try:
    import regex as re
except ImportError:
    import re
27 28 29 30
try:
    from typing import Callable, Dict, List, Set, Tuple
except ImportError:
    from .typing34 import Callable, Dict, List, Set, Tuple
31

32
from DHParser.toolkit import load_if_file, escape_re, md5, sane_parser_name
33
from DHParser.parser import Grammar, mixin_comment, nil_preprocessor, Forward, RE, NegativeLookahead, \
34
    Alternative, Series, Optional, Required, OneOrMore, ZeroOrMore, Token, Compiler, \
35
    PreprocessorFunc
36
from DHParser.syntaxtree import WHITESPACE_PTYPE, TOKEN_PTYPE, Node, TransformationFunc
37
from DHParser.transform import TransformationDict, traverse, remove_brackets, \
38
    reduce_single_child, replace_by_single_child, remove_expendables, \
39
    remove_tokens, flatten, forbid, assert_content, remove_infix_operator
40
from DHParser.versionnumber import __version__
41

42
# Public API of this module.
__all__ = ('get_ebnf_preprocessor',
           'get_ebnf_grammar',
           'get_ebnf_transformer',
           'get_ebnf_compiler',
           'EBNFGrammar',
           'EBNFTransform',
           'EBNFCompilerError',
           'EBNFCompiler',
           'grammar_changed',
           'PreprocessorFactoryFunc',
           'ParserFactoryFunc',
           'TransformerFactoryFunc',
           'CompilerFactoryFunc')
55 56


Eckhart Arnold's avatar
Eckhart Arnold committed
57 58 59 60 61 62 63
########################################################################
#
# EBNF scanning
#
########################################################################


64 65
def get_ebnf_preprocessor() -> PreprocessorFunc:
    """Returns the preprocessor for EBNF sources. EBNF sources do not
    require any preprocessing, so this is simply the no-op preprocessor."""
    return nil_preprocessor
Eckhart Arnold's avatar
Eckhart Arnold committed
66 67 68 69 70 71 72 73


########################################################################
#
# EBNF parsing
#
########################################################################

74

75
class EBNFGrammar(Grammar):
    r"""Parser for an EBNF source file, with this grammar:

    # EBNF-Grammar in EBNF

    @ comment    =  /#.*(?:\n|$)/                    # comments start with '#' and eat all chars up to and including '\n'
    @ whitespace =  /\s*/                            # whitespace includes linefeed
    @ literalws  =  right                            # trailing whitespace of literals will be ignored tacitly

    syntax     =  [~//] { definition | directive } §EOF
    definition =  symbol §"=" expression
    directive  =  "@" §symbol §"=" ( regexp | literal | list_ )

    expression =  term { "|" term }
    term       =  { factor }+
    factor     =  [flowmarker] [retrieveop] symbol !"="   # negative lookahead to be sure it's not a definition
                | [flowmarker] literal
                | [flowmarker] regexp
                | [flowmarker] group
                | [flowmarker] oneormore
                | repetition
                | option

    flowmarker =  "!"  | "&"  | "§" |                # '!' negative lookahead, '&' positive lookahead, '§' required
                  "-!" | "-&"                        # '-' negative lookbehind, '-&' positive lookbehind
    retrieveop =  "::" | ":"                         # '::' pop, ':' retrieve

    group      =  "(" expression §")"
    oneormore  =  "{" expression "}+"
    repetition =  "{" expression §"}"
    option     =  "[" expression §"]"

    symbol     =  /(?!\d)\w+/~                       # e.g. expression, factor, parameter_list
    literal    =  /"(?:[^"]|\\")*?"/~                # e.g. "(", '+', 'while'
                | /'(?:[^']|\\')*?'/~                # whitespace following literals will be ignored tacitly.
    regexp     =  /~?\/(?:[^\/]|(?<=\\)\/)*\/~?/~    # e.g. /\w+/, ~/#.*(?:\n|$)/~
                                                     # '~' is a whitespace-marker, if present leading or trailing
                                                     # whitespace of a regular expression will be ignored tacitly.
    list_      =  /\w+/~ { "," /\w+/~ }              # comma separated list of symbols, e.g. BEGIN_LIST, END_LIST,
                                                     # BEGIN_QUOTE, END_QUOTE ; see CommonMark/markdown.py for an example
    EOF =  !/./
    """
    # NOTE: the parser definitions below mirror the grammar in the docstring;
    # this class looks machine-generated (see source_hash__), so manual edits
    # will be lost when the grammar is recompiled.
    expression = Forward()
    source_hash__ = "a410e1727fb7575e98ff8451dbf8f3bd"
    parser_initialization__ = "upon instantiation"
    COMMENT__ = r'#.*(?:\n|$)'
    WSP__ = mixin_comment(whitespace=r'\s*', comment=r'#.*(?:\n|$)')
    wspL__ = ''
    wspR__ = WSP__
    EOF = NegativeLookahead(RE('.', wR=''))
    list_ = Series(RE('\\w+'), ZeroOrMore(Series(Token(","), RE('\\w+'))))
    regexp = RE(r'~?/(?:\\/|[^/])*?/~?')  # RE('~?/(?:[^/]|(?<=\\\\)/)*/~?')
    literal = Alternative(RE('"(?:[^"]|\\\\")*?"'), RE("'(?:[^']|\\\\')*?'"))
    symbol = RE('(?!\\d)\\w+')
    option = Series(Token("["), expression, Required(Token("]")))
    repetition = Series(Token("{"), expression, Required(Token("}")))
    oneormore = Series(Token("{"), expression, Token("}+"))
    group = Series(Token("("), expression, Required(Token(")")))
    retrieveop = Alternative(Token("::"), Token(":"))
    flowmarker = Alternative(Token("!"), Token("&"), Token("§"), Token("-!"), Token("-&"))
    factor = Alternative(Series(Optional(flowmarker), Optional(retrieveop), symbol, NegativeLookahead(Token("="))),
                         Series(Optional(flowmarker), literal), Series(Optional(flowmarker), regexp),
                         Series(Optional(flowmarker), group), Series(Optional(flowmarker), oneormore),
                         repetition, option)
    term = OneOrMore(factor)
    expression.set(Series(term, ZeroOrMore(Series(Token("|"), term))))
    directive = Series(Token("@"), Required(symbol), Required(Token("=")), Alternative(regexp, literal, list_))
    definition = Series(symbol, Required(Token("=")), expression)
    syntax = Series(Optional(RE('', wR='', wL=WSP__)), ZeroOrMore(Alternative(definition, directive)), Required(EOF))
    root__ = syntax


147
def grammar_changed(grammar_class, grammar_source: str) -> bool:
    """Returns ``True`` if ``grammar_class`` does not reflect the latest
    changes of ``grammar_source``

    Parameters:
        grammar_class:  the parser class representing the grammar
            or the file name of a compiler suite containing the grammar
        grammar_source:  File name or string representation of the
            EBNF code of the grammar

    Returns (bool):
        True, if the source text of the grammar is different from the
        source from which the grammar class was generated
    """
    grammar = load_if_file(grammar_source)
    chksum = md5(grammar, __version__)
    if isinstance(grammar_class, str):
        # grammar_class = load_compiler_suite(grammar_class)[1]
        with open(grammar_class, 'r', encoding='utf8') as f:
            pycode = f.read()
        # FIX: regex patterns were previously non-raw strings, so '\w' and
        # '\(' were invalid escape sequences (DeprecationWarning; a syntax
        # error in future Python versions). Use raw strings instead.
        m = re.search(r'class \w*\(Grammar\)', pycode)
        if m:
            m = re.search(r'    source_hash__ *= *"([a-z0-9]*)"',
                          pycode[m.span()[1]:])
            return not (m and m.groups() and m.groups()[-1] == chksum)
        else:
            return True
    else:
        return chksum != grammar_class.source_hash__


178
def get_ebnf_grammar() -> EBNFGrammar:
    """Returns a singleton instance of the EBNF grammar parser,
    creating it lazily on first use. (The singleton is stored in a
    module-level global, presumably intended to be thread-local —
    TODO confirm against the threading model of the surrounding code.)"""
    global thread_local_ebnf_grammar_singleton
    try:
        return thread_local_ebnf_grammar_singleton
    except NameError:
        thread_local_ebnf_grammar_singleton = EBNFGrammar()
        return thread_local_ebnf_grammar_singleton


########################################################################
#
# EBNF concrete to abstract syntax tree transformation and validation
#
########################################################################


195
# Table mapping node-type names (or comma-separated groups of names) to the
# transformation(s) applied to matching nodes when turning the concrete
# syntax tree of an EBNF source into an abstract syntax tree.
EBNF_AST_transformation_table = {
    # AST Transformations for EBNF-grammar
    "+":
        remove_expendables,
    "syntax":
        [],  # otherwise '"*": replace_by_single_child' would be applied
    "directive, definition":
        remove_tokens('@', '='),
    "expression":
        [replace_by_single_child, flatten, remove_tokens('|')],  # remove_infix_operator],
    "term":
        [replace_by_single_child, flatten],  # supports both idioms:  "{ factor }+" and "factor { factor }"
    "factor, flowmarker, retrieveop":
        replace_by_single_child,
    "group":
        [remove_brackets, replace_by_single_child],
    "oneormore, repetition, option":
        [reduce_single_child, remove_brackets,
         forbid('repetition', 'option', 'oneormore'), assert_content(r'(?!§)')],
    "symbol, literal, regexp":
        reduce_single_child,
    (TOKEN_PTYPE, WHITESPACE_PTYPE):
        reduce_single_child,
    "list_":
        [flatten, remove_infix_operator],
    "*":
        replace_by_single_child
}

224

225 226
def EBNFTransform() -> TransformationDict:
    """Creates a fresh AST-transformation function for EBNF parse trees,
    based on a copy of the module-level transformation table."""
    table = EBNF_AST_transformation_table.copy()
    return partial(traverse, processing_table=table)
di68kap's avatar
di68kap committed
227

228
def get_ebnf_transformer() -> TransformationFunc:
    """Returns a lazily created singleton EBNF AST-transformation
    function. (Stored in a module-level global; presumably meant to be
    per-thread — TODO confirm.)"""
    global thread_local_EBNF_transformer_singleton
    try:
        return thread_local_EBNF_transformer_singleton
    except NameError:
        thread_local_EBNF_transformer_singleton = EBNFTransform()
        return thread_local_EBNF_transformer_singleton
Eckhart Arnold's avatar
Eckhart Arnold committed
236 237 238 239 240 241 242 243


########################################################################
#
# EBNF abstract syntax tree to Python parser compilation
#
########################################################################

244

245
# Type aliases for the zero-argument factory functions that compiled
# grammar modules expose.
PreprocessorFactoryFunc = Callable[[], PreprocessorFunc]
ParserFactoryFunc = Callable[[], Grammar]
TransformerFactoryFunc = Callable[[], TransformationFunc]
CompilerFactoryFunc = Callable[[], Compiler]

# Source-code templates that are appended to generated compiler suites.
# The '{NAME}' placeholder is filled with the grammar's name via
# str.format(NAME=...). Do not alter the template text: it is emitted
# verbatim into generated Python modules.
PREPROCESSOR_FACTORY = '''
def get_preprocessor() -> PreprocessorFunc:
    return {NAME}Preprocessor
'''


GRAMMAR_FACTORY = '''
def get_grammar() -> {NAME}Grammar:
    global thread_local_{NAME}_grammar_singleton
    try:
        grammar = thread_local_{NAME}_grammar_singleton
    except NameError:
        thread_local_{NAME}_grammar_singleton = {NAME}Grammar()
        grammar = thread_local_{NAME}_grammar_singleton
    return grammar
'''


TRANSFORMER_FACTORY = '''
def {NAME}Transform() -> TransformationDict:
    return partial(traverse, processing_table={NAME}_AST_transformation_table.copy())

def get_transformer() -> TransformationFunc:
    global thread_local_{NAME}_transformer_singleton
    try:
        transformer = thread_local_{NAME}_transformer_singleton
    except NameError:
        thread_local_{NAME}_transformer_singleton = {NAME}Transform()
        transformer = thread_local_{NAME}_transformer_singleton
    return transformer
'''


COMPILER_FACTORY = '''
def get_compiler(grammar_name="{NAME}", grammar_source="") -> {NAME}Compiler:
    global thread_local_{NAME}_compiler_singleton
    try:
        compiler = thread_local_{NAME}_compiler_singleton
        compiler.set_grammar_name(grammar_name, grammar_source)
    except NameError:
        thread_local_{NAME}_compiler_singleton = \\
            {NAME}Compiler(grammar_name, grammar_source)
        compiler = thread_local_{NAME}_compiler_singleton
    return compiler
'''

Eckhart Arnold's avatar
Eckhart Arnold committed
296

297 298
class EBNFCompilerError(Exception):
    """Error raised by the `EBNFCompiler` class itself, e.g. when its
    methods are called in the wrong order. (These are not compilation
    errors in the strict sense; for those see `CompilationError` in
    module ``dsl.py``.)"""


303
class EBNFCompiler(Compiler):
    """
    Generates a Parser from an abstract syntax tree of a grammar specified
    in EBNF-Notation.

    Instances of this class must be called with the root-node of the
    abstract syntax tree from an EBNF-specification of a formal language.
    The returned value is the Python-source-code of a Grammar class for
    this language that can be used to parse texts in this language.
    See classes `parser.Compiler` and `parser.Grammar` for more information.

    Additionally, class EBNFCompiler provides helper methods to generate
    code-skeletons for a preprocessor, AST-transformation and full
    compilation of the formal language. These methods' names start with
    the prefix `gen_`.

    Attributes:
        current_symbols:  During compilation, a list containing the root
                node of the currently compiled definition as first element
                and then the nodes of the symbols that are referred to in
                the currently compiled definition.

        rules:  Dictionary that maps rule names to a list of Nodes that
                contain symbol-references in the definition of the rule.
                The first item in the list is the node of the rule-
                definition itself. Example:

                           `alternative = a | b`

                Now `[str(node) for node in self.rules['alternative']]`
                yields `['alternative = a | b', 'a', 'b']`

        symbols:  A mapping of symbol names to their first usage (not
                their definition!) in the EBNF source.

        variables:  A set of symbol names that are used with the
                Pop or Retrieve operator. Because the values of these
                symbols need to be captured they are called variables.
                See `test_parser.TestPopRetrieve` for an example.

        recursive:  A set of symbols that are used recursively and
                therefore require a `Forward`-operator.

        definitions:  A dictionary of definitions. Other than `rules`
                this maps the symbols to their compiled definienda.

        deferred_tasks:  A list of callables that is filled during
                compilation, but that will be executed only after
                compilation has finished. Typically, it contains
                semantic checks that require information that
                is only available upon completion of compilation.

        root_symbol:  The name of the root symbol.

        directives:  A dictionary of all directives and their default
                values.

        re_flags:  A set of regular expression flags to be added to all
                regular expressions found in the current parsing process
    """
    # Names of the implicit comment/whitespace parsers in generated grammars;
    # user-defined rules must not shadow them.
    COMMENT_KEYWORD = "COMMENT__"
    WHITESPACE_KEYWORD = "WSP__"
    RESERVED_SYMBOLS = {WHITESPACE_KEYWORD, COMMENT_KEYWORD}
    AST_ERROR = "Badly structured syntax tree. " \
                "Potentially due to erroneous AST transformation."
    # Maps EBNF prefix operators to the names of the parser classes
    # that implement them in the generated Python code.
    PREFIX_TABLE = {'§': 'Required',
                    '&': 'Lookahead', '!': 'NegativeLookahead',
                    '-&': 'Lookbehind', '-!': 'NegativeLookbehind',
                    '::': 'Pop', ':': 'Retrieve'}
    # Predefined whitespace regimes selectable via the @whitespace directive.
    WHITESPACE = {'horizontal': r'[\t ]*',  # default: horizontal
                  'linefeed': r'[ \t]*\n?(?!\s*\n)[ \t]*',
                  'vertical': r'\s*'}
375

376

377
    def __init__(self, grammar_name="", grammar_source=""):
        """Initializes the compiler with the grammar's name and source
        text and resets all per-compilation state (see `_reset`)."""
        super(EBNFCompiler, self).__init__(grammar_name, grammar_source)
        self._reset()

381

382
    def _reset(self):
        """Resets all mutable compilation state to its initial values, so
        the same compiler instance can be reused for another compilation
        run. See the class docstring for the meaning of each attribute."""
        super(EBNFCompiler, self)._reset()
        self._result = ''           # type: str
        self.re_flags = set()       # type: Set[str]
        self.rules = OrderedDict()  # type: OrderedDict[str, List[Node]]
        self.current_symbols = []   # type: List[Node]
        self.symbols = {}           # type: Dict[str, Node]
        self.variables = set()      # type: Set[str]
        self.recursive = set()      # type: Set[str]
        self.definitions = {}       # type: Dict[str, str]
        self.deferred_tasks = []    # type: List[Callable]
        self.root_symbol = ""  # type: str
        # Directive defaults; overridden by @-directives in the EBNF source.
        self.directives = {'whitespace': self.WHITESPACE['horizontal'],
                           'comment': '',
                           'literalws': ['right'],
                           'tokens': set(),  # alt. 'preprocessor_tokens'
                           'filter': dict(),  # alt. 'filter'
                           'ignorecase': False,
                           'testing': False}
401

Eckhart Arnold's avatar
Eckhart Arnold committed
402
    @property
    def result(self) -> str:
        """The Python source code produced by the last compilation run
        (empty string if no compilation has taken place yet)."""
        return self._result

406
    # methods for generating skeleton code for preprocessor, transformer, and compiler
407

408
    def gen_preprocessor_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for a preprocessor-function for
        the previously compiled formal language.
        """
        name = self.grammar_name + "Preprocessor"
        stub = "def %s(text):\n    return text\n" % name
        factory = PREPROCESSOR_FACTORY.format(NAME=self.grammar_name)
        return stub + factory
416

417

418
    def gen_transformer_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for the AST-transformation for the
        previously compiled formal language.

        Raises:
            EBNFCompilerError: if no grammar has been compiled yet.
        """
        if not self.rules:
            raise EBNFCompilerError('Compiler must be run before calling '
                                    '"gen_transformer_Skeleton()"!')
        tt_name = self.grammar_name + '_AST_transformation_table'
        lines = [tt_name + ' = {',
                 '    # AST Transformations for the ' +
                 self.grammar_name + '-grammar',
                 '    "+": remove_empty,']
        # Suggest a plausible default transformation for each rule based on
        # the shape of its compiled definition.
        for name in self.rules:
            rule = self.definitions[name]
            if rule.startswith('Alternative'):
                tf = '[replace_or_reduce]'
            elif rule.startswith('Synonym'):
                tf = '[replace_by_single_child]'
            else:
                tf = '[]'
            lines.append('    "' + name + '": %s,' % tf)
        lines.append('    ":Token, :RE": reduce_single_child,')
        lines.extend(['    "*": replace_by_single_child', '}', '',
                      TRANSFORMER_FACTORY.format(NAME=self.grammar_name)])
        return '\n'.join(lines)

444

445
    def gen_compiler_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for a Compiler-class for the
        previously compiled formal language.

        Raises:
            EBNFCompilerError: if no grammar has been compiled yet.
        """
        if not self.rules:
            raise EBNFCompilerError('Compiler has not been run before calling '
                                    '"gen_Compiler_Skeleton()"!')
        compiler = ['class ' + self.grammar_name + 'Compiler(Compiler):',
                    '    """Compiler for the abstract-syntax-tree of a ' +
                    self.grammar_name + ' source file.',
                    '    """', '',
                    '    def __init__(self, grammar_name="' +
                    self.grammar_name + '", grammar_source=""):',
                    '        super(' + self.grammar_name +
                    'Compiler, self).__init__(grammar_name, grammar_source)',
                    # FIX: this literal was a non-raw string, so '\w' and '\Z'
                    # were invalid escape sequences (DeprecationWarning; error
                    # in future Python). The raw string yields the same text.
                    r"        assert re.match('\w+\Z', grammar_name)", '']
        # Emit one `on_<rule>` stub per grammar rule; the root rule's stub
        # returns the node unchanged so the generated compiler runs as-is.
        for name in self.rules:
            method_name = Compiler.method_name(name)
            if name == self.root_symbol:
                compiler += ['    def ' + method_name + '(self, node):',
                             '        return node', '']
            else:
                compiler += ['    def ' + method_name + '(self, node):',
                             '        pass', '']
        compiler += [COMPILER_FACTORY.format(NAME=self.grammar_name)]
        return '\n'.join(compiler)
472

473

474 475 476 477 478
    def assemble_parser(self, definitions: List[Tuple[str, str]], root_node: Node) -> str:
        """
        Creates the Python code for the parser after compilation of
        the EBNF-Grammar.

        Parameters:
            definitions:  list of (symbol, compiled parser expression)
                pairs, in source order (root symbol first).
            root_node:  the root node of the compiled syntax tree; errors
                found here are attached to the relevant nodes.

        Returns:
            The Python source code of the generated Grammar class.
        """

        # execute deferred tasks, for example semantic checks that cannot
        # be done before the symbol table is complete

        for task in self.deferred_tasks:
            task()

        # provide for capturing of symbols that are variables, i.e. the
        # value of will be retrieved at some point during the parsing process

        if self.variables:
            for i in range(len(definitions)):
                if definitions[i][0] in self.variables:
                    definitions[i] = (definitions[i][0], 'Capture(%s)' % definitions[i][1])

        # add special fields for Grammar class

        definitions.append(('wspR__', self.WHITESPACE_KEYWORD
                            if 'right' in self.directives['literalws'] else "''"))
        definitions.append(('wspL__', self.WHITESPACE_KEYWORD
                            if 'left' in self.directives['literalws'] else "''"))
        definitions.append((self.WHITESPACE_KEYWORD,
                            ("mixin_comment(whitespace="
                             "r'{whitespace}', comment=r'{comment}')").
                            format(**self.directives)))
        definitions.append((self.COMMENT_KEYWORD, "r'{comment}'".format(**self.directives)))

        # prepare parser class header and docstring and
        # add EBNF grammar to the doc string of the parser class

        article = 'an ' if self.grammar_name[0:1] in "AaEeIiOoUu" else 'a '  # what about 'hour', 'universe' etc.?
        declarations = ['class ' + self.grammar_name +
                        'Grammar(Grammar):',
                        'r"""Parser for ' + article + self.grammar_name +
                        ' source file' +
                        (', with this grammar:' if self.grammar_source else '.')]
        definitions.append(('parser_initialization__', '"upon instantiation"'))
        if self.grammar_source:
            definitions.append(('source_hash__',
                                '"%s"' % md5(self.grammar_source, __version__)))
            declarations.append('')
            declarations += [line for line in self.grammar_source.split('\n')]
            while declarations[-1].strip() == '':
                declarations = declarations[:-1]
        declarations.append('"""')

        # turn definitions into declarations in reverse order

        self.root_symbol = definitions[0][0] if definitions else ""
        definitions.reverse()
        declarations += [symbol + ' = Forward()'
                         for symbol in sorted(list(self.recursive))]
        for symbol, statement in definitions:
            if symbol in self.recursive:
                declarations += [symbol + '.set(' + statement + ')']
            else:
                declarations += [symbol + ' = ' + statement]

        # check for symbols used but never defined

        defined_symbols = set(self.rules.keys()) | self.RESERVED_SYMBOLS
        for symbol in self.symbols:
            if symbol not in defined_symbols:
                self.symbols[symbol].add_error("Missing definition for symbol '%s'" % symbol)
                # root_node.error_flag = True

        # check for unconnected rules

        if not self.directives['testing']:
            defined_symbols.difference_update(self.RESERVED_SYMBOLS)

            def remove_connections(symbol):
                # Recursively mark every rule reachable from `symbol` as
                # connected by removing it from the leftover set.
                if symbol in defined_symbols:
                    defined_symbols.remove(symbol)
                    for related in self.rules[symbol][1:]:
                        remove_connections(str(related))

            remove_connections(self.root_symbol)
            for leftover in defined_symbols:
                # FIX: corrected typo "supress" -> "suppress" in the
                # user-facing error message.
                self.rules[leftover][0].add_error(('Rule "%s" is not connected to parser '
                                                   'root "%s" !') % (leftover,
                                                                     self.root_symbol) + ' (Use directive "@testing=True" '
                    'to suppress this error message.)')
                # root_node.error_flag = True

        # set root_symbol parser and assemble python grammar definition

        if self.root_symbol and 'root__' not in self.rules:
            declarations.append('root__ = ' + self.root_symbol)
        declarations.append('')
        self._result = '\n    '.join(declarations) \
                       + GRAMMAR_FACTORY.format(NAME=self.grammar_name)
        return self._result
572

573 574 575

    ## compilation methods

576
    def on_syntax(self, node: Node) -> str:
        """Compiles the root ("syntax") node of an EBNF syntax tree:
        compiles all contained definitions and directives, then assembles
        and returns the Python source code of the generated parser."""
        definitions = []  # type: List[Tuple[str, str]]

        # drop the wrapping sequence node
        if len(node.children) == 1 and not node.children[0].parser.name:
            node = node.children[0]

        # compile definitions and directives and collect definitions
        for nd in node.children:
            if nd.parser.name == "definition":
                definitions.append(self.compile(nd))
            else:
                # directives only update compiler state; propagate any
                # errors they produced up to the root node
                assert nd.parser.name == "directive", nd.as_sxpr()
                self.compile(nd)
                node.error_flag = node.error_flag or nd.error_flag
        self.definitions.update(definitions)

        return self.assemble_parser(definitions, node)
594

595

596
    def on_definition(self, node: Node) -> Tuple[str, str]:
        """Compiles a single rule definition.

        Validates the rule name (no redefinition, no reserved or
        token-shadowing names, no Python keywords), compiles the rule's
        expression, and wraps it in `Capture(...)` for variables or
        `Synonym(...)` for plain symbol aliases.

        Returns:
            (rule name, compiled parser expression) — or a
            ('<name>:error', error message) pair if the AST was malformed.
        """
        rule = str(node.children[0])
        if rule in self.rules:
            first = self.rules[rule][0]
            if not first._errors:
                first.add_error('First definition of rule "%s" '
                                'followed by illegal redefinitions.' % rule)
            node.add_error('A rule with name "%s" has already been defined earlier.' % rule)
        elif rule in EBNFCompiler.RESERVED_SYMBOLS:
            node.add_error('Symbol "%s" is a reserved symbol.' % rule)
        elif not sane_parser_name(rule):
            # FIX: corrected typo "doube" -> "double" and stray double
            # space in the user-facing error message.
            node.add_error('Illegal symbol "%s". Symbols must not start or '
                           'end with a double underscore "__".' % rule)
        elif rule in self.directives['tokens']:
            node.add_error('Symbol "%s" has already been defined as '
                           'a preprocessor token.' % rule)
        elif keyword.iskeyword(rule):
            node.add_error('Python keyword "%s" may not be used as a symbol. '
                           % rule + '(This may change in the future.)')
        try:
            self.current_symbols = [node]
            self.rules[rule] = self.current_symbols
            defn = self.compile(node.children[1])
            if rule in self.variables:
                defn = 'Capture(%s)' % defn
                self.variables.remove(rule)
            elif defn.find("(") < 0:
                # assume it's a synonym, like 'page = REGEX_PAGE_NR'
                defn = 'Synonym(%s)' % defn
        except TypeError as error:
            errmsg = EBNFCompiler.AST_ERROR + " (" + str(error) + ")\n" + node.as_sxpr()
            node.add_error(errmsg)
            rule, defn = rule + ':error', '"' + errmsg + '"'
        return rule, defn
630

631

632
    def _check_rx(self, node: Node, rx: str) -> str:
        """
        Checks whether the string `rx` represents a valid regular
        expression. Makes sure that multiline regular expressions are
        prepended by the multiline-flag. Returns the regular expression string.
        """
        # multiline regexes additionally get the verbose ('x') flag
        if '\n' in rx:
            flags = self.re_flags | {'x'}
        else:
            flags = self.re_flags
        if flags:
            rx = "(?%s)%s" % ("".join(flags), rx)
        try:
            re.compile(rx)
        except Exception as re_error:
            node.add_error("malformed regular expression %s: %s" %
                           (repr(rx), str(re_error)))
        return rx

647

648
    def on_directive(self, node: 'Node') -> str:
        """
        Compiles a directive-node (e.g. ``@ whitespace = linefeed``) by
        recording its value in ``self.directives``. Ill-formed values and
        unknown directive names are reported as errors on `node`. Always
        returns the empty string, because directives yield no parser code.
        """
        key = str(node.children[0]).lower()
        assert key not in self.directives['tokens']

        if key in {'comment', 'whitespace'}:
            if node.children[1].parser.name == "list_":
                # value given as a symbol list: exactly one predefined
                # whitespace-name (e.g. "linefeed") is allowed
                if len(node.children[1].result) != 1:
                    node.add_error('Directive "%s" must have one, but not %i values.' %
                                   (key, len(node.children[1].result)))
                value = self.compile(node.children[1]).pop()
                if key == 'whitespace' and value in EBNFCompiler.WHITESPACE:
                    value = EBNFCompiler.WHITESPACE[value]  # replace whitespace-name by regex
                else:
                    node.add_error('Value "%s" not allowed for directive "%s".' % (value, key))
            else:
                # value given as a string literal or a regular expression
                value = str(node.children[1]).strip("~")
                if value != str(node.children[1]):
                    node.add_error("Whitespace marker '~' not allowed in definition of "
                                   "%s regular expression." % key)
                if value[0] + value[-1] in {'""', "''"}:
                    value = escape_re(value[1:-1])
                elif value[0] + value[-1] == '//':
                    value = self._check_rx(node, value[1:-1])
                if key == 'whitespace' and not re.match(value, ''):
                    node.add_error("Implicit whitespace should always match the empty string, "
                                   "/%s/ does not." % value)
            self.directives[key] = value

        elif key == 'ignorecase':
            value = str(node.children[1]).lower() not in {"off", "false", "no"}
            # bug fix: this line used to read `... == value`, a no-op
            # comparison, so the directive's value was never stored
            self.directives['ignorecase'] = value
            if value:
                self.re_flags.add('i')

        elif key == 'testing':
            value = str(node.children[1])
            self.directives['testing'] = value.lower() not in {"off", "false", "no"}

        elif key == 'literalws':
            value = {item.lower() for item in self.compile(node.children[1])}
            if (len(value - {'left', 'right', 'both', 'none'}) > 0
                    or ('none' in value and len(value) > 1)):
                node.add_error('Directive "literalws" allows the values '
                               '`left`, `right`, `both` or `none`, '
                               'but not `%s`' % ", ".join(value))
            # normalize to the set of sides on which literals eat whitespace
            # (fix: use set() rather than an empty dict for the 'none' case)
            ws = {'left', 'right'} if 'both' in value \
                else set() if 'none' in value else value
            self.directives[key] = list(ws)

        elif key in {'tokens', 'preprocessor_tokens'}:
            self.directives['tokens'] |= self.compile(node.children[1])

        elif key.endswith('_filter'):
            filter_set = self.compile(node.children[1])
            if not isinstance(filter_set, set) or len(filter_set) != 1:
                node.add_error('Directive "%s" accepts exactly one symbol, not %s'
                               % (key, str(filter_set)))
            self.directives['filter'][key[:-7]] = filter_set.pop()

        else:
            node.add_error('Unknown directive %s ! (Known ones are %s .)' %
                           (key,
                            ', '.join(list(self.directives.keys()))))
        return ""

713

714
    def non_terminal(self, node: 'Node', parser_class: str, custom_args: List[str] = None) -> str:
        """
        Compiles any non-terminal, where `parser_class` indicates the Parser class
        name for the particular non-terminal.

        Args:
            node: node whose children are compiled into constructor arguments.
            parser_class: name of the Parser class to instantiate.
            custom_args: optional extra argument strings appended after the
                compiled children (default: none).
        Returns:
            Python source text of the constructor call, e.g. ``Series(a, b)``.
        """
        # fix: the former default `custom_args=[]` was a mutable default
        # argument; use a None-sentinel to avoid sharing between calls
        if custom_args is None:
            custom_args = []
        arguments = [self.compile(r) for r in node.children] + custom_args
        return parser_class + '(' + ', '.join(arguments) + ')'

722

723
    def on_expression(self, node) -> str:
724 725
        return self.non_terminal(node, 'Alternative')

726

727
    def on_term(self, node) -> str:
        """Compiles a 'term'-node, i.e. a sequence of factors, into a
        Series-parser call."""
        return self.non_terminal(node, 'Series')
729

730

731
    def on_factor(self, node: Node) -> str:
        """
        Compiles a 'factor'-node, i.e. a term preceded by a prefix operator
        (e.g. '!', '&', '-', '::', ':').  The prefix is mapped to a parser
        class via `self.PREFIX_TABLE`; retrieve-operators ('::', ':')
        additionally register the operand symbol in `self.variables`.
        Returns the Python constructor call for the resulting parser, or ""
        if the prefix is unknown.
        """
        assert node.children
        assert len(node.children) >= 2, node.as_sxpr()
        prefix = str(node.children[0])  # cast(str, node.children[0].result)
        custom_args = []  # type: List[str]

        if prefix in {'::', ':'}:
            # retrieve operator: the operand must be a single symbol
            assert len(node.children) == 2
            arg = node.children[-1]
            if arg.parser.name != 'symbol':
                node.add_error(('Retrieve Operator "%s" requires a symbol, '
                                'and not a %s.') % (prefix, str(arg.parser)))
                return str(arg.result)
            if str(arg) in self.directives['filter']:
                # pass along a filter function configured via an `xxx_filter` directive
                custom_args = ['filter=%s' % self.directives['filter'][str(arg)]]
            self.variables.add(str(arg))  # cast(str, arg.result)

        elif len(node.children) > 2:
            # several operands after the prefix: fold operands 1..n into a
            # single child node so the prefix operator applies to all of them
            # shift = (Node(node.parser, node.result[1].result),)
            # node.result[1].result = shift + node.result[2:]
            node.children[1].result = (Node(node.children[1].parser, node.children[1].result),) \
                                    + node.children[2:]
            node.children[1].parser = node.parser
            node.result = (node.children[0], node.children[1])

        # drop the prefix child before compiling the operand(s)
        node.result = node.children[1:]
        try:
            parser_class = self.PREFIX_TABLE[prefix]
            result = self.non_terminal(node, parser_class, custom_args)
            if prefix[:1] == '-':
                # lookbehind operator ('-'-prefix): verify that the operand
                # ultimately resolves to a plain regular-expression parser
                def check(node):
                    nd = node
                    if len(nd.children) >= 1:
                        nd = nd.children[0]
                    while nd.parser.name == "symbol":
                        # follow symbol references to their defining expression
                        symlist = self.rules.get(str(nd), [])
                        if len(symlist) == 2:
                            nd = symlist[1]
                        else:
                            if len(symlist) == 1:
                                nd = symlist[0].children[1]
                            break
                    if (nd.parser.name != "regexp" or str(nd)[:1] != '/'
                        or str(nd)[-1:] != '/'):
                        node.add_error("Lookbehind-parser can only be used with plain RegExp-"
                                       "parsers, not with: " + nd.parser.name + nd.parser.ptype)

                if not result.startswith('RegExp('):
                    # symbol definitions may not all be known yet, so defer
                    # the check until the whole grammar has been compiled
                    self.deferred_tasks.append(lambda: check(node))
            return result
        except KeyError:
            node.add_error('Unknown prefix "%s".' % prefix)
        return ""
784

785

786
    def on_option(self, node) -> str:
        """Compiles an 'option'-node (i.e. "[ ... ]") into an
        Optional-parser call."""
        return self.non_terminal(node, 'Optional')

789