ebnf.py 37.2 KB
Newer Older
1
"""ebnf.py - EBNF -> Python-Parser compilation for DHParser
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18

Copyright 2016  by Eckhart Arnold (arnold@badw.de)
                Bavarian Academy of Sciences an Humanities (badw.de)

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied.  See the License for the specific language governing
permissions and limitations under the License.
"""

19
import keyword
20
from collections import OrderedDict
21
from functools import partial
22

23
24
25
26
try:
    import regex as re
except ImportError:
    import re
27
try:
Eckhart Arnold's avatar
Eckhart Arnold committed
28
    from typing import Callable, Dict, List, Set, Tuple, Union
29
except ImportError:
Eckhart Arnold's avatar
Eckhart Arnold committed
30
    from .typing34 import Callable, Dict, List, Set, Tuple, Union
31

32
from DHParser.toolkit import load_if_file, escape_re, md5, sane_parser_name
33
from DHParser.parser import Grammar, mixin_comment, nil_preprocessor, Forward, RE, NegativeLookahead, \
34
    Alternative, Series, Option, Required, OneOrMore, ZeroOrMore, Token, Compiler, \
35
    PreprocessorFunc
36
37
from DHParser.syntaxtree import Node, TransformationFunc, WHITESPACE_PTYPE, TOKEN_PTYPE
from DHParser.error import Error
38
from DHParser.transform import traverse, remove_brackets, \
39
    reduce_single_child, replace_by_single_child, remove_expendables, \
40
    remove_tokens, flatten, forbid, assert_content, remove_infix_operator
41
from DHParser.versionnumber import __version__
42

43
__all__ = ('get_ebnf_preprocessor',
44
45
46
47
           'get_ebnf_grammar',
           'get_ebnf_transformer',
           'get_ebnf_compiler',
           'EBNFGrammar',
48
           'EBNFTransform',
Eckhart Arnold's avatar
Eckhart Arnold committed
49
           'EBNFCompilerError',
50
           'EBNFCompiler',
51
           'grammar_changed',
52
           'PreprocessorFactoryFunc',
53
54
           'ParserFactoryFunc',
           'TransformerFactoryFunc',
55
           'CompilerFactoryFunc')
56
57


Eckhart Arnold's avatar
Eckhart Arnold committed
58
59
60
61
62
63
64
########################################################################
#
# EBNF scanning
#
########################################################################


65
66
def get_ebnf_preprocessor() -> PreprocessorFunc:
    """Returns the preprocessor function for EBNF sources.

    EBNF sources do not require any preprocessing, so this simply
    returns the no-op `nil_preprocessor` from DHParser.parser.
    """
    return nil_preprocessor
Eckhart Arnold's avatar
Eckhart Arnold committed
67
68
69
70
71
72
73
74


########################################################################
#
# EBNF parsing
#
########################################################################

75

76
class EBNFGrammar(Grammar):
    r"""Parser for an EBNF source file, with this grammar:

    # EBNF-Grammar in EBNF

    @ comment    =  /#.*(?:\n|$)/                    # comments start with '#' and eat all chars up to and including '\n'
    @ whitespace =  /\s*/                            # whitespace includes linefeed
    @ literalws  =  right                            # trailing whitespace of literals will be ignored tacitly

    syntax     =  [~//] { definition | directive } §EOF
    definition =  symbol §"=" expression
    directive  =  "@" §symbol §"=" ( regexp | literal | list_ )

    expression =  term { "|" term }
    term       =  { factor }+
    factor     =  [flowmarker] [retrieveop] symbol !"="   # negative lookahead to be sure it's not a definition
                | [flowmarker] literal
                | [flowmarker] regexp
                | [flowmarker] group
                | [flowmarker] oneormore
                | repetition
                | option

    flowmarker =  "!"  | "&"  | "§" |                # '!' negative lookahead, '&' positive lookahead, '§' required
                  "-!" | "-&"                        # '-' negative lookbehind, '-&' positive lookbehind
    retrieveop =  "::" | ":"                         # '::' pop, ':' retrieve

    group      =  "(" expression §")"
    oneormore  =  "{" expression "}+"
    repetition =  "{" expression §"}"
    option     =  "[" expression §"]"

    symbol     =  /(?!\d)\w+/~                       # e.g. expression, factor, parameter_list
    literal    =  /"(?:[^"]|\\")*?"/~                # e.g. "(", '+', 'while'
                | /'(?:[^']|\\')*?'/~                # whitespace following literals will be ignored tacitly.
    regexp     =  /~?\/(?:[^\/]|(?<=\\)\/)*\/~?/~    # e.g. /\w+/, ~/#.*(?:\n|$)/~
                                                     # '~' is a whitespace-marker, if present leading or trailing
                                                     # whitespace of a regular expression will be ignored tacitly.
    list_      =  /\w+/~ { "," /\w+/~ }              # comma separated list of symbols, e.g. BEGIN_LIST, END_LIST,
                                                     # BEGIN_QUOTE, END_QUOTE ; see CommonMark/markdown.py for an example
    EOF =  !/./
    """
    # NOTE(review): the parser definitions below mirror the grammar in the
    # docstring above; `source_hash__` ties this class to the EBNF source it
    # was generated from (see `grammar_changed`).  Edit the grammar, not the
    # parser objects, to change the accepted language.
    expression = Forward()
    source_hash__ = "a410e1727fb7575e98ff8451dbf8f3bd"
    parser_initialization__ = "upon instantiation"
    # implicit whitespace/comment handling mixed into all RE-parsers below
    COMMENT__ = r'#.*(?:\n|$)'
    WSP__ = mixin_comment(whitespace=r'\s*', comment=r'#.*(?:\n|$)')
    wspL__ = ''
    wspR__ = WSP__
    EOF = NegativeLookahead(RE('.', wR=''))
    list_ = Series(RE('\\w+'), ZeroOrMore(Series(Token(","), RE('\\w+'))))
    regexp = RE(r'~?/(?:\\/|[^/])*?/~?')  # RE('~?/(?:[^/]|(?<=\\\\)/)*/~?')
    literal = Alternative(RE('"(?:[^"]|\\\\")*?"'), RE("'(?:[^']|\\\\')*?'"))
    symbol = RE('(?!\\d)\\w+')
    option = Series(Token("["), expression, Required(Token("]")))
    repetition = Series(Token("{"), expression, Required(Token("}")))
    oneormore = Series(Token("{"), expression, Token("}+"))
    group = Series(Token("("), expression, Required(Token(")")))
    retrieveop = Alternative(Token("::"), Token(":"))
    flowmarker = Alternative(Token("!"), Token("&"), Token("§"), Token("-!"), Token("-&"))
    factor = Alternative(Series(Option(flowmarker), Option(retrieveop), symbol, NegativeLookahead(Token("="))),
                         Series(Option(flowmarker), literal), Series(Option(flowmarker), regexp),
                         Series(Option(flowmarker), group), Series(Option(flowmarker), oneormore),
                         repetition, option)
    term = OneOrMore(factor)
    expression.set(Series(term, ZeroOrMore(Series(Token("|"), term))))
    directive = Series(Token("@"), Required(symbol), Required(Token("=")), Alternative(regexp, literal, list_))
    definition = Series(symbol, Required(Token("=")), expression)
    syntax = Series(Option(RE('', wR='', wL=WSP__)), ZeroOrMore(Alternative(definition, directive)), Required(EOF))
    root__ = syntax


148
def grammar_changed(grammar_class, grammar_source: str) -> bool:
    """Returns ``True`` if ``grammar_class`` does not reflect the latest
    changes of ``grammar_source``

    Parameters:
        grammar_class:  the parser class representing the grammar
            or the file name of a compiler suite containing the grammar
        grammar_source:  File name or string representation of the
            EBNF code of the grammar

    Returns (bool):
        True, if the source text of the grammar is different from the
        source from which the grammar class was generated
    """
    grammar = load_if_file(grammar_source)
    chksum = md5(grammar, __version__)
    if isinstance(grammar_class, str):
        # grammar_class is a file name: scan the compiler-suite source for
        # the embedded hash instead of importing the module.
        # grammar_class = load_compiler_suite(grammar_class)[1]
        with open(grammar_class, 'r', encoding='utf8') as f:
            pycode = f.read()
        # raw strings here: '\w' etc. are regex escapes, not string escapes
        # (non-raw '\w' raises a DeprecationWarning on modern CPython)
        m = re.search(r'class \w*\(Grammar\)', pycode)
        if m:
            # hash is expected on a `source_hash__ = "..."` line inside the
            # grammar class; search only the text after the class header
            m = re.search(r'    source_hash__ *= *"([a-z0-9]*)"',
                          pycode[m.span()[1]:])
            return not (m and m.groups() and m.groups()[-1] == chksum)
        else:
            # no Grammar subclass found at all -> treat as changed
            return True
    else:
        return chksum != grammar_class.source_hash__


179
def get_ebnf_grammar() -> EBNFGrammar:
    """Returns a lazily created, module-level `EBNFGrammar` instance.

    The instance is created on first call and cached in the module-level
    name `thread_local_ebnf_grammar_singleton`; subsequent calls return
    the same object.
    """
    global thread_local_ebnf_grammar_singleton
    if 'thread_local_ebnf_grammar_singleton' not in globals():
        thread_local_ebnf_grammar_singleton = EBNFGrammar()
    return thread_local_ebnf_grammar_singleton


########################################################################
#
# EBNF concrete to abstract syntax tree transformation and validation
#
########################################################################


196
# Transformation table that turns the concrete syntax tree produced by
# `EBNFGrammar` into the abstract syntax tree that `EBNFCompiler` consumes.
# Keys are node (parser) names -- "+" applies to all nodes first, "*" is the
# fallback for nodes not matched by any other key (see `transform.traverse`).
EBNF_AST_transformation_table = {
    # AST Transformations for EBNF-grammar
    "+":
        remove_expendables,
    "syntax":
        [],  # otherwise '"*": replace_by_single_child' would be applied
    "directive, definition":
        remove_tokens('@', '='),
    "expression":
        [replace_by_single_child, flatten, remove_tokens('|')],  # remove_infix_operator],
    "term":
        [replace_by_single_child, flatten],  # supports both idioms:  "{ factor }+" and "factor { factor }"
    "factor, flowmarker, retrieveop":
        replace_by_single_child,
    "group":
        [remove_brackets, replace_by_single_child],
    "oneormore, repetition, option":
        # directly nested repetitions/options are forbidden; '§' (Required)
        # must not appear inside these constructs
        [reduce_single_child, remove_brackets,
         forbid('repetition', 'option', 'oneormore'), assert_content(r'(?!§)')],
    "symbol, literal, regexp":
        reduce_single_child,
    (TOKEN_PTYPE, WHITESPACE_PTYPE):
        reduce_single_child,
    "list_":
        [flatten, remove_infix_operator],
    "*":
        replace_by_single_child
}

225

Eckhart Arnold's avatar
Eckhart Arnold committed
226
def EBNFTransform() -> TransformationFunc:
    """Creates a tree-transformation function for EBNF syntax trees.

    A fresh copy of `EBNF_AST_transformation_table` is bound to each
    returned function, so callers may mutate their table independently.
    """
    table_copy = EBNF_AST_transformation_table.copy()
    return partial(traverse, processing_table=table_copy)
di68kap's avatar
di68kap committed
228

229
def get_ebnf_transformer() -> TransformationFunc:
    """Returns a lazily created, module-level EBNF AST-transformer.

    The transformer is created by `EBNFTransform()` on first call and
    cached in the module-level name
    `thread_local_EBNF_transformer_singleton`.
    """
    global thread_local_EBNF_transformer_singleton
    if 'thread_local_EBNF_transformer_singleton' not in globals():
        thread_local_EBNF_transformer_singleton = EBNFTransform()
    return thread_local_EBNF_transformer_singleton
Eckhart Arnold's avatar
Eckhart Arnold committed
237
238
239
240
241
242
243
244


########################################################################
#
# EBNF abstract syntax tree to Python parser compilation
#
########################################################################

245

246
PreprocessorFactoryFunc = Callable[[], PreprocessorFunc]
247
ParserFactoryFunc = Callable[[], Grammar]
248
TransformerFactoryFunc = Callable[[], TransformationFunc]
249
250
CompilerFactoryFunc = Callable[[], Compiler]

251
252
253
PREPROCESSOR_FACTORY = '''
def get_preprocessor() -> PreprocessorFunc:
    return {NAME}Preprocessor
254
255
256
257
'''


GRAMMAR_FACTORY = '''
258
def get_grammar() -> {NAME}Grammar:
259
260
261
262
263
    global thread_local_{NAME}_grammar_singleton
    try:
        grammar = thread_local_{NAME}_grammar_singleton
    except NameError:
        thread_local_{NAME}_grammar_singleton = {NAME}Grammar()
264
265
        grammar = thread_local_{NAME}_grammar_singleton
    return grammar
266
267
268
269
'''


TRANSFORMER_FACTORY = '''
270
271
272
def {NAME}Transform() -> TransformationDict:
    return partial(traverse, processing_table={NAME}_AST_transformation_table.copy())

273
def get_transformer() -> TransformationFunc:
274
275
276
277
278
279
280
    global thread_local_{NAME}_transformer_singleton
    try:
        transformer = thread_local_{NAME}_transformer_singleton
    except NameError:
        thread_local_{NAME}_transformer_singleton = {NAME}Transform()
        transformer = thread_local_{NAME}_transformer_singleton
    return transformer
281
282
283
284
'''


COMPILER_FACTORY = '''
285
def get_compiler(grammar_name="{NAME}", grammar_source="") -> {NAME}Compiler:
286
287
288
289
290
291
292
    global thread_local_{NAME}_compiler_singleton
    try:
        compiler = thread_local_{NAME}_compiler_singleton
        compiler.set_grammar_name(grammar_name, grammar_source)
    except NameError:
        thread_local_{NAME}_compiler_singleton = \\
            {NAME}Compiler(grammar_name, grammar_source)
293
294
        compiler = thread_local_{NAME}_compiler_singleton
    return compiler
295
296
'''

Eckhart Arnold's avatar
Eckhart Arnold committed
297

298
299
class EBNFCompilerError(Exception):
    """Error raised by `EBNFCompiler` class. (Not compilation errors
    in the strict sense, see `CompilationError` in module ``dsl.py``)"""
    pass


304
class EBNFCompiler(Compiler):
    """
    Generates a Parser from an abstract syntax tree of a grammar specified
    in EBNF-Notation.

    Instances of this class must be called with the root-node of the
    abstract syntax tree from an EBNF-specification of a formal language.
    The returned value is the Python-source-code of a Grammar class for
    this language that can be used to parse texts in this language.
    See classes `parser.Compiler` and `parser.Grammar` for more information.

    Additionally, class EBNFCompiler provides helper methods to generate
    code-skeletons for a preprocessor, AST-transformation and full
    compilation of the formal language. These method's names start with
    the prefix `gen_`.

    Attributes:
        current_symbols:  During compilation, a list containing the root
                node of the currently compiled definition as first element
                and then the nodes of the symbols that are referred to in
                the currently compiled definition.

        rules:  Dictionary that maps rule names to a list of Nodes that
                contain symbol-references in the definition of the rule.
                The first item in the list is the node of the rule-
                definition itself. Example:

                           `alternative = a | b`

                Now `[str(node) for node in self.rules['alternative']]`
                yields `['alternative = a | b', 'a', 'b']`

        symbols:  A mapping of symbol names to their first usage (not
                their definition!) in the EBNF source.

        variables:  A set of symbols names that are used with the
                Pop or Retrieve operator. Because the values of these
                symbols need to be captured they are called variables.
                See `test_parser.TestPopRetrieve` for an example.

        recursive:  A set of symbols that are used recursively and
                therefore require a `Forward`-operator.

        definitions:  A dictionary of definitions. Other than `rules`
                this maps the symbols to their compiled definienda.

        deferred_tasks:  A list of callables that is filled during
                compilation, but that will be executed only after
                compilation has finished. Typically, it contains
                semantic checks that require information that
                is only available upon completion of compilation.

        root:   The name of the root symbol.

        directives:  A dictionary of all directives and their default
                values.

        re_flags:  A set of regular expression flags to be added to all
                regular expressions found in the current parsing process
    """
    # names of implicitly defined symbols in generated Grammar classes
    COMMENT_KEYWORD = "COMMENT__"
    WHITESPACE_KEYWORD = "WSP__"
    RAW_WS_KEYWORD = "WHITESPACE__"
    RESERVED_SYMBOLS = {WHITESPACE_KEYWORD, RAW_WS_KEYWORD, COMMENT_KEYWORD}
    AST_ERROR = "Badly structured syntax tree. " \
                "Potentially due to erroneous AST transformation."
    # maps EBNF prefix operators to the names of the parser classes that
    # implement them in the generated code
    PREFIX_TABLE = {'§': 'Required',
                    '&': 'Lookahead', '!': 'NegativeLookahead',
                    '-&': 'Lookbehind', '-!': 'NegativeLookbehind',
                    '::': 'Pop', ':': 'Retrieve'}
    # named whitespace regimes selectable via the `@ whitespace` directive
    WHITESPACE = {'horizontal': r'[\t ]*',  # default: horizontal
                  'linefeed': r'[ \t]*\n?(?!\s*\n)[ \t]*',
                  'vertical': r'\s*'}
377

378

379
    def __init__(self, grammar_name="", grammar_source=""):
        """Initializes the compiler with the (optional) name and source
        text of the grammar and resets all per-compilation state."""
        super(EBNFCompiler, self).__init__(grammar_name, grammar_source)
        self._reset()

383

384
    def _reset(self):
        """Resets all state that is accumulated during a single
        compilation run (see the class docstring for the meaning of
        the individual attributes)."""
        super(EBNFCompiler, self)._reset()
        self._result = ''           # type: str
        self.re_flags = set()       # type: Set[str]
        self.rules = OrderedDict()  # type: OrderedDict[str, List[Node]]
        self.current_symbols = []   # type: List[Node]
        self.symbols = {}           # type: Dict[str, Node]
        self.variables = set()      # type: Set[str]
        self.recursive = set()      # type: Set[str]
        self.definitions = {}       # type: Dict[str, str]
        self.deferred_tasks = []    # type: List[Callable]
        self.root_symbol = ""  # type: str
        # directive defaults; overridden by `@ ...` directives in the source
        self.directives = {'whitespace': self.WHITESPACE['horizontal'],
                           'comment': '',
                           'literalws': ['right'],
                           'tokens': set(),  # alt. 'preprocessor_tokens'
                           'filter': dict(),  # alt. 'filter'
                           'ignorecase': False}
402

Eckhart Arnold's avatar
Eckhart Arnold committed
403
    @property
    def result(self) -> str:
        """The Python source code of the grammar class produced by the
        last compilation run (assembled by `assemble_parser`)."""
        return self._result

407
    # methods for generating skeleton code for preprocessor, transformer, and compiler
408

409
    def gen_preprocessor_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for a preprocessor-function for
        the previously compiled formal language.
        """
        func_name = self.grammar_name + "Preprocessor"
        stub = "def " + func_name + "(text):\n    return text\n"
        return stub + PREPROCESSOR_FACTORY.format(NAME=self.grammar_name)
417

418

419
    def gen_transformer_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for the AST-transformation for the
        previously compiled formal language.

        Raises `EBNFCompilerError` if no grammar has been compiled yet.
        """
        if not self.rules:
            raise EBNFCompilerError('Compiler must be run before calling '
                                    '"gen_transformer_Skeleton()"!')
        # header of the generated transformation table
        lines = [self.grammar_name + '_AST_transformation_table' + ' = {',
                 '    # AST Transformations for the ' + self.grammar_name + '-grammar',
                 '    "+": remove_empty,']
        # one entry per rule; pre-fill a plausible transformation based on
        # the shape of the compiled definiens
        for rule_name in self.rules:
            definiens = self.definitions[rule_name]
            if definiens.startswith('Alternative'):
                transformation = '[replace_or_reduce]'
            elif definiens.startswith('Synonym'):
                transformation = '[replace_by_single_child]'
            else:
                transformation = '[]'
            lines.append('    "' + rule_name + '": %s,' % transformation)
        lines.append('    ":Token, :RE": reduce_single_child,')
        lines.append('    "*": replace_by_single_child')
        lines.append('}')
        lines.append('')
        lines.append(TRANSFORMER_FACTORY.format(NAME=self.grammar_name))
        return '\n'.join(lines)

444

445
    def gen_compiler_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for a Compiler-class for the
        previously compiled formal language.

        Raises `EBNFCompilerError` if no grammar has been compiled yet.
        """
        if not self.rules:
            raise EBNFCompilerError('Compiler has not been run before calling '
                                    '"gen_Compiler_Skeleton()"!')
        # class header and constructor of the generated compiler class;
        # the raw string (r"...") keeps '\w' and '\Z' literal in the emitted
        # code without triggering invalid-escape warnings in this file
        compiler = ['class ' + self.grammar_name + 'Compiler(Compiler):',
                    '    """Compiler for the abstract-syntax-tree of a ' +
                    self.grammar_name + ' source file.',
                    '    """', '',
                    '    def __init__(self, grammar_name="' +
                    self.grammar_name + '", grammar_source=""):',
                    '        super(' + self.grammar_name +
                    'Compiler, self).__init__(grammar_name, grammar_source)',
                    r"        assert re.match('\w+\Z', grammar_name)", '']
        # one `on_<rule>` stub per rule; only the root rule's stub returns
        # the node so that a freshly generated compiler is runnable
        for name in self.rules:
            method_name = Compiler.method_name(name)
            if name == self.root_symbol:
                compiler += ['    def ' + method_name + '(self, node):',
                             '        return node', '']
            else:
                compiler += ['    def ' + method_name + '(self, node):',
                             '        pass', '']
        compiler += [COMPILER_FACTORY.format(NAME=self.grammar_name)]
        return '\n'.join(compiler)
472

473

474
475
476
477
478
    def assemble_parser(self, definitions: List[Tuple[str, str]], root_node: Node) -> str:
        """
        Creates the Python code for the parser after compilation of
        the EBNF-Grammar.

        Parameters:
            definitions:  (symbol, python-expression) pairs in source
                order; the first pair is taken to define the root symbol.
                NOTE: this list is mutated (extended and reversed) in place.
            root_node:  the root node of the compiled syntax tree; used
                for error reporting.

        Returns the assembled Python source of the Grammar class and also
        stores it in `self._result`.
        """

        # execute deferred tasks, for example semantic checks that cannot
        # be done before the symbol table is complete

        for task in self.deferred_tasks:
            task()

        # provide for capturing of symbols that are variables, i.e. the
        # value of will be retrieved at some point during the parsing process

        if self.variables:
            for i in range(len(definitions)):
                if definitions[i][0] in self.variables:
                    definitions[i] = (definitions[i][0], 'Capture(%s)' % definitions[i][1])

        # add special fields for Grammar class
        # (appended last so that, after the reverse() below, they are
        # declared first in the generated class body)

        definitions.append(('wspR__', self.WHITESPACE_KEYWORD
                            if 'right' in self.directives['literalws'] else "''"))
        definitions.append(('wspL__', self.WHITESPACE_KEYWORD
                            if 'left' in self.directives['literalws'] else "''"))
        definitions.append((self.WHITESPACE_KEYWORD,
                            ("mixin_comment(whitespace=" + self.RAW_WS_KEYWORD +
                             ", comment=" + self.COMMENT_KEYWORD + ")")))
        definitions.append((self.RAW_WS_KEYWORD, "r'{whitespace}'".format(**self.directives)))
        definitions.append((self.COMMENT_KEYWORD, "r'{comment}'".format(**self.directives)))

        # prepare parser class header and docstring and
        # add EBNF grammar to the doc string of the parser class

        article = 'an ' if self.grammar_name[0:1] in "AaEeIiOoUu" else 'a '  # what about 'hour', 'universe' etc.?
        declarations = ['class ' + self.grammar_name +
                        'Grammar(Grammar):',
                        'r"""Parser for ' + article + self.grammar_name +
                        ' source file' +
                        (', with this grammar:' if self.grammar_source else '.')]
        definitions.append(('parser_initialization__', '"upon instantiation"'))
        if self.grammar_source:
            definitions.append(('source_hash__',
                                '"%s"' % md5(self.grammar_source, __version__)))
            declarations.append('')
            declarations += [line for line in self.grammar_source.split('\n')]
            # strip trailing blank lines of the quoted grammar source
            while declarations[-1].strip() == '':
                declarations = declarations[:-1]
        declarations.append('"""')

        # turn definitions into declarations in reverse order
        # (reversed so that referenced symbols are defined before use)

        self.root_symbol = definitions[0][0] if definitions else ""
        definitions.reverse()
        declarations += [symbol + ' = Forward()'
                         for symbol in sorted(list(self.recursive))]
        for symbol, statement in definitions:
            if symbol in self.recursive:
                declarations += [symbol + '.set(' + statement + ')']
            else:
                declarations += [symbol + ' = ' + statement]

        # check for symbols used but never defined

        defined_symbols = set(self.rules.keys()) | self.RESERVED_SYMBOLS
        for symbol in self.symbols:
            if symbol not in defined_symbols:
                self.symbols[symbol].add_error("Missing definition for symbol '%s'" % symbol)
                # root_node.error_flag = True

        # check for unconnected rules (reachability from the root symbol)

        defined_symbols.difference_update(self.RESERVED_SYMBOLS)

        def remove_connections(symbol):
            # depth-first removal of every rule reachable from `symbol`;
            # whatever remains in `defined_symbols` is unconnected
            if symbol in defined_symbols:
                defined_symbols.remove(symbol)
                for related in self.rules[symbol][1:]:
                    remove_connections(str(related))

        remove_connections(self.root_symbol)
        for leftover in defined_symbols:
            self.rules[leftover][0].add_error(('Rule "%s" is not connected to '
                'parser root "%s" !') % (leftover, self.root_symbol), Error.WARNING)

        # set root_symbol parser and assemble python grammar definition

        if self.root_symbol and 'root__' not in self.rules:
            declarations.append('root__ = ' + self.root_symbol)
        declarations.append('')
        self._result = '\n    '.join(declarations) \
                       + GRAMMAR_FACTORY.format(NAME=self.grammar_name)
        return self._result
568

569
570
571

    ## compilation methods

572
    def on_syntax(self, node: Node) -> str:
        """Compiles the root node of an EBNF syntax tree: compiles every
        `definition` and `directive` child in source order, then delegates
        to `assemble_parser` to produce the Grammar-class source code."""
        definitions = []  # type: List[Tuple[str, str]]

        # drop the wrapping sequence node
        if len(node.children) == 1 and not node.children[0].parser.name:
            node = node.children[0]

        # compile definitions and directives and collect definitions
        for nd in node.children:
            if nd.parser.name == "definition":
                definitions.append(self.compile(nd))
            else:
                assert nd.parser.name == "directive", nd.as_sxpr()
                self.compile(nd)
                # directives add errors to their own subtree; propagate
                node.error_flag = max(node.error_flag, nd.error_flag)
        self.definitions.update(definitions)

        return self.assemble_parser(definitions, node)
590

591

592
    def on_definition(self, node: Node) -> Tuple[str, str]:
        """Compiles a single rule definition.

        Validates the rule name (no redefinitions, no reserved symbols,
        no names with leading/trailing double underscores, no preprocessor
        tokens, no Python keywords), then compiles the right-hand side.

        Returns a (rule-name, python-expression) pair.  On a TypeError
        during compilation -- usually a malformed AST -- the pair carries
        an error marker instead.
        """
        rule = str(node.children[0])
        if rule in self.rules:
            # report the redefinition on both the first definition and here
            first = self.rules[rule][0]
            if not first._errors:
                first.add_error('First definition of rule "%s" '
                                'followed by illegal redefinitions.' % rule)
            node.add_error('A rule with name "%s" has already been defined earlier.' % rule)
        elif rule in EBNFCompiler.RESERVED_SYMBOLS:
            node.add_error('Symbol "%s" is a reserved symbol.' % rule)
        elif not sane_parser_name(rule):
            # fixed message: was "start or  end with a doube underscore"
            node.add_error('Illegal symbol "%s". Symbols must not start or '
                           'end with a double underscore "__".' % rule)
        elif rule in self.directives['tokens']:
            node.add_error('Symbol "%s" has already been defined as '
                           'a preprocessor token.' % rule)
        elif keyword.iskeyword(rule):
            node.add_error('Python keyword "%s" may not be used as a symbol. '
                           % rule + '(This may change in the future.)')
        try:
            self.current_symbols = [node]
            self.rules[rule] = self.current_symbols
            defn = self.compile(node.children[1])
            if rule in self.variables:
                defn = 'Capture(%s)' % defn
                self.variables.remove(rule)
            elif defn.find("(") < 0:
                # assume it's a synonym, like 'page = REGEX_PAGE_NR'
                defn = 'Synonym(%s)' % defn
        except TypeError as error:
            from traceback import extract_tb
            trace = str(extract_tb(error.__traceback__)[-1])
            errmsg = "%s (TypeError: %s; %s)\n%s" \
                     % (EBNFCompiler.AST_ERROR, str(error), trace, node.as_sxpr())
            node.add_error(errmsg)
            rule, defn = rule + ':error', '"' + errmsg + '"'
        return rule, defn
629

630

631
    def _check_rx(self, node: Node, rx: str) -> str:
        """Validates the regular expression string `rx`.

        Prepends the currently active inline flags (plus verbose-mode 'x'
        for multiline expressions), reports any compilation error on
        `node`, and returns the (possibly flag-prefixed) expression.
        """
        multiline = rx.find('\n') >= 0
        flags = self.re_flags | {'x'} if multiline else self.re_flags
        if flags:
            rx = "(?%s)%s" % ("".join(flags), rx)
        try:
            re.compile(rx)
        except Exception as compile_error:
            node.add_error("malformed regular expression %s: %s" %
                           (repr(rx), str(compile_error)))
        return rx

646

647
    def on_directive(self, node: Node) -> str:
        """
        Compiles a directive ("@ key = value") by recording the parsed
        value in `self.directives`. Unknown keys or ill-formed values are
        reported as errors on `node`. Always returns the empty string,
        because directives do not emit parser code themselves.
        """
        key = str(node.children[0]).lower()
        assert key not in self.directives['tokens']

        if key in {'comment', 'whitespace'}:
            if node.children[1].parser.name == "list_":
                if len(node.children[1].result) != 1:
                    node.add_error('Directive "%s" must have one, but not %i values.' %
                                   (key, len(node.children[1].result)))
                value = self.compile(node.children[1]).pop()
                if key == 'whitespace' and value in EBNFCompiler.WHITESPACE:
                    value = EBNFCompiler.WHITESPACE[value]  # replace whitespace-name by regex
                else:
                    node.add_error('Value "%s" not allowed for directive "%s".' % (value, key))
            else:
                value = str(node.children[1]).strip("~")
                if value != str(node.children[1]):
                    node.add_error("Whitespace marker '~' not allowed in definition of "
                                   "%s regular expression." % key)
                if value[0] + value[-1] in {'""', "''"}:
                    value = escape_re(value[1:-1])  # string literal: escape for regex use
                elif value[0] + value[-1] == '//':
                    value = self._check_rx(node, value[1:-1])
                if key == 'whitespace' and not re.match(value, ''):
                    node.add_error("Implicit whitespace should always match the empty string, "
                                   "/%s/ does not." % value)
            self.directives[key] = value

        elif key == 'ignorecase':
            value = str(node.children[1]).lower() not in {"off", "false", "no"}
            # BUG FIX: was `self.directives['ignorecase'] == value`, a no-op
            # comparison, so the directive's value was never actually stored.
            self.directives['ignorecase'] = value
            if value:
                self.re_flags.add('i')

        elif key == 'literalws':
            value = {item.lower() for item in self.compile(node.children[1])}
            if (len(value - {'left', 'right', 'both', 'none'}) > 0
                    or ('none' in value and len(value) > 1)):
                node.add_error('Directive "literalws" allows the values '
                               '`left`, `right`, `both` or `none`, '
                               'but not `%s`' % ", ".join(value))
            # 'both' expands to {'left', 'right'}; 'none' yields the empty set.
            # (Was the dict literal `{}` before, which only worked by accident
            # because list({}) == [].)
            ws = {'left', 'right'} if 'both' in value \
                else set() if 'none' in value else value
            self.directives[key] = list(ws)

        elif key in {'tokens', 'preprocessor_tokens'}:
            self.directives['tokens'] |= self.compile(node.children[1])

        elif key.endswith('_filter'):
            filter_set = self.compile(node.children[1])
            if not isinstance(filter_set, set) or len(filter_set) != 1:
                # message typo fixed: "exactly on symbol" -> "exactly one symbol"
                node.add_error('Directive "%s" accepts exactly one symbol, not %s'
                               % (key, str(filter_set)))
            self.directives['filter'][key[:-7]] = filter_set.pop()

        else:
            node.add_error('Unknown directive %s ! (Known ones are %s .)' %
                           (key, ', '.join(list(self.directives.keys()))))
        return ""

711

712
    def non_terminal(self, node: Node, parser_class: str, custom_args: List[str] = None) -> str:
        """
        Compiles any non-terminal, where `parser_class` indicates the Parser
        class name for the particular non-terminal.

        Args:
            node: node whose compiled children become the parser's arguments
            parser_class: name of the Parser class to instantiate
            custom_args: optional extra argument strings appended after the
                compiled children (e.g. "mandatory=1")
        Returns:
            Python source text of the parser call, e.g. "Series(a, b)".
        """
        # BUG FIX: the default was the mutable literal `[]`, which is shared
        # between calls; `None` with a fallback avoids accidental aliasing.
        arguments = [self.compile(r) for r in node.children] + (custom_args or [])
        return parser_class + '(' + ', '.join(arguments) + ')'

720

721
    def on_expression(self, node) -> str:
        """Compiles an EBNF alternative ("|"-expression) to an Alternative parser."""
        return self.non_terminal(node, 'Alternative')

724

725
    def on_term(self, node) -> str:
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
        mandatory_marker = []
        filtered_children = []
        i = 0
        for nd in node.children:
            if nd.parser.ptype == TOKEN_PTYPE and str(nd) == "§":
                mandatory_marker.append(i)
                if i == 0:
                    nd.add_error('First item of a series should not be mandatory.',
                                 code=Error.WARNING)
                elif len(mandatory_marker) > 1:
                    nd.add_error('One mandatory marker (§) sufficient to declare the '
                                 'rest of the series as mandatory.', code=Error.WARNING)
            else:
                filtered_children.append(nd)
                i += 1
        saved_result = node.result
        node.result = tuple(filtered_children)
        mandatory_marker.append(Series.NOPE)
        compiled = self.non_terminal(node, 'Series', ['mandatory=%i' % mandatory_marker[0]])
        node.result = saved_result
        return compiled
747

748

749
    def on_factor(self, node: Node) -> str:
        """
        Compiles a prefixed factor (e.g. "&", "!", "::", ":", "-&", "-!")
        by looking up the prefix in self.PREFIX_TABLE and delegating to
        non_terminal() with the corresponding Parser class.

        Retrieve operators ("::", ":") require a symbol as argument and may
        pick up a filter from the `@..._filter` directives. For lookbehind
        prefixes ("-..."), a deferred check is queued that verifies the
        argument ultimately resolves to a plain RegExp parser.
        """
        assert node.children
        assert len(node.children) >= 2, node.as_sxpr()
        prefix = str(node.children[0])  # cast(str, node.children[0].result)
        custom_args = []  # type: List[str]

        if prefix in {'::', ':'}:
            # retrieve/pop operator: argument must be a single symbol
            assert len(node.children) == 2
            arg = node.children[-1]
            if arg.parser.name != 'symbol':
                node.add_error(('Retrieve Operator "%s" requires a symbol, '
                                'and not a %s.') % (prefix, str(arg.parser)))
                return str(arg.result)
            if str(arg) in self.directives['filter']:
                custom_args = ['filter=%s' % self.directives['filter'][str(arg)]]
            self.variables.add(str(arg))  # cast(str, arg.result)

        elif len(node.children) > 2:
            # more than one child after the prefix: fold children[2:] into
            # children[1] so the prefix parser receives a single argument
            # shift = (Node(node.parser, node.result[1].result),)
            # node.result[1].result = shift + node.result[2:]
            node.children[1].result = (Node(node.children[1].parser, node.children[1].result),) \
                                    + node.children[2:]
            node.children[1].parser = node.parser
            node.result = (node.children[0], node.children[1])

        # drop the prefix token before compiling the argument(s)
        node.result = node.children[1:]
        try:
            parser_class = self.PREFIX_TABLE[prefix]
            result = self.non_terminal(node, parser_class, custom_args)
            if prefix[:1] == '-':
                # lookbehind: verify (later, once all rules are known) that
                # the argument resolves to a plain regular expression
                def check(node):
                    nd = node
                    if len(nd.children) >= 1:
                        nd = nd.children[0]
                    while nd.parser.name == "symbol":
                        # follow symbol references through self.rules
                        symlist = self.rules.get(str(nd), [])
                        if len(symlist) == 2:
                            nd = symlist[1]
                        else:
                            if len(symlist) == 1:
                                nd = symlist[0].children[1]
                            break
                    if (nd.parser.name != "regexp" or str(nd)[:1] != '/'
                        or str(nd)[-1:] != '/'):
                        node.add_error("Lookbehind-parser can only be used with plain RegExp-"
                                       "parsers, not with: " + nd.parser.name + nd.parser.ptype)

                if not result.startswith('RegExp('):
                    self.deferred_tasks.append(lambda: check(node))
            return result
        except KeyError:
            node.add_error('Unknown prefix "%s".' % prefix)
        return ""
802

803

804
    def on_option(self, node) -> str:
        """Compiles an optional element "[ ... ]" to an Option parser."""
        return self.non_terminal(node, 'Option')
806

807

808
    def on_repetition(self, node) -> str:
        """Compiles a repetition "{ ... }" to a ZeroOrMore parser."""
        return self.non_terminal(node, 'ZeroOrMore')

811

812
    def on_oneormore(self, node) -> str:
        """Compiles a one-or-more repetition to a OneOrMore parser."""
        return self.non_terminal(node, 'OneOrMore')

815

816
    def on_regexchain(self, node) -> str:
        """Regex chains are not (yet) supported; always raises."""
        raise EBNFCompilerError("Not yet implemented!")

819

820
    def on_group(self, node) -> str:
        """Group nodes "( ... )" are removed by the AST transformation;
        reaching this method therefore indicates a compiler error."""
        raise EBNFCompilerError("Group nodes should have been eliminated by "
                                "AST transformation!")

824

825
826
827
    def on_symbol(self, node: Node) -> str:
        """
        Compiles a symbol occurring on the right hand side of a definition.

        Preprocessor tokens become PreprocessorToken parsers. Other symbols
        are tracked for first-use and recursion bookkeeping; reserved
        symbols (whitespace/comment keywords) are wrapped in RegExp.
        """
        symbol = str(node)
        if symbol in self.directives['tokens']:
            return 'PreprocessorToken("%s")' % symbol
        self.current_symbols.append(node)
        if symbol not in self.symbols:
            # remember the node where the symbol was first used
            self.symbols[symbol] = node
        if symbol in self.rules:
            self.recursive.add(symbol)
        if symbol in EBNFCompiler.RESERVED_SYMBOLS:
            return "RegExp(%s)" % symbol
        return symbol
838

839

840
    def on_literal(self, node) -> str:
        """Compiles a string literal to a Token parser; backslashes are
        doubled so the literal survives embedding in generated source."""
        escaped_literal = str(node).replace('\\', '\\\\')
        return 'Token({})'.format(escaped_literal)
842

843

844
    def on_regexp(self, node: Node) -> str:
        """
        Compiles a regular-expression node into parser code.

        A plain "/.../" expression becomes a RegExp parser. Expressions
        marked with '~' on either side become RE parsers, whose wL/wR
        keyword arguments are filled in depending on the markers and the
        `@literalws` directive.
        """
        rx = str(node)
        extra_args = []   # type: List[str]
        if rx[0] == '/' and rx[-1] == '/':
            parser = 'RegExp('
        else:
            parser = 'RE('
            literalws = self.directives['literalws']
            if rx[:2] == '~/':
                if 'left' not in literalws:
                    extra_args.insert(0, 'wL=' + self.WHITESPACE_KEYWORD)
                rx = rx[1:]
            elif 'left' in literalws:
                extra_args.insert(0, "wL=''")
            if rx[-2:] == '/~':
                if 'right' not in literalws:
                    extra_args.insert(0, 'wR=' + self.WHITESPACE_KEYWORD)
                rx = rx[:-1]
            elif 'right' in literalws:
                extra_args.insert(0, "wR=''")
        try:
            # strip the surrounding slashes and un-escape '\/' before checking
            arg = repr(self._check_rx(node, rx[1:-1].replace(r'\/', '/')))
        except AttributeError as error:
            from traceback import extract_tb
            trace = str(extract_tb(error.__traceback__)[-1])
            errmsg = "%s (AttributeError: %s; %s)\n%s" \
                     % (EBNFCompiler.AST_ERROR, str(error), trace, node.as_sxpr())
            node.add_error(errmsg)
            return '"' + errmsg + '"'
        return parser + ', '.join([arg] + extra_args) + ')'
873

874

875
    def on_list_(self, node) -> Set[str]:
        """Returns the whitespace-stripped values of a list node as a set."""
        assert node.children
        stripped_items = {child.result.strip() for child in node.children}
        return stripped_items
878
879


880
def get_ebnf_compiler(grammar_name="", grammar_source="") -> EBNFCompiler:
    """
    Returns a thread-local singleton EBNFCompiler, (re-)initialised with
    the given grammar name and source.

    The singleton is created lazily on first use within the current thread
    (signalled by the NameError raised when the global does not exist yet).
    """
    global thread_local_ebnf_compiler_singleton
    try:
        compiler = thread_local_ebnf_compiler_singleton
    except NameError:
        # first call in this thread: create and memoize the compiler
        thread_local_ebnf_compiler_singleton = EBNFCompiler(grammar_name, grammar_source)
        return thread_local_ebnf_compiler_singleton
    # Robustness fix: set_grammar_name() is called OUTSIDE the try-block, so
    # that a stray NameError escaping it can no longer be mistaken for "the
    # singleton does not exist yet" and silently recreate the compiler.
    compiler.set_grammar_name(grammar_name, grammar_source)
    return compiler