"""ebnf.py - EBNF -> Python-Parser compilation for DHParser

Copyright 2016  by Eckhart Arnold (arnold@badw.de)
                Bavarian Academy of Sciences and Humanities (badw.de)

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied.  See the License for the specific language governing
permissions and limitations under the License.
"""

import keyword
from collections import OrderedDict
from functools import partial

try:
    import regex as re
except ImportError:
    import re
try:
    from typing import Callable, Dict, List, Set, Tuple, Union
except ImportError:
    from .typing34 import Callable, Dict, List, Set, Tuple, Union

from DHParser.toolkit import load_if_file, escape_re, md5, sane_parser_name
from DHParser.parser import Grammar, mixin_comment, nil_preprocessor, Forward, RE, NegativeLookahead, \
    Alternative, Series, Option, Required, OneOrMore, ZeroOrMore, Token, Compiler, \
    PreprocessorFunc
from DHParser.syntaxtree import WHITESPACE_PTYPE, TOKEN_PTYPE, Error, Node, TransformationFunc
from DHParser.transform import TransformationDict, traverse, remove_brackets, \
    reduce_single_child, replace_by_single_child, remove_expendables, \
    remove_tokens, flatten, forbid, assert_content, remove_infix_operator
from DHParser.versionnumber import __version__

__all__ = ('get_ebnf_preprocessor',
           'get_ebnf_grammar',
           'get_ebnf_transformer',
           'get_ebnf_compiler',
           'EBNFGrammar',
           'EBNFTransform',
           'EBNFCompilerError',
           'EBNFCompiler',
           'grammar_changed',
           'PreprocessorFactoryFunc',
           'ParserFactoryFunc',
           'TransformerFactoryFunc',
           'CompilerFactoryFunc')


########################################################################
#
# EBNF scanning
#
########################################################################


def get_ebnf_preprocessor() -> PreprocessorFunc:
    return nil_preprocessor


########################################################################
#
# EBNF parsing
#
########################################################################


class EBNFGrammar(Grammar):
    r"""Parser for an EBNF source file, with this grammar:

    # EBNF-Grammar in EBNF

    @ comment    =  /#.*(?:\n|$)/                    # comments start with '#' and eat all chars up to and including '\n'
    @ whitespace =  /\s*/                            # whitespace includes linefeed
    @ literalws  =  right                            # trailing whitespace of literals will be ignored tacitly

    syntax     =  [~//] { definition | directive } §EOF
    definition =  symbol §"=" expression
    directive  =  "@" §symbol §"=" ( regexp | literal | list_ )

    expression =  term { "|" term }
    term       =  { factor }+
    factor     =  [flowmarker] [retrieveop] symbol !"="   # negative lookahead to be sure it's not a definition
                | [flowmarker] literal
                | [flowmarker] regexp
                | [flowmarker] group
                | [flowmarker] oneormore
                | repetition
                | option

    flowmarker =  "!"  | "&"  | "§" |                # '!' negative lookahead, '&' positive lookahead, '§' required
                  "-!" | "-&"                        # '-' negative lookbehind, '-&' positive lookbehind
    retrieveop =  "::" | ":"                         # '::' pop, ':' retrieve

    group      =  "(" expression §")"
    oneormore  =  "{" expression "}+"
    repetition =  "{" expression §"}"
    option     =  "[" expression §"]"

    symbol     =  /(?!\d)\w+/~                       # e.g. expression, factor, parameter_list
    literal    =  /"(?:[^"]|\\")*?"/~                # e.g. "(", '+', 'while'
                | /'(?:[^']|\\')*?'/~                # whitespace following literals will be ignored tacitly.
    regexp     =  /~?\/(?:[^\/]|(?<=\\)\/)*\/~?/~    # e.g. /\w+/, ~/#.*(?:\n|$)/~
                                                     # '~' is a whitespace-marker, if present leading or trailing
                                                     # whitespace of a regular expression will be ignored tacitly.
    list_      =  /\w+/~ { "," /\w+/~ }              # comma separated list of symbols, e.g. BEGIN_LIST, END_LIST,
                                                     # BEGIN_QUOTE, END_QUOTE ; see CommonMark/markdown.py for an example
    EOF =  !/./
    """
    expression = Forward()
    source_hash__ = "a410e1727fb7575e98ff8451dbf8f3bd"
    parser_initialization__ = "upon instantiation"
    COMMENT__ = r'#.*(?:\n|$)'
    WSP__ = mixin_comment(whitespace=r'\s*', comment=r'#.*(?:\n|$)')
    wspL__ = ''
    wspR__ = WSP__
    EOF = NegativeLookahead(RE('.', wR=''))
    list_ = Series(RE('\\w+'), ZeroOrMore(Series(Token(","), RE('\\w+'))))
    regexp = RE(r'~?/(?:\\/|[^/])*?/~?')  # RE('~?/(?:[^/]|(?<=\\\\)/)*/~?')
    literal = Alternative(RE('"(?:[^"]|\\\\")*?"'), RE("'(?:[^']|\\\\')*?'"))
    symbol = RE('(?!\\d)\\w+')
    option = Series(Token("["), expression, Required(Token("]")))
    repetition = Series(Token("{"), expression, Required(Token("}")))
    oneormore = Series(Token("{"), expression, Token("}+"))
    group = Series(Token("("), expression, Required(Token(")")))
    retrieveop = Alternative(Token("::"), Token(":"))
    flowmarker = Alternative(Token("!"), Token("&"), Token("§"), Token("-!"), Token("-&"))
    factor = Alternative(Series(Option(flowmarker), Option(retrieveop), symbol, NegativeLookahead(Token("="))),
                         Series(Option(flowmarker), literal), Series(Option(flowmarker), regexp),
                         Series(Option(flowmarker), group), Series(Option(flowmarker), oneormore),
                         repetition, option)
    term = OneOrMore(factor)
    expression.set(Series(term, ZeroOrMore(Series(Token("|"), term))))
    directive = Series(Token("@"), Required(symbol), Required(Token("=")), Alternative(regexp, literal, list_))
    definition = Series(symbol, Required(Token("=")), expression)
    syntax = Series(Option(RE('', wR='', wL=WSP__)), ZeroOrMore(Alternative(definition, directive)), Required(EOF))
    root__ = syntax
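    # Usage sketch (illustrative only): an EBNFGrammar instance is callable and
    # returns the concrete syntax tree of the given EBNF source, e.g.
    #
    #     ebnf_grammar = get_ebnf_grammar()   # thread-local singleton, defined below
    #     syntax_tree = ebnf_grammar('key = "value"\n')
    #     assert not syntax_tree.error_flag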


def grammar_changed(grammar_class, grammar_source: str) -> bool:
    """Returns ``True`` if ``grammar_class`` does not reflect the latest
    changes of ``grammar_source``

    Parameters:
        grammar_class:  the parser class representing the grammar
            or the file name of a compiler suite containing the grammar
        grammar_source:  File name or string representation of the
            EBNF code of the grammar

    Returns (bool):
        True, if the source text of the grammar is different from the
        source from which the grammar class was generated
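
    Example (illustrative; file names are made up):
        if grammar_changed('my_dsl_compiler.py', 'my_dsl.ebnf'):
            ...   # e.g. re-run the EBNF compilation step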
    """
    grammar = load_if_file(grammar_source)
    chksum = md5(grammar, __version__)
    if isinstance(grammar_class, str):
        # grammar_class = load_compiler_suite(grammar_class)[1]
        with open(grammar_class, 'r', encoding='utf8') as f:
            pycode = f.read()
        m = re.search(r'class \w*\(Grammar\)', pycode)
        if m:
            m = re.search('    source_hash__ *= *"([a-z0-9]*)"',
                          pycode[m.span()[1]:])
            return not (m and m.groups() and m.groups()[-1] == chksum)
        else:
            return True
    else:
        return chksum != grammar_class.source_hash__


def get_ebnf_grammar() -> EBNFGrammar:
    global thread_local_ebnf_grammar_singleton
    try:
        grammar = thread_local_ebnf_grammar_singleton
        return grammar
    except NameError:
        thread_local_ebnf_grammar_singleton = EBNFGrammar()
        return thread_local_ebnf_grammar_singleton


########################################################################
#
# EBNF concrete to abstract syntax tree transformation and validation
#
########################################################################


EBNF_AST_transformation_table = {
    # AST Transformations for EBNF-grammar
    "+":
        remove_expendables,
    "syntax":
        [],  # otherwise '"*": replace_by_single_child' would be applied
    "directive, definition":
        remove_tokens('@', '='),
    "expression":
        [replace_by_single_child, flatten, remove_tokens('|')],  # remove_infix_operator],
    "term":
        [replace_by_single_child, flatten],  # supports both idioms:  "{ factor }+" and "factor { factor }"
    "factor, flowmarker, retrieveop":
        replace_by_single_child,
    "group":
        [remove_brackets, replace_by_single_child],
    "oneormore, repetition, option":
        [reduce_single_child, remove_brackets,
         forbid('repetition', 'option', 'oneormore'), assert_content(r'(?!§)')],
    "symbol, literal, regexp":
        reduce_single_child,
    (TOKEN_PTYPE, WHITESPACE_PTYPE):
        reduce_single_child,
    "list_":
        [flatten, remove_infix_operator],
    "*":
        replace_by_single_child
}

def EBNFTransform() -> TransformationFunc:
    return partial(traverse, processing_table=EBNF_AST_transformation_table.copy())


def get_ebnf_transformer() -> TransformationFunc:
    global thread_local_EBNF_transformer_singleton
    try:
        transformer = thread_local_EBNF_transformer_singleton
    except NameError:
        thread_local_EBNF_transformer_singleton = EBNFTransform()
        transformer = thread_local_EBNF_transformer_singleton
    return transformer
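# Usage sketch (illustrative only): the transformation works in place on the
# concrete syntax tree delivered by EBNFGrammar, e.g.
#
#     tree = get_ebnf_grammar()('document = /.*/\n')
#     get_ebnf_transformer()(tree)   # `tree` is now the abstract syntax tree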


########################################################################
#
# EBNF abstract syntax tree to Python parser compilation
#
########################################################################

PreprocessorFactoryFunc = Callable[[], PreprocessorFunc]
ParserFactoryFunc = Callable[[], Grammar]
TransformerFactoryFunc = Callable[[], TransformationFunc]
CompilerFactoryFunc = Callable[[], Compiler]


PREPROCESSOR_FACTORY = '''
def get_preprocessor() -> PreprocessorFunc:
    return {NAME}Preprocessor
'''


GRAMMAR_FACTORY = '''
def get_grammar() -> {NAME}Grammar:
    global thread_local_{NAME}_grammar_singleton
    try:
        grammar = thread_local_{NAME}_grammar_singleton
    except NameError:
        thread_local_{NAME}_grammar_singleton = {NAME}Grammar()
        grammar = thread_local_{NAME}_grammar_singleton
    return grammar
'''


TRANSFORMER_FACTORY = '''
def {NAME}Transform() -> TransformationDict:
    return partial(traverse, processing_table={NAME}_AST_transformation_table.copy())


def get_transformer() -> TransformationFunc:
    global thread_local_{NAME}_transformer_singleton
    try:
        transformer = thread_local_{NAME}_transformer_singleton
    except NameError:
        thread_local_{NAME}_transformer_singleton = {NAME}Transform()
        transformer = thread_local_{NAME}_transformer_singleton
    return transformer
'''


COMPILER_FACTORY = '''
def get_compiler(grammar_name="{NAME}", grammar_source="") -> {NAME}Compiler:
    global thread_local_{NAME}_compiler_singleton
    try:
        compiler = thread_local_{NAME}_compiler_singleton
        compiler.set_grammar_name(grammar_name, grammar_source)
    except NameError:
        thread_local_{NAME}_compiler_singleton = \\
            {NAME}Compiler(grammar_name, grammar_source)
        compiler = thread_local_{NAME}_compiler_singleton
    return compiler
'''
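# Note (illustrative): the *_FACTORY templates above are filled in via str.format,
# e.g. GRAMMAR_FACTORY.format(NAME="Arithmetic") yields a get_grammar() function
# that lazily creates and then reuses a thread-local ArithmeticGrammar instance.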

class EBNFCompilerError(Exception):
    """Error raised by `EBNFCompiler` class. (Not compilation errors
    in the strict sense, see `CompilationError` in module ``dsl.py``)"""
    pass


class EBNFCompiler(Compiler):
    """
    Generates a Parser from an abstract syntax tree of a grammar specified
    in EBNF-Notation.

    Instances of this class must be called with the root-node of the
    abstract syntax tree from an EBNF-specification of a formal language.
    The returned value is the Python-source-code of a Grammar class for
    this language that can be used to parse texts in this language.
    See classes `parser.Compiler` and `parser.Grammar` for more information.
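
    Example (illustrative; the grammar name is made up):
        ebnf_compiler = get_ebnf_compiler("Arithmetic", ebnf_source)
        python_src = ebnf_compiler(ast)   # source text of an ArithmeticGrammar class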

    Additionally, class EBNFCompiler provides helper methods to generate
    code-skeletons for a preprocessor, AST-transformation and full
    compilation of the formal language. These methods' names start with
    the prefix `gen_`.

    Attributes:
        current_symbols:  During compilation, a list containing the root
                node of the currently compiled definition as first element
                and then the nodes of the symbols that are referred to in
                the currently compiled definition.

        rules:  Dictionary that maps rule names to a list of Nodes that
                contain symbol-references in the definition of the rule.
                The first item in the list is the node of the rule-
                definition itself. Example:

                           `alternative = a | b`

                Now `[str(node) for node in self.rules['alternative']]`
                yields `['alternative = a | b', 'a', 'b']`

        symbols:  A mapping of symbol names to their first usage (not
                their definition!) in the EBNF source.

        variables:  A set of symbol names that are used with the
                Pop or Retrieve operator. Because the values of these
                symbols need to be captured they are called variables.
                See `test_parser.TestPopRetrieve` for an example.

        recursive:  A set of symbols that are used recursively and
                therefore require a `Forward`-operator.

        definitions:  A dictionary of definitions. Unlike `rules`,
                this maps the symbols to their compiled definienda.

        deferred_tasks:  A list of callables that is filled during
                compilation, but that will be executed only after
                compilation has finished. Typically, it contains
                semantic checks that require information that
                is only available upon completion of compilation.

        root_symbol:  The name of the root symbol.

        directives:  A dictionary of all directives and their default
                values.

        re_flags:  A set of regular expression flags to be added to all
                regular expressions found in the current parsing process.
    """
    COMMENT_KEYWORD = "COMMENT__"
    WHITESPACE_KEYWORD = "WSP__"
    RAW_WS_KEYWORD = "WHITESPACE__"
    RESERVED_SYMBOLS = {WHITESPACE_KEYWORD, RAW_WS_KEYWORD, COMMENT_KEYWORD}
    AST_ERROR = "Badly structured syntax tree. " \
                "Potentially due to erroneous AST transformation."
    PREFIX_TABLE = {'§': 'Required',
                    '&': 'Lookahead', '!': 'NegativeLookahead',
                    '-&': 'Lookbehind', '-!': 'NegativeLookbehind',
                    '::': 'Pop', ':': 'Retrieve'}
    WHITESPACE = {'horizontal': r'[\t ]*',  # default: horizontal
                  'linefeed': r'[ \t]*\n?(?!\s*\n)[ \t]*',
                  'vertical': r'\s*'}

    def __init__(self, grammar_name="", grammar_source=""):
        super(EBNFCompiler, self).__init__(grammar_name, grammar_source)
        self._reset()

    def _reset(self):
        super(EBNFCompiler, self)._reset()
        self._result = ''           # type: str
        self.re_flags = set()       # type: Set[str]
        self.rules = OrderedDict()  # type: OrderedDict[str, List[Node]]
        self.current_symbols = []   # type: List[Node]
        self.symbols = {}           # type: Dict[str, Node]
        self.variables = set()      # type: Set[str]
        self.recursive = set()      # type: Set[str]
        self.definitions = {}       # type: Dict[str, str]
        self.deferred_tasks = []    # type: List[Callable]
        self.root_symbol = ""  # type: str
        self.directives = {'whitespace': self.WHITESPACE['horizontal'],
                           'comment': '',
                           'literalws': ['right'],
                           'tokens': set(),  # alt. 'preprocessor_tokens'
                           'filter': dict(),  # alt. 'filter'
                           'ignorecase': False}

    @property
    def result(self) -> str:
        return self._result

    # methods for generating skeleton code for preprocessor, transformer, and compiler

    def gen_preprocessor_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for a preprocessor-function for
        the previously compiled formal language.
        """
        name = self.grammar_name + "Preprocessor"
        return "def %s(text):\n    return text\n" % name \
               + PREPROCESSOR_FACTORY.format(NAME=self.grammar_name)

    def gen_transformer_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for the AST-transformation for the
        previously compiled formal language.
        """
        if not self.rules:
            raise EBNFCompilerError('Compiler must be run before calling '
                                    '"gen_transformer_skeleton()"!')
        tt_name = self.grammar_name + '_AST_transformation_table'
        transtable = [tt_name + ' = {',
                      '    # AST Transformations for the ' + self.grammar_name + '-grammar']
        transtable.append('    "+": remove_empty,')
        for name in self.rules:
            tf = '[]'
            rule = self.definitions[name]
            if rule.startswith('Alternative'):
                tf = '[replace_or_reduce]'
            elif rule.startswith('Synonym'):
                tf = '[replace_by_single_child]'
            transtable.append('    "' + name + '": %s,' % tf)
        transtable.append('    ":Token, :RE": reduce_single_child,')
        transtable += ['    "*": replace_by_single_child', '}', '']
        transtable += [TRANSFORMER_FACTORY.format(NAME=self.grammar_name)]
        return '\n'.join(transtable)

    def gen_compiler_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for a Compiler-class for the
        previously compiled formal language.
        """
        if not self.rules:
            raise EBNFCompilerError('Compiler has not been run before calling '
                                    '"gen_compiler_skeleton()"!')
        compiler = ['class ' + self.grammar_name + 'Compiler(Compiler):',
                    '    """Compiler for the abstract-syntax-tree of a ' +
                    self.grammar_name + ' source file.',
                    '    """', '',
                    '    def __init__(self, grammar_name="' +
                    self.grammar_name + '", grammar_source=""):',
                    '        super(' + self.grammar_name +
                    'Compiler, self).__init__(grammar_name, grammar_source)',
                    r"        assert re.match('\w+\Z', grammar_name)", '']
        for name in self.rules:
            method_name = Compiler.method_name(name)
            if name == self.root_symbol:
                compiler += ['    def ' + method_name + '(self, node):',
                             '        return node', '']
            else:
                compiler += ['    def ' + method_name + '(self, node):',
                             '        pass', '']
        compiler += [COMPILER_FACTORY.format(NAME=self.grammar_name)]
        return '\n'.join(compiler)

    def assemble_parser(self, definitions: List[Tuple[str, str]], root_node: Node) -> str:
        """
        Creates the Python code for the parser after compilation of
        the EBNF-Grammar
        """

        # execute deferred tasks, for example semantic checks that cannot
        # be done before the symbol table is complete

        for task in self.deferred_tasks:
            task()

        # provide for capturing of symbols that are variables, i.e. the
        # value of which will be retrieved at some point during the parsing process

        if self.variables:
            for i in range(len(definitions)):
                if definitions[i][0] in self.variables:
                    definitions[i] = (definitions[i][0], 'Capture(%s)' % definitions[i][1])

        # add special fields for Grammar class

        definitions.append(('wspR__', self.WHITESPACE_KEYWORD
                            if 'right' in self.directives['literalws'] else "''"))
        definitions.append(('wspL__', self.WHITESPACE_KEYWORD
                            if 'left' in self.directives['literalws'] else "''"))
        definitions.append((self.WHITESPACE_KEYWORD,
                            ("mixin_comment(whitespace=" + self.RAW_WS_KEYWORD +
                             ", comment=" + self.COMMENT_KEYWORD + ")")))
        definitions.append((self.RAW_WS_KEYWORD, "r'{whitespace}'".format(**self.directives)))
        definitions.append((self.COMMENT_KEYWORD, "r'{comment}'".format(**self.directives)))

        # prepare parser class header and docstring and
        # add EBNF grammar to the doc string of the parser class
        article = 'an ' if self.grammar_name[0:1] in "AaEeIiOoUu" else 'a '  # what about 'hour', 'universe' etc.?
        declarations = ['class ' + self.grammar_name +
                        'Grammar(Grammar):',
                        'r"""Parser for ' + article + self.grammar_name +
                        ' source file' +
                        (', with this grammar:' if self.grammar_source else '.')]
        definitions.append(('parser_initialization__', '"upon instantiation"'))
        if self.grammar_source:
            definitions.append(('source_hash__',
                                '"%s"' % md5(self.grammar_source, __version__)))
            declarations.append('')
            declarations += [line for line in self.grammar_source.split('\n')]
            while declarations[-1].strip() == '':
                declarations = declarations[:-1]
        declarations.append('"""')

        # turn definitions into declarations in reverse order
        self.root_symbol = definitions[0][0] if definitions else ""
        definitions.reverse()
        declarations += [symbol + ' = Forward()'
                         for symbol in sorted(list(self.recursive))]
        for symbol, statement in definitions:
            if symbol in self.recursive:
                declarations += [symbol + '.set(' + statement + ')']
            else:
                declarations += [symbol + ' = ' + statement]

        # check for symbols used but never defined

        defined_symbols = set(self.rules.keys()) | self.RESERVED_SYMBOLS
        for symbol in self.symbols:
            if symbol not in defined_symbols:
                self.symbols[symbol].add_error("Missing definition for symbol '%s'" % symbol)
                # root_node.error_flag = True

        # check for unconnected rules

        defined_symbols.difference_update(self.RESERVED_SYMBOLS)

        def remove_connections(symbol):
            if symbol in defined_symbols:
                defined_symbols.remove(symbol)
                for related in self.rules[symbol][1:]:
                    remove_connections(str(related))

        remove_connections(self.root_symbol)
        for leftover in defined_symbols:
            self.rules[leftover][0].add_error(('Rule "%s" is not connected to '
                'parser root "%s" !') % (leftover, self.root_symbol), Error.WARNING)

        # set root_symbol parser and assemble python grammar definition

        if self.root_symbol and 'root__' not in self.rules:
            declarations.append('root__ = ' + self.root_symbol)
        declarations.append('')
        self._result = '\n    '.join(declarations) \
                       + GRAMMAR_FACTORY.format(NAME=self.grammar_name)
        return self._result

    ## compilation methods

    def on_syntax(self, node: Node) -> str:
        definitions = []  # type: List[Tuple[str, str]]

        # drop the wrapping sequence node
        if len(node.children) == 1 and not node.children[0].parser.name:
            node = node.children[0]

        # compile definitions and directives and collect definitions
        for nd in node.children:
            if nd.parser.name == "definition":
                definitions.append(self.compile(nd))
            else:
                assert nd.parser.name == "directive", nd.as_sxpr()
                self.compile(nd)
                node.error_flag = max(node.error_flag, nd.error_flag)
        self.definitions.update(definitions)

        return self.assemble_parser(definitions, node)

    def on_definition(self, node: Node) -> Tuple[str, str]:
        rule = str(node.children[0])
        if rule in self.rules:
            first = self.rules[rule][0]
            if not first._errors:
                first.add_error('First definition of rule "%s" '
                                'followed by illegal redefinitions.' % rule)
            node.add_error('A rule with name "%s" has already been defined earlier.' % rule)
        elif rule in EBNFCompiler.RESERVED_SYMBOLS:
            node.add_error('Symbol "%s" is a reserved symbol.' % rule)
        elif not sane_parser_name(rule):
            node.add_error('Illegal symbol "%s". Symbols must not start or '
                               'end with a double underscore "__".' % rule)
        elif rule in self.directives['tokens']:
            node.add_error('Symbol "%s" has already been defined as '
                           'a preprocessor token.' % rule)
        elif keyword.iskeyword(rule):
            node.add_error('Python keyword "%s" may not be used as a symbol. '
                           % rule + '(This may change in the future.)')
        try:
            self.current_symbols = [node]
            self.rules[rule] = self.current_symbols
            defn = self.compile(node.children[1])
            if rule in self.variables:
                defn = 'Capture(%s)' % defn
                self.variables.remove(rule)
            elif defn.find("(") < 0:
                # assume it's a synonym, like 'page = REGEX_PAGE_NR'
                defn = 'Synonym(%s)' % defn
        except TypeError as error:
            errmsg = EBNFCompiler.AST_ERROR + " (" + str(error) + ")\n" + node.as_sxpr()
            node.add_error(errmsg)
            rule, defn = rule + ':error', '"' + errmsg + '"'
        return rule, defn

    def _check_rx(self, node: Node, rx: str) -> str:
        """
        Checks whether the string `rx` represents a valid regular
        expression. Makes sure that multiline regular expressions are
        prepended by the multiline-flag. Returns the regular expression string.
        """
        flags = self.re_flags | {'x'} if rx.find('\n') >= 0 else self.re_flags
        if flags:
            rx = "(?%s)%s" % ("".join(flags), rx)
        try:
            re.compile(rx)
        except Exception as re_error:
            node.add_error("malformed regular expression %s: %s" %
                           (repr(rx), str(re_error)))
        return rx
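    # For illustration: a regular expression that spans several lines gets the
    # verbose flag prepended (assuming no other re_flags are set), e.g.
    # "first\n|second"  ->  "(?x)first\n|second"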

    def on_directive(self, node: Node) -> str:
        key = str(node.children[0]).lower()
        assert key not in self.directives['tokens']

        if key in {'comment', 'whitespace'}:
            if node.children[1].parser.name == "list_":
                if len(node.children[1].result) != 1:
                    node.add_error('Directive "%s" must have exactly one value, not %i.' %
                                   (key, len(node.children[1].result)))
                value = self.compile(node.children[1]).pop()
                if key == 'whitespace' and value in EBNFCompiler.WHITESPACE:
                    value = EBNFCompiler.WHITESPACE[value]  # replace whitespace-name by regex
                else:
                    node.add_error('Value "%s" not allowed for directive "%s".' % (value, key))
            else:
                value = str(node.children[1]).strip("~")  # cast(str, node.children[1].result).strip("~")
                if value != str(node.children[1]):  # cast(str, node.children[1].result):
                    node.add_error("Whitespace marker '~' not allowed in definition of "
                                   "%s regular expression." % key)
                if value[0] + value[-1] in {'""', "''"}:
                    value = escape_re(value[1:-1])
                elif value[0] + value[-1] == '//':
                    value = self._check_rx(node, value[1:-1])
                if key == 'whitespace' and not re.match(value, ''):
                    node.add_error("Implicit whitespace should always match the empty string, "
                                   "/%s/ does not." % value)
            self.directives[key] = value

        elif key == 'ignorecase':
            value = str(node.children[1]).lower() not in {"off", "false", "no"}
            self.directives['ignorecase'] = value
            if value:
                self.re_flags.add('i')

        # elif key == 'testing':
        #     value = str(node.children[1])
        #     self.directives['testing'] = value.lower() not in {"off", "false", "no"}

        elif key == 'literalws':
            value = {item.lower() for item in self.compile(node.children[1])}
            if (len(value - {'left', 'right', 'both', 'none'}) > 0
                    or ('none' in value and len(value) > 1)):
                node.add_error('Directive "literalws" allows the values '
                               '`left`, `right`, `both` or `none`, '
                               'but not `%s`' % ", ".join(value))
            ws = {'left', 'right'} if 'both' in value \
                else set() if 'none' in value else value
            self.directives[key] = list(ws)

        elif key in {'tokens', 'preprocessor_tokens'}:
            self.directives['tokens'] |= self.compile(node.children[1])
        elif key.endswith('_filter'):
            filter_set = self.compile(node.children[1])
            if not isinstance(filter_set, set) or len(filter_set) != 1:
                node.add_error('Directive "%s" accepts exactly one symbol, not %s'
                               % (key, str(filter_set)))
            self.directives['filter'][key[:-7]] = filter_set.pop()
        else:
            node.add_error('Unknown directive %s ! (Known ones are %s .)' %
                           (key,
                            ', '.join(list(self.directives.keys()))))
        return ""

    def non_terminal(self, node: Node, parser_class: str, custom_args: List[str]=[]) -> str:
        """
        Compiles any non-terminal, where `parser_class` indicates the Parser class
        name for the particular non-terminal.
        """
        arguments = [self.compile(r) for r in node.children] + custom_args
        return parser_class + '(' + ', '.join(arguments) + ')'
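    # Example of non_terminal() at work (illustrative): for the AST of
    # `expr = a | b`, on_expression() below yields the string "Alternative(a, b)".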

    def on_expression(self, node) -> str:
        return self.non_terminal(node, 'Alternative')

    def on_term(self, node) -> str:
        return self.non_terminal(node, 'Series')

    def on_factor(self, node: Node) -> str:
        assert node.children
        assert len(node.children) >= 2, node.as_sxpr()
        prefix = str(node.children[0])  # cast(str, node.children[0].result)
        custom_args = []  # type: List[str]

        if prefix in {'::', ':'}:
            assert len(node.children) == 2
            arg = node.children[-1]
            if arg.parser.name != 'symbol':
                node.add_error(('Retrieve Operator "%s" requires a symbol, '
                                'and not a %s.') % (prefix, str(arg.parser)))
                return str(arg.result)
            if str(arg) in self.directives['filter']:
                custom_args = ['filter=%s' % self.directives['filter'][str(arg)]]
            self.variables.add(str(arg))  # cast(str, arg.result)

        elif len(node.children) > 2:
            # shift = (Node(node.parser, node.result[1].result),)
            # node.result[1].result = shift + node.result[2:]
            node.children[1].result = (Node(node.children[1].parser, node.children[1].result),) \
                                    + node.children[2:]
            node.children[1].parser = node.parser
            node.result = (node.children[0], node.children[1])

        node.result = node.children[1:]
        try:
            parser_class = self.PREFIX_TABLE[prefix]
            result = self.non_terminal(node, parser_class, custom_args)
            if prefix[:1] == '-':
                def check(node):
                    nd = node
                    if len(nd.children) >= 1:
                        nd = nd.children[0]
                    while nd.parser.name == "symbol":
                        symlist = self.rules.get(str(nd), [])
                        if len(symlist) == 2:
                            nd = symlist[1]
                        else:
                            if len(symlist) == 1:
                                nd = symlist[0].children[1]
                            break
                    if (nd.parser.name != "regexp" or str(nd)[:1] != '/'
                        or str(nd)[-1:] != '/'):
                        node.add_error("Lookbehind-parser can only be used with plain RegExp-"
                                       "parsers, not with: " + nd.parser.name + nd.parser.ptype)

                if not result.startswith('RegExp('):
                    self.deferred_tasks.append(lambda: check(node))
            return result
        except KeyError:
            node.add_error('Unknown prefix "%s".' % prefix)
        return ""

    def on_option(self, node) -> str:
        return self.non_terminal(node, 'Option')

    def on_repetition(self, node) -> str:
        return self.non_terminal(node, 'ZeroOrMore')

    def on_oneormore(self, node) -> str:
        return self.non_terminal(node, 'OneOrMore')

    def on_regexchain(self, node) -> str:
        raise EBNFCompilerError("Not yet implemented!")

    def on_group(self, node) -> str:
        raise EBNFCompilerError("Group nodes should have been eliminated by "
                                "AST transformation!")

    def on_symbol(self, node: Node) -> str:     # called only for symbols on the right hand side!
        symbol = str(node)  # ; assert result == cast(str, node.result)
        if symbol in self.directives['tokens']:
            return 'PreprocessorToken("' + symbol + '")'
        else:
            self.current_symbols.append(node)
            if symbol not in self.symbols:
                self.symbols[symbol] = node  # remember first use of symbol
            if symbol in self.rules:
                self.recursive.add(symbol)
            if symbol in EBNFCompiler.RESERVED_SYMBOLS:  # (EBNFCompiler.WHITESPACE_KEYWORD, EBNFCompiler.COMMENT_KEYWORD):
                return "RegExp(%s)" % symbol
            return symbol

    def on_literal(self, node) -> str:
        return 'Token(' + str(node).replace('\\', r'\\') + ')'  # return 'Token(' + ', '.merge_children([node.result]) + ')' ?

    def on_regexp(self, node: Node) -> str:
        rx = str(node)
        name = []   # type: List[str]
        if rx[0] == '/' and rx[-1] == '/':
            parser = 'RegExp('
        else:
            parser = 'RE('
            if rx[:2] == '~/':
                if 'left' not in self.directives['literalws']:
                    name = ['wL=' + self.WHITESPACE_KEYWORD] + name
                rx = rx[1:]
            elif 'left' in self.directives['literalws']:
                name = ["wL=''"] + name
            if rx[-2:] == '/~':
                if 'right' not in self.directives['literalws']:
                    name = ['wR=' + self.WHITESPACE_KEYWORD] + name
                rx = rx[:-1]
            elif 'right' in self.directives['literalws']:
                name = ["wR=''"] + name
        try:
            arg = repr(self._check_rx(node, rx[1:-1].replace(r'\/', '/')))
        except AttributeError as error:
            errmsg = EBNFCompiler.AST_ERROR + " (" + str(error) + ")\n" + \
                     node.as_sxpr()
            node.add_error(errmsg)
            return '"' + errmsg + '"'
        return parser + ', '.join([arg] + name) + ')'
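    # Rough mapping produced by on_regexp() above, assuming the default setting
    # literalws == ['right'] (illustrative only):
    #     /\w+/    ->  RegExp('\\w+')             (no implicit whitespace)
    #     ~/\w+/~  ->  RE('\\w+', wL=WSP__)       (wR keeps its default whitespace)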

    def on_list_(self, node) -> Set[str]:
        assert node.children
        return set(item.result.strip() for item in node.children)


def get_ebnf_compiler(grammar_name="", grammar_source="") -> EBNFCompiler:
    global thread_local_ebnf_compiler_singleton
    try:
        compiler = thread_local_ebnf_compiler_singleton
        compiler.set_grammar_name(grammar_name, grammar_source)
        return compiler
    except NameError:
        thread_local_ebnf_compiler_singleton = EBNFCompiler(grammar_name, grammar_source)
        return thread_local_ebnf_compiler_singleton
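# End-to-end sketch (illustrative only; file name and grammar name are made up):
#
#     ebnf_src = load_if_file('arithmetic.ebnf')
#     cst = get_ebnf_grammar()(get_ebnf_preprocessor()(ebnf_src))
#     get_ebnf_transformer()(cst)                  # CST -> AST, in place
#     python_code = get_ebnf_compiler("Arithmetic", ebnf_src)(cst)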