The expiration time for new job artifacts in CI/CD pipelines is now 30 days (GitLab default). Previously generated artifacts in already completed jobs will not be affected by the change. The latest artifacts for all jobs in the latest successful pipelines will be kept. More information: https://gitlab.lrz.de/help/user/admin_area/settings/continuous_integration.html#default-artifacts-expiration

ebnf.py 34.9 KB
Newer Older
1
"""ebnf.py - EBNF -> Python-Parser compilation for DHParser
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18

Copyright 2016  by Eckhart Arnold (arnold@badw.de)
                Bavarian Academy of Sciences and Humanities (badw.de)

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied.  See the License for the specific language governing
permissions and limitations under the License.
"""

19
import keyword
20
21
from collections import OrderedDict

22
23
24
25
try:
    import regex as re
except ImportError:
    import re
26
27
28
29
try:
    from typing import Callable, Dict, List, Set, Tuple
except ImportError:
    from .typing34 import Callable, Dict, List, Set, Tuple
30

31
from DHParser.toolkit import load_if_file, escape_re, md5, sane_parser_name
32
from DHParser.parser import Grammar, mixin_comment, nil_preprocessor, Forward, RE, NegativeLookahead, \
33
    Alternative, Series, Optional, Required, OneOrMore, ZeroOrMore, Token, Compiler, \
34
    PreprocessorFunc
35
36
37
from DHParser.syntaxtree import WHITESPACE_PTYPE, TOKEN_PTYPE, Node, TransformationFunc
from DHParser.transform import traverse, remove_brackets, \
    reduce_single_child, replace_by_single_child, remove_expendables, \
38
    remove_tokens, flatten, forbid, assert_content, key_tag_name, remove_infix_operator
39
from DHParser.versionnumber import __version__
40

41
# Public interface of this module (factory functions, grammar/compiler
# classes and the factory-function type aliases defined below).
__all__ = ('get_ebnf_preprocessor',
           'get_ebnf_grammar',
           'get_ebnf_transformer',
           'get_ebnf_compiler',
           'EBNFGrammar',
           'EBNFTransformer',
           'EBNFCompilerError',
           'EBNFCompiler',
           'grammar_changed',
           'PreprocessorFactoryFunc',
           'ParserFactoryFunc',
           'TransformerFactoryFunc',
           'CompilerFactoryFunc')
54
55


Eckhart Arnold's avatar
Eckhart Arnold committed
56
57
58
59
60
61
62
########################################################################
#
# EBNF scanning
#
########################################################################


63
64
def get_ebnf_preprocessor() -> PreprocessorFunc:
    """Returns the preprocessor function for EBNF sources.

    EBNF sources require no preprocessing, so this returns
    ``nil_preprocessor`` (presumably a no-op pass-through — confirm in
    DHParser.parser).
    """
    return nil_preprocessor
Eckhart Arnold's avatar
Eckhart Arnold committed
65
66
67
68
69
70
71
72


########################################################################
#
# EBNF parsing
#
########################################################################

73

74
class EBNFGrammar(Grammar):
    r"""Parser for an EBNF source file, with this grammar:

    # EBNF-Grammar in EBNF

    @ comment    =  /#.*(?:\n|$)/                    # comments start with '#' and eat all chars up to and including '\n'
    @ whitespace =  /\s*/                            # whitespace includes linefeed
    @ literalws  =  right                            # trailing whitespace of literals will be ignored tacitly

    syntax     =  [~//] { definition | directive } §EOF
    definition =  symbol §"=" expression
    directive  =  "@" §symbol §"=" ( regexp | literal | list_ )

    expression =  term { "|" term }
    term       =  { factor }+
    factor     =  [flowmarker] [retrieveop] symbol !"="   # negative lookahead to be sure it's not a definition
                | [flowmarker] literal
                | [flowmarker] regexp
                | [flowmarker] group
                | [flowmarker] oneormore
                | repetition
                | option

    flowmarker =  "!"  | "&"  | "§" |                # '!' negative lookahead, '&' positive lookahead, '§' required
                  "-!" | "-&"                        # '-' negative lookbehind, '-&' positive lookbehind
    retrieveop =  "::" | ":"                         # '::' pop, ':' retrieve

    group      =  "(" expression §")"
    oneormore  =  "{" expression "}+"
    repetition =  "{" expression §"}"
    option     =  "[" expression §"]"

    symbol     =  /(?!\d)\w+/~                       # e.g. expression, factor, parameter_list
    literal    =  /"(?:[^"]|\\")*?"/~                # e.g. "(", '+', 'while'
                | /'(?:[^']|\\')*?'/~                # whitespace following literals will be ignored tacitly.
    regexp     =  /~?\/(?:[^\/]|(?<=\\)\/)*\/~?/~    # e.g. /\w+/, ~/#.*(?:\n|$)/~
                                                     # '~' is a whitespace-marker, if present leading or trailing
                                                     # whitespace of a regular expression will be ignored tacitly.
    list_      =  /\w+/~ { "," /\w+/~ }              # comma separated list of symbols, e.g. BEGIN_LIST, END_LIST,
                                                     # BEGIN_QUOTE, END_QUOTE ; see CommonMark/markdown.py for an example
    EOF =  !/./
    """
    # NOTE: the parser definitions below mirror the grammar in the
    # docstring; they were generated from it (see source_hash__).
    expression = Forward()
    source_hash__ = "a410e1727fb7575e98ff8451dbf8f3bd"
    parser_initialization__ = "upon instantiation"
    COMMENT__ = r'#.*(?:\n|$)'
    WSP__ = mixin_comment(whitespace=r'\s*', comment=r'#.*(?:\n|$)')
    wspL__ = ''
    wspR__ = WSP__
    EOF = NegativeLookahead(RE('.', wR=''))
    list_ = Series(RE('\\w+'), ZeroOrMore(Series(Token(","), RE('\\w+'))))
    regexp = RE(r'~?/(?:\\/|[^/])*?/~?')  # RE('~?/(?:[^/]|(?<=\\\\)/)*/~?')
    literal = Alternative(RE('"(?:[^"]|\\\\")*?"'), RE("'(?:[^']|\\\\')*?'"))
    symbol = RE('(?!\\d)\\w+')
    option = Series(Token("["), expression, Required(Token("]")))
    repetition = Series(Token("{"), expression, Required(Token("}")))
    oneormore = Series(Token("{"), expression, Token("}+"))
    group = Series(Token("("), expression, Required(Token(")")))
    retrieveop = Alternative(Token("::"), Token(":"))
    flowmarker = Alternative(Token("!"), Token("&"), Token("§"), Token("-!"), Token("-&"))
    factor = Alternative(Series(Optional(flowmarker), Optional(retrieveop), symbol, NegativeLookahead(Token("="))),
                         Series(Optional(flowmarker), literal), Series(Optional(flowmarker), regexp),
                         Series(Optional(flowmarker), group), Series(Optional(flowmarker), oneormore),
                         repetition, option)
    term = OneOrMore(factor)
    expression.set(Series(term, ZeroOrMore(Series(Token("|"), term))))
    directive = Series(Token("@"), Required(symbol), Required(Token("=")), Alternative(regexp, literal, list_))
    definition = Series(symbol, Required(Token("=")), expression)
    syntax = Series(Optional(RE('', wR='', wL=WSP__)), ZeroOrMore(Alternative(definition, directive)), Required(EOF))
    root__ = syntax


146
def grammar_changed(grammar_class, grammar_source: str) -> bool:
    """Returns ``True`` if ``grammar_class`` does not reflect the latest
    changes of ``grammar_source``

    Parameters:
        grammar_class:  the parser class representing the grammar
            or the file name of a compiler suite containing the grammar
        grammar_source:  File name or string representation of the
            EBNF code of the grammar

    Returns (bool):
        True, if the source text of the grammar is different from the
        source from which the grammar class was generated
    """
    grammar = load_if_file(grammar_source)
    chksum = md5(grammar, __version__)
    if isinstance(grammar_class, str):
        # grammar_class = load_compiler_suite(grammar_class)[1]
        with open(grammar_class, 'r', encoding='utf8') as f:
            pycode = f.read()
        # FIX: use raw strings for the regexes; '\w' and '\(' in plain
        # string literals are invalid escape sequences (deprecated, and a
        # warning/error in newer Python versions). The pattern values are
        # unchanged.
        m = re.search(r'class \w*\(Grammar\)', pycode)
        if m:
            m = re.search(r'    source_hash__ *= *"([a-z0-9]*)"',
                          pycode[m.span()[1]:])
            return not (m and m.groups() and m.groups()[-1] == chksum)
        else:
            return True
    else:
        # compare against the hash baked into the generated class
        return chksum != grammar_class.source_hash__


177
def get_ebnf_grammar() -> EBNFGrammar:
    """Returns a thread-local singleton instance of ``EBNFGrammar``,
    creating it lazily on first use."""
    global thread_local_ebnf_grammar_singleton
    try:
        # reuse the already created instance, if any
        return thread_local_ebnf_grammar_singleton
    except NameError:
        # first call in this thread: create and memoize the grammar
        thread_local_ebnf_grammar_singleton = EBNFGrammar()
        return thread_local_ebnf_grammar_singleton


########################################################################
#
# EBNF concrete to abstract syntax tree transformation and validation
#
########################################################################


194
# AST transformations applied to the concrete syntax tree of an EBNF
# source in order to turn it into an abstract syntax tree.
EBNF_transformation_table = {
    "+": remove_expendables,
    "syntax": [],  # otherwise '"*": replace_by_single_child' would be applied
    "directive, definition": remove_tokens('@', '='),
    "expression": [replace_by_single_child, flatten, remove_tokens('|')],  # remove_infix_operator],
    # supports both idioms:  "{ factor }+" and "factor { factor }"
    "term": [replace_by_single_child, flatten],
    "factor, flowmarker, retrieveop": replace_by_single_child,
    "group": [remove_brackets, replace_by_single_child],
    "oneormore, repetition, option":
        [reduce_single_child, remove_brackets,
         forbid('repetition', 'option', 'oneormore'), assert_content(r'(?!§)')],
    "symbol, literal, regexp": reduce_single_child,
    (TOKEN_PTYPE, WHITESPACE_PTYPE): reduce_single_child,
    "list_": [flatten, remove_infix_operator],
    "*": replace_by_single_child
}

223

224
def EBNFTransformer(syntax_tree: Node):
    """Transforms ``syntax_tree`` in place according to
    ``EBNF_transformation_table``."""
    traverse(syntax_tree, EBNF_transformation_table, key_tag_name)
di68kap's avatar
di68kap committed
226
227


228
def get_ebnf_transformer() -> TransformationFunc:
    """Returns the transformation function for EBNF syntax trees."""
    return EBNFTransformer
Eckhart Arnold's avatar
Eckhart Arnold committed
230
231
232
233
234
235
236
237


########################################################################
#
# EBNF abstract syntax tree to Python parser compilation
#
########################################################################

238

239
# Type aliases for the zero-argument factory functions that a generated
# compiler suite exposes (preprocessor, grammar, transformer, compiler).
PreprocessorFactoryFunc = Callable[[], PreprocessorFunc]
ParserFactoryFunc = Callable[[], Grammar]
TransformerFactoryFunc = Callable[[], TransformationFunc]
CompilerFactoryFunc = Callable[[], Compiler]

244
245
246
# Code templates for the factory functions emitted into generated
# compiler suites. ``{NAME}`` is replaced by the grammar's name via
# ``str.format``. The grammar and compiler factories memoize a
# thread-local singleton instance.

PREPROCESSOR_FACTORY = '''
def get_preprocessor() -> PreprocessorFunc:
    return {NAME}Preprocessor
'''


GRAMMAR_FACTORY = '''
def get_grammar() -> {NAME}Grammar:
    global thread_local_{NAME}_grammar_singleton
    try:
        grammar = thread_local_{NAME}_grammar_singleton
        return grammar
    except NameError:
        thread_local_{NAME}_grammar_singleton = {NAME}Grammar()
        return thread_local_{NAME}_grammar_singleton
'''


TRANSFORMER_FACTORY = '''
def get_transformer() -> TransformationFunc:
    return {NAME}Transform
'''


COMPILER_FACTORY = '''
def get_compiler(grammar_name="{NAME}", grammar_source="") -> {NAME}Compiler:
    global thread_local_{NAME}_compiler_singleton
    try:
        compiler = thread_local_{NAME}_compiler_singleton
        compiler.set_grammar_name(grammar_name, grammar_source)
        return compiler
    except NameError:
        thread_local_{NAME}_compiler_singleton = \\
            {NAME}Compiler(grammar_name, grammar_source)
        return thread_local_{NAME}_compiler_singleton 
'''

Eckhart Arnold's avatar
Eckhart Arnold committed
281

282
283
class EBNFCompilerError(Exception):
    """Error raised by `EBNFCompiler` class. (Not compilation errors
    in the strict sense, see `CompilationError` in module ``dsl.py``)"""


288
class EBNFCompiler(Compiler):
    """
    Generates a Parser from an abstract syntax tree of a grammar specified
    in EBNF-Notation.

    Instances of this class must be called with the root-node of the
    abstract syntax tree from an EBNF-specification of a formal language.
    The returned value is the Python-source-code of a Grammar class for
    this language that can be used to parse texts in this language.
    See classes `parser.Compiler` and `parser.Grammar` for more information.

    Additionally, class EBNFCompiler provides helper methods to generate
    code-skeletons for a preprocessor, AST-transformation and full
    compilation of the formal language. These method's names start with
    the prefix `gen_`.

    Attributes:
        current_symbols:  During compilation, a list containing the root
                node of the currently compiled definition as first element
                and then the nodes of the symbols that are referred to in
                the currently compiled definition.

        rules:  Dictionary that maps rule names to a list of Nodes that
                contain symbol-references in the definition of the rule.
                The first item in the list is the node of the rule-
                definition itself. Example:

                           `alternative = a | b`

                Now `[str(node) for node in self.rules['alternative']]`
                yields `['alternative = a | b', 'a', 'b']`

        symbols:  A mapping of symbol names to their first usage (not
                their definition!) in the EBNF source.

        variables:  A set of symbols names that are used with the
                Pop or Retrieve operator. Because the values of these
                symbols need to be captured they are called variables.
                See `test_parser.TestPopRetrieve` for an example.

        recursive:  A set of symbols that are used recursively and
                therefore require a `Forward`-operator.

        definitions:  A dictionary of definitions. Other than `rules`
                this maps the symbols to their compiled definienda.

        deferred_tasks:  A list of callables that is filled during
                compilation, but that will be executed only after
                compilation has finished. Typically, it contains
                semantic checks that require information that
                is only available upon completion of compilation.

        root:   The name of the root symbol.

        directives:  A dictionary of all directives and their default
                values.
    """
    # names of the whitespace/comment regex constants in generated code
    COMMENT_KEYWORD = "COMMENT__"
    WHITESPACE_KEYWORD = "WSP__"
    RESERVED_SYMBOLS = {WHITESPACE_KEYWORD, COMMENT_KEYWORD}
    AST_ERROR = "Badly structured syntax tree. " \
                "Potentially due to erroneuos AST transformation."
    # maps EBNF prefix operators to the Parser class implementing them
    PREFIX_TABLE = {'§': 'Required',
                    '&': 'Lookahead', '!': 'NegativeLookahead',
                    '-&': 'Lookbehind', '-!': 'NegativeLookbehind',
                    '::': 'Pop', ':': 'Retrieve'}
    # predefined whitespace regexes selectable via the @whitespace directive
    WHITESPACE = {'horizontal': r'[\t ]*',  # default: horizontal
                  'linefeed': r'[ \t]*\n?(?!\s*\n)[ \t]*',
                  'vertical': r'\s*'}
357

358

359
    def __init__(self, grammar_name="", grammar_source=""):
        """Initializes the compiler for the grammar named ``grammar_name``
        whose EBNF text (or file name) is ``grammar_source``."""
        super(EBNFCompiler, self).__init__(grammar_name, grammar_source)
        self._reset()

363

364
    def _reset(self):
        """Clears all state accumulated by a previous compilation run."""
        super(EBNFCompiler, self)._reset()
        # generated Python source of the last compilation run
        self._result = ''           # type: str
        self.rules = OrderedDict()  # type: OrderedDict[str, List[Node]]
        self.current_symbols = []   # type: List[Node]
        self.symbols = {}           # type: Dict[str, Node]
        self.variables = set()      # type: Set[str]
        self.recursive = set()      # type: Set[str]
        self.definitions = {}       # type: Dict[str, str]
        self.deferred_tasks = []    # type: List[Callable]
        self.root_symbol = ""       # type: str
        # default values for all known directives
        self.directives = {'whitespace': self.WHITESPACE['horizontal'],
                           'comment': '',
                           'literalws': ['right'],
                           'tokens': set(),   # alt. 'preprocessor_tokens'
                           'filter': dict(),  # alt. 'filter'
                           'testing': False}
381

Eckhart Arnold's avatar
Eckhart Arnold committed
382
    @property
    def result(self) -> str:
        """The Python source code produced by the last compilation run."""
        return self._result

386
    # methods for generating skeleton code for preprocessor, transformer, and compiler
387

388
    def gen_preprocessor_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for a preprocessor-function for
        the previously compiled formal language.
        """
        name = self.grammar_name + "Preprocessor"
        # identity preprocessor stub plus the matching factory function
        stub = "def %s(text):\n    return text\n" % name
        return stub + PREPROCESSOR_FACTORY.format(NAME=self.grammar_name)
396

397

398
    def gen_transformer_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for the AST-transformation for the
        previously compiled formal language.

        Raises:
            EBNFCompilerError: if no grammar has been compiled yet
                (``self.rules`` is empty).
        """
        if not self.rules:
            raise EBNFCompilerError('Compiler must be run before calling '
                                    '"gen_transformer_Skeleton()"!')
        tt_name = self.grammar_name + '_AST_transformation_table'
        tf_name = self.grammar_name + 'Transform'
        lines = [tt_name + ' = {',
                 '    # AST Transformations for the ' +
                 self.grammar_name + '-grammar',
                 '    "+": remove_empty,']
        # one table entry per rule; pre-fill plausible transformations
        # for alternatives and synonyms
        for rule_name in self.rules:
            compiled = self.definitions[rule_name]
            if compiled.startswith('Alternative'):
                entry = '[replace_or_reduce]'
            elif compiled.startswith('Synonym'):
                entry = '[replace_by_single_child]'
            else:
                entry = '[]'
            lines.append('    "' + rule_name + '": %s,' % entry)
        lines.append('    ":Token, :RE": reduce_single_child,')
        lines += ['    "*": replace_by_single_child', '}', '', tf_name +
                  ' = partial(traverse, processing_table=%s)' % tt_name, '']
        lines += [TRANSFORMER_FACTORY.format(NAME=self.grammar_name)]
        return '\n'.join(lines)

426

427
    def gen_compiler_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for a Compiler-class for the
        previously compiled formal language.

        Raises:
            EBNFCompilerError: if no grammar has been compiled yet
                (``self.rules`` is empty).
        """
        if not self.rules:
            raise EBNFCompilerError('Compiler has not been run before calling '
                                    '"gen_Compiler_Skeleton()"!')
        compiler = ['class ' + self.grammar_name + 'Compiler(Compiler):',
                    '    """Compiler for the abstract-syntax-tree of a ' +
                    self.grammar_name + ' source file.',
                    '    """', '',
                    '    def __init__(self, grammar_name="' +
                    self.grammar_name + '", grammar_source=""):',
                    '        super(' + self.grammar_name +
                    'Compiler, self).__init__(grammar_name, grammar_source)',
                    # FIX: '\\w' / '\\Z' written with explicit double
                    # backslashes; the previous '\w'/'\Z' were invalid
                    # escape sequences (string value is unchanged).
                    "        assert re.match('\\w+\\Z', grammar_name)", '']
        # one on_XXX method stub per rule; the root rule returns the node
        for name in self.rules:
            method_name = Compiler.method_name(name)
            if name == self.root_symbol:
                compiler += ['    def ' + method_name + '(self, node):',
                             '        return node', '']
            else:
                compiler += ['    def ' + method_name + '(self, node):',
                             '        pass', '']
        compiler += [COMPILER_FACTORY.format(NAME=self.grammar_name)]
        return '\n'.join(compiler)
454

455

456
457
458
459
460
    def assemble_parser(self, definitions: List[Tuple[str, str]], root_node: Node) -> str:
        """
        Creates the Python code for the parser after compilation of
        the EBNF-Grammar
        """
        # execute deferred tasks, for example semantic checks that cannot
        # be done before the symbol table is complete
        for task in self.deferred_tasks:
            task()

        # provide for capturing of symbols that are variables, i.e. the
        # value of will be retrieved at some point during the parsing process
        if self.variables:
            for i in range(len(definitions)):
                if definitions[i][0] in self.variables:
                    definitions[i] = (definitions[i][0], 'Capture(%s)' % definitions[i][1])

        # add special fields for Grammar class
        definitions.append(('wspR__', self.WHITESPACE_KEYWORD
                            if 'right' in self.directives['literalws'] else "''"))
        definitions.append(('wspL__', self.WHITESPACE_KEYWORD
                            if 'left' in self.directives['literalws'] else "''"))
        definitions.append((self.WHITESPACE_KEYWORD,
                            ("mixin_comment(whitespace="
                             "r'{whitespace}', comment=r'{comment}')").
                            format(**self.directives)))
        definitions.append((self.COMMENT_KEYWORD, "r'{comment}'".format(**self.directives)))

        # prepare parser class header and docstring and
        # add EBNF grammar to the doc string of the parser class
        article = 'an ' if self.grammar_name[0:1] in "AaEeIiOoUu" else 'a '  # what about 'hour', 'universe' etc.?
        declarations = ['class ' + self.grammar_name +
                        'Grammar(Grammar):',
                        'r"""Parser for ' + article + self.grammar_name +
                        ' source file' +
                        (', with this grammar:' if self.grammar_source else '.')]
        definitions.append(('parser_initialization__', '"upon instantiation"'))
        if self.grammar_source:
            definitions.append(('source_hash__',
                                '"%s"' % md5(self.grammar_source, __version__)))
            declarations.append('')
            declarations += [line for line in self.grammar_source.split('\n')]
            # strip trailing blank lines from the embedded grammar source
            while declarations[-1].strip() == '':
                declarations = declarations[:-1]
        declarations.append('"""')

        # turn definitions into declarations in reverse order
        self.root_symbol = definitions[0][0] if definitions else ""
        definitions.reverse()
        declarations += [symbol + ' = Forward()'
                         for symbol in sorted(list(self.recursive))]
        for symbol, statement in definitions:
            if symbol in self.recursive:
                declarations += [symbol + '.set(' + statement + ')']
            else:
                declarations += [symbol + ' = ' + statement]

        # check for symbols used but never defined
        defined_symbols = set(self.rules.keys()) | self.RESERVED_SYMBOLS
        for symbol in self.symbols:
            if symbol not in defined_symbols:
                self.symbols[symbol].add_error("Missing definition for symbol '%s'" % symbol)
                # root_node.error_flag = True

        # check for unconnected rules
        if not self.directives['testing']:
            defined_symbols.difference_update(self.RESERVED_SYMBOLS)

            def remove_connections(symbol):
                # depth-first removal of every rule reachable from `symbol`
                if symbol in defined_symbols:
                    defined_symbols.remove(symbol)
                    for related in self.rules[symbol][1:]:
                        remove_connections(str(related))

            remove_connections(self.root_symbol)
            for leftover in defined_symbols:
                self.rules[leftover][0].add_error(('Rule "%s" is not connected to parser '
                                                   'root "%s" !') % (leftover,
                                                                     self.root_symbol) + ' (Use directive "@testing=True" '
                    'to supress this error message.)')
                # root_node.error_flag = True

        # set root_symbol parser and assemble python grammar definition
        if self.root_symbol and 'root__' not in self.rules:
            declarations.append('root__ = ' + self.root_symbol)
        declarations.append('')
        self._result = '\n    '.join(declarations) \
                       + GRAMMAR_FACTORY.format(NAME=self.grammar_name)
        return self._result
554

555
556
557

    ## compilation methods

558
    def on_syntax(self, node: Node) -> str:
        """Compiles the root "syntax" node: compiles every definition and
        directive and assembles the resulting parser source code."""
        definitions = []  # type: List[Tuple[str, str]]

        # drop the wrapping sequence node
        if len(node.children) == 1 and not node.children[0].parser.name:
            node = node.children[0]

        # compile definitions and directives and collect definitions
        for nd in node.children:
            if nd.parser.name == "definition":
                definitions.append(self.compile(nd))
            else:
                assert nd.parser.name == "directive", nd.as_sxpr()
                self.compile(nd)
                node.error_flag = node.error_flag or nd.error_flag
        self.definitions.update(definitions)

        return self.assemble_parser(definitions, node)
576

577

578
    def on_definition(self, node: Node) -> Tuple[str, str]:
        """Compiles a single rule definition.

        Returns:
            A (rule_name, python_expression) pair. On a structural error
            the rule name gets a ':error' suffix and the expression is
            the quoted error message.
        """
        rule = str(node.children[0])
        # validate the rule name before compiling its expression
        if rule in self.rules:
            node.add_error('A rule with name "%s" has already been defined.' % rule)
        elif rule in EBNFCompiler.RESERVED_SYMBOLS:
            node.add_error('Symbol "%s" is a reserved symbol.' % rule)
        elif not sane_parser_name(rule):
            # FIX: error message typo "doube" -> "double" and stray
            # double space removed
            node.add_error('Illegal symbol "%s". Symbols must not start or '
                           'end with a double underscore "__".' % rule)
        elif rule in self.directives['tokens']:
            node.add_error('Symbol "%s" has already been defined as '
                           'a preprocessor token.' % rule)
        elif keyword.iskeyword(rule):
            node.add_error('Python keyword "%s" may not be used as a symbol. '
                           % rule + '(This may change in the future.)')
        try:
            self.current_symbols = [node]
            self.rules[rule] = self.current_symbols
            defn = self.compile(node.children[1])
            if rule in self.variables:
                defn = 'Capture(%s)' % defn
                self.variables.remove(rule)
            elif defn.find("(") < 0:
                # assume it's a synonym, like 'page = REGEX_PAGE_NR'
                defn = 'Synonym(%s)' % defn
        except TypeError as error:
            errmsg = EBNFCompiler.AST_ERROR + " (" + str(error) + ")\n" + node.as_sxpr()
            node.add_error(errmsg)
            rule, defn = rule + ':error', '"' + errmsg + '"'
        return rule, defn
608

609

610
    @staticmethod
    def _check_rx(node: Node, rx: str) -> str:
        """
        Checks whether the string `rx` represents a valid regular
        expression. Makes sure that multiline regular expressions are
        prepended by the multiline-flag. Returns the regular expression string.
        """
        # multi-line patterns must carry the verbose/multiline prefix
        if rx.find('\n') >= 0 and rx[0:4] != '(?x)':
            rx = '(?x)' + rx
        try:
            re.compile(rx)
        except Exception as re_error:
            node.add_error("malformed regular expression %s: %s" %
                           (repr(rx), str(re_error)))
        return rx

625

626
    def on_directive(self, node: Node) -> str:
        """Processes a single ``@key = value`` directive, updating
        ``self.directives``. Returns an empty string (directives emit no
        parser code)."""
        key = str(node.children[0]).lower()
        assert key not in self.directives['tokens']

        if key in {'comment', 'whitespace'}:
            if node.children[1].parser.name == "list_":
                if len(node.children[1].result) != 1:
                    node.add_error('Directive "%s" must have one, but not %i values.' %
                                   (key, len(node.children[1].result)))
                value = self.compile(node.children[1]).pop()
                if key == 'whitespace' and value in EBNFCompiler.WHITESPACE:
                    value = EBNFCompiler.WHITESPACE[value]  # replace whitespace-name by regex
                else:
                    node.add_error('Value "%s" not allowed for directive "%s".' % (value, key))
            else:
                value = str(node.children[1]).strip("~")  # cast(str, node.children[1].result).strip("~")
                if value != str(node.children[1]):  # cast(str, node.children[1].result):
                    node.add_error("Whitespace marker '~' not allowed in definition of "
                                   "%s regular expression." % key)
                if value[0] + value[-1] in {'""', "''"}:
                    value = escape_re(value[1:-1])
                elif value[0] + value[-1] == '//':
                    value = self._check_rx(node, value[1:-1])
                if key == 'whitespace' and not re.match(value, ''):
                    node.add_error("Implicit whitespace should always match the empty string, "
                                   "/%s/ does not." % value)
            self.directives[key] = value

        elif key == 'testing':
            value = str(node.children[1])
            self.directives['testing'] = value.lower() not in {"off", "false", "no"}

        elif key == 'literalws':
            value = {item.lower() for item in self.compile(node.children[1])}
            if (len(value - {'left', 'right', 'both', 'none'}) > 0
                    or ('none' in value and len(value) > 1)):
                node.add_error('Directive "literalws" allows the values '
                               '`left`, `right`, `both` or `none`, '
                               'but not `%s`' % ", ".join(value))
            # FIX: the 'none' branch previously yielded `{}` (an empty
            # dict, not a set); use set() for type-consistency. The
            # resulting list value is unchanged.
            ws = {'left', 'right'} if 'both' in value \
                else set() if 'none' in value else value
            self.directives[key] = list(ws)

        elif key in {'tokens', 'preprocessor_tokens'}:
            self.directives['tokens'] |= self.compile(node.children[1])

        elif key.endswith('_filter'):
            filter_set = self.compile(node.children[1])
            if not isinstance(filter_set, set) or len(filter_set) != 1:
                # FIX: error message typo "exactly on symbol" -> "exactly one symbol"
                node.add_error('Directive "%s" accepts exactly one symbol, not %s'
                               % (key, str(filter_set)))
            self.directives['filter'][key[:-7]] = filter_set.pop()

        else:
            node.add_error('Unknown directive %s ! (Known ones are %s .)' %
                           (key,
                            ', '.join(list(self.directives.keys()))))
        return ""

685

686
    def non_terminal(self, node: Node, parser_class: str, custom_args: List[str] = None) -> str:
        """
        Compiles any non-terminal, where `parser_class` indicates the Parser class
        name for the particular non-terminal.

        The compiled child nodes become the positional constructor arguments of
        `parser_class`; any `custom_args` (e.g. "filter=...") are appended last.
        """
        # None-sentinel instead of a mutable default argument ([]), which is
        # created once and shared across all calls.
        if custom_args is None:
            custom_args = []
        arguments = [self.compile(r) for r in node.children] + custom_args
        return parser_class + '(' + ', '.join(arguments) + ')'

694

695
    def on_expression(self, node) -> str:
        """Compile an EBNF expression ("|"-separated terms) into an
        Alternative-parser constructor call."""
        parser_name = 'Alternative'
        return self.non_terminal(node, parser_name)

698

699
    def on_term(self, node) -> str:
        """Compile a sequence of factors into a Series-parser constructor call."""
        parser_name = 'Series'
        return self.non_terminal(node, parser_name)
701

702

703
    def on_factor(self, node: Node) -> str:
        """
        Compiles a prefixed factor (e.g. '&', '!', '-&', '-!', '::', ':')
        into the parser-class call that the prefix maps to via
        ``self.PREFIX_TABLE``. Returns the generated source snippet, or ""
        after reporting an error for an unknown prefix.
        """
        assert node.children
        assert len(node.children) >= 2, node.as_sxpr()
        prefix = str(node.children[0])  # cast(str, node.children[0].result)
        custom_args = []  # type: List[str]

        if prefix in {'::', ':'}:
            # Pop/Retrieve operators: the operand must be a single symbol.
            assert len(node.children) == 2
            arg = node.children[-1]
            if arg.parser.name != 'symbol':
                node.add_error(('Retrieve Operator "%s" requires a symbol, '
                                'and not a %s.') % (prefix, str(arg.parser)))
                return str(arg.result)
            if str(arg) in self.directives['filter']:
                # pass a match-filter declared via an "xxx_filter" directive
                custom_args = ['filter=%s' % self.directives['filter'][str(arg)]]
            self.variables.add(str(arg))  # cast(str, arg.result)

        elif len(node.children) > 2:
            # More than one operand: fold operands 1..n into a single child
            # node so that the prefix applies to the whole sequence.
            # shift = (Node(node.parser, node.result[1].result),)
            # node.result[1].result = shift + node.result[2:]
            node.children[1].result = (Node(node.children[1].parser, node.children[1].result),) \
                                    + node.children[2:]
            node.children[1].parser = node.parser
            node.result = (node.children[0], node.children[1])

        # drop the prefix token; only the operand(s) get compiled as arguments
        node.result = node.children[1:]
        try:
            parser_class = self.PREFIX_TABLE[prefix]
            result = self.non_terminal(node, parser_class, custom_args)
            if prefix[:1] == '-':
                # Lookbehind operators ('-&', '-!') only work on plain regular
                # expressions. The operand may be a symbol that is defined
                # later, so the check is deferred until all rules are known.
                def check(node):
                    nd = node
                    if len(nd.children) >= 1:
                        nd = nd.children[0]
                    # follow symbol references to the definition they resolve to
                    while nd.parser.name == "symbol":
                        symlist = self.rules.get(str(nd), [])
                        if len(symlist) == 2:
                            nd = symlist[1]
                        else:
                            if len(symlist) == 1:
                                nd = symlist[0].children[1]
                            break
                    if (nd.parser.name != "regexp" or str(nd)[:1] != '/'
                        or str(nd)[-1:] != '/'):
                        node.add_error("Lookbehind-parser can only be used with plain RegExp-"
                                       "parsers, not with: " + nd.parser.name + nd.parser.ptype)

                if not result.startswith('RegExp('):
                    self.deferred_tasks.append(lambda: check(node))
            return result
        except KeyError:
            node.add_error('Unknown prefix "%s".' % prefix)
        return ""
756

757

758
    def on_option(self, node) -> str:
        """Compile a bracketed option into an Optional-parser constructor call."""
        parser_name = 'Optional'
        return self.non_terminal(node, parser_name)

761

762
    def on_repetition(self, node) -> str:
        """Compile a curly-brace repetition into a ZeroOrMore-parser call."""
        parser_name = 'ZeroOrMore'
        return self.non_terminal(node, parser_name)

765

766
    def on_oneormore(self, node) -> str:
        """Compile a one-or-more repetition into a OneOrMore-parser call."""
        parser_name = 'OneOrMore'
        return self.non_terminal(node, parser_name)

769

770
    def on_regexchain(self, node) -> str:
        # Regex chains are not supported by the EBNF compiler (yet); reaching
        # this handler is always an error.
        raise EBNFCompilerError("Not yet implemented!")

773

774
    def on_group(self, node) -> str:
        # Parenthesized groups are flattened away during the AST
        # transformation stage; encountering one here indicates a bug in the
        # transformation table, not in the grammar being compiled.
        raise EBNFCompilerError("Group nodes should have been eliminated by "
                                "AST transformation!")

778

779
780
781
    def on_symbol(self, node: Node) -> str:     # called only for symbols on the right hand side!
        """
        Compiles a symbol reference on the right hand side of a definition.
        Preprocessor tokens compile to a PreprocessorToken parser; otherwise
        the symbol name itself is emitted and bookkeeping for forward
        references / recursion detection is updated as a side effect.
        """
        symbol = str(node)  # ; assert result == cast(str, node.result)
        if symbol in self.directives['tokens']:
            return 'PreprocessorToken("' + symbol + '")'
        else:
            self.current_symbols.append(node)
            if symbol not in self.symbols:
                self.symbols[symbol] = node  # remember first use of symbol
            if symbol in self.rules:
                # referenced after its own definition has begun => recursion
                self.recursive.add(symbol)
            if symbol in (EBNFCompiler.WHITESPACE_KEYWORD, EBNFCompiler.COMMENT_KEYWORD):
                # the whitespace/comment keywords hold raw regex strings and
                # must be wrapped in a RegExp parser when used as parsers
                return "RegExp(%s)" % symbol
            return symbol
792

793

794
    def on_literal(self, node) -> str:
        """Compile a quoted string literal into a Token-parser call,
        doubling backslashes so the literal survives re-parsing as source."""
        escaped = str(node).replace('\\', r'\\')
        return 'Token(%s)' % escaped
796

797

798
    def on_regexp(self, node: Node) -> str:
        """
        Compiles a regular-expression literal into either a plain RegExp
        parser (/.../)  or an RE parser (~/...~ forms), attaching explicit
        wL/wR whitespace arguments wherever the tilde markers disagree with
        the 'literalws' directive defaults.
        """
        rx = str(node)
        name = []   # type: List[str]
        if rx[0] == '/' and rx[-1] == '/':
            # bare /regex/: no adjacent whitespace handling at all
            parser = 'RegExp('
        else:
            parser = 'RE('
            if rx[:2] == '~/':
                if not 'left' in self.directives['literalws']:
                    # '~' requests left whitespace that is not on by default
                    name = ['wL=' + self.WHITESPACE_KEYWORD] + name
                rx = rx[1:]
            elif 'left' in self.directives['literalws']:
                # default is left whitespace, but no '~' given: switch it off
                name = ["wL=''"] + name
            if rx[-2:] == '/~':
                if 'right' not in self.directives['literalws']:
                    name = ['wR=' + self.WHITESPACE_KEYWORD] + name
                rx = rx[:-1]
            elif 'right' in self.directives['literalws']:
                name = ["wR=''"] + name
        try:
            # strip the enclosing slashes and unescape '\/' before validating
            arg = repr(self._check_rx(node, rx[1:-1].replace(r'\/', '/')))
        except AttributeError as error:
            # NOTE(review): presumably triggered by a malformed AST node;
            # report it with the node's S-expression for diagnosis.
            errmsg = EBNFCompiler.AST_ERROR + " (" + str(error) + ")\n" + \
                     node.as_sxpr()
            node.add_error(errmsg)
            return '"' + errmsg + '"'
        return parser + ', '.join([arg] + name) + ')'
825

826

827
    def on_list_(self, node) -> Set[str]:
        """Compile a comma-separated list node into the set of its items,
        each stripped of surrounding whitespace."""
        assert node.children
        return {child.result.strip() for child in node.children}
830
831


832
def get_ebnf_compiler(grammar_name="", grammar_source="") -> EBNFCompiler:
    """Return the singleton EBNFCompiler, creating it on first use and
    re-targeting it to `grammar_name`/`grammar_source` on later calls.

    NOTE(review): despite the variable's name, the singleton is a plain
    module-level global, not a `threading.local` — verify whether true
    per-thread isolation is intended.
    """
    global thread_local_ebnf_compiler_singleton
    try:
        compiler = thread_local_ebnf_compiler_singleton
    except NameError:
        # first call: the global does not exist yet
        thread_local_ebnf_compiler_singleton = EBNFCompiler(grammar_name, grammar_source)
        return thread_local_ebnf_compiler_singleton
    compiler.set_grammar_name(grammar_name, grammar_source)
    return compiler