"""ebnf.py - EBNF -> Python-Parser compilation for DHParser

Copyright 2016  by Eckhart Arnold (arnold@badw.de)
                Bavarian Academy of Sciences and Humanities (badw.de)

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied.  See the License for the specific language governing
permissions and limitations under the License.
"""

import keyword
from collections import OrderedDict
from functools import partial

from DHParser.error import Error
from DHParser.parser import Grammar, mixin_comment, nil_preprocessor, Forward, RegExp, RE, \
    NegativeLookahead, Alternative, Series, Option, OneOrMore, ZeroOrMore, Token, \
    Compiler, PreprocessorFunc
from DHParser.syntaxtree import Node, TransformationFunc, WHITESPACE_PTYPE, TOKEN_PTYPE
from DHParser.toolkit import load_if_file, escape_re, md5, sane_parser_name, re, typing
from DHParser.transform import traverse, remove_brackets, \
    reduce_single_child, replace_by_single_child, remove_expendables, \
    remove_tokens, flatten, forbid, assert_content, remove_infix_operator
from DHParser.versionnumber import __version__
from typing import Callable, Dict, List, Set, Tuple

__all__ = ('get_ebnf_preprocessor',
           'get_ebnf_grammar',
           'get_ebnf_transformer',
           'get_ebnf_compiler',
           'EBNFGrammar',
           'EBNFTransform',
           'EBNFCompilerError',
           'EBNFCompiler',
           'grammar_changed',
           'PreprocessorFactoryFunc',
           'ParserFactoryFunc',
           'TransformerFactoryFunc',
           'CompilerFactoryFunc')


########################################################################
#
# EBNF scanning
#
########################################################################


def get_ebnf_preprocessor() -> PreprocessorFunc:
    return nil_preprocessor


########################################################################
#
# EBNF parsing
#
########################################################################


class EBNFGrammar(Grammar):
    r"""Parser for an EBNF source file, with this grammar:

    # EBNF-Grammar in EBNF

    @ comment    =  /#.*(?:\n|$)/                    # comments start with '#' and eat all chars up to and including '\n'
    @ whitespace =  /\s*/                            # whitespace includes linefeed
    @ literalws  =  right                            # trailing whitespace of literals will be ignored tacitly

    syntax     =  [~//] { definition | directive } §EOF
    definition =  symbol §"=" expression
    directive  =  "@" §symbol "=" ( regexp | literal | list_ )

    expression =  term { "|" term }
    term       =  { ["§"] factor }+                       # "§" means all following factors mandatory
    factor     =  [flowmarker] [retrieveop] symbol !"="   # negative lookahead to be sure it's not a definition
                | [flowmarker] literal
                | [flowmarker] regexp
                | [flowmarker] oneormore
                | [flowmarker] group
                | [flowmarker] unordered
                | repetition
                | option

    flowmarker =  "!"  | "&"                         # '!' negative lookahead, '&' positive lookahead
                | "-!" | "-&"                        # '-' negative lookbehind, '-&' positive lookbehind
    retrieveop =  "::" | ":"                         # '::' pop, ':' retrieve

    group      =  "(" §expression ")"
    unordered  =  "<" §expression ">"                # elements of expression in arbitrary order
    oneormore  =  "{" expression "}+"
    repetition =  "{" §expression "}"
    option     =  "[" §expression "]"

    symbol     =  /(?!\d)\w+/~                       # e.g. expression, factor, parameter_list
    literal    =  /"(?:[^"]|\\")*?"/~                # e.g. "(", '+', 'while'
                | /'(?:[^']|\\')*?'/~                # whitespace following literals will be ignored tacitly.
    regexp     =  /~?\/(?:\\\/|[^\/])*?\/~?/~        # e.g. /\w+/, ~/#.*(?:\n|$)/~
                                                     # '~' is a whitespace-marker, if present leading or trailing
                                                     # whitespace of a regular expression will be ignored tacitly.
    list_      =  /\w+/~ { "," /\w+/~ }              # comma separated list of symbols, e.g. BEGIN_LIST, END_LIST,
                                                     # BEGIN_QUOTE, END_QUOTE; see CommonMark/markdown.py for an example
    EOF =  !/./
    """
    expression = Forward()
    source_hash__ = "3fc9f5a340f560e847d9af0b61a68743"
    parser_initialization__ = "upon instantiation"
    COMMENT__ = r'#.*(?:\n|$)'
    WHITESPACE__ = r'\s*'
    WSP__ = mixin_comment(whitespace=WHITESPACE__, comment=COMMENT__)
    wspL__ = ''
    wspR__ = WSP__
    EOF = NegativeLookahead(RegExp('.'))
    list_ = Series(RE('\\w+'), ZeroOrMore(Series(Token(","), RE('\\w+'))))
    regexp = RE('~?/(?:\\\\/|[^/])*?/~?')
    literal = Alternative(RE('"(?:[^"]|\\\\")*?"'), RE("'(?:[^']|\\\\')*?'"))
    symbol = RE('(?!\\d)\\w+')
    option = Series(Token("["), expression, Token("]"), mandatory=1)
    repetition = Series(Token("{"), expression, Token("}"), mandatory=1)
    oneormore = Series(Token("{"), expression, Token("}+"))
    unordered = Series(Token("<"), expression, Token(">"), mandatory=1)
    group = Series(Token("("), expression, Token(")"), mandatory=1)
    retrieveop = Alternative(Token("::"), Token(":"))
    flowmarker = Alternative(Token("!"), Token("&"), Token("-!"), Token("-&"))
    factor = Alternative(
        Series(Option(flowmarker), Option(retrieveop), symbol, NegativeLookahead(Token("="))),
        Series(Option(flowmarker), literal), Series(Option(flowmarker), regexp),
        Series(Option(flowmarker), oneormore), Series(Option(flowmarker), group),
        Series(Option(flowmarker), unordered), repetition, option)
    term = OneOrMore(Series(Option(Token("§")), factor))
    expression.set(Series(term, ZeroOrMore(Series(Token("|"), term))))
    directive = Series(Token("@"), symbol, Token("="), Alternative(regexp, literal, list_),
                       mandatory=1)
    definition = Series(symbol, Token("="), expression, mandatory=1)
    syntax = Series(Option(RE('', wR='', wL=WSP__)), ZeroOrMore(Alternative(definition, directive)),
                    EOF, mandatory=2)
    root__ = syntax


def grammar_changed(grammar_class, grammar_source: str) -> bool:
    """Returns ``True`` if ``grammar_class`` does not reflect the latest
    changes of ``grammar_source``

    Parameters:
        grammar_class:  the parser class representing the grammar
            or the file name of a compiler suite containing the grammar
        grammar_source:  File name or string representation of the
            EBNF code of the grammar

    Returns (bool):
        True, if the source text of the grammar is different from the
        source from which the grammar class was generated
    """
    grammar = load_if_file(grammar_source)
    chksum = md5(grammar, __version__)
    if isinstance(grammar_class, str):
        # grammar_class = load_compiler_suite(grammar_class)[1]
        with open(grammar_class, 'r', encoding='utf8') as f:
            pycode = f.read()
        m = re.search(r'class \w*\(Grammar\)', pycode)
        if m:
            m = re.search('    source_hash__ *= *"([a-z0-9]*)"',
                          pycode[m.span()[1]:])
            return not (m and m.groups() and m.groups()[-1] == chksum)
        else:
            return True
    else:
        return chksum != grammar_class.source_hash__
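

# Usage sketch (illustrative only; the file names below are hypothetical):
#
#     if grammar_changed('arithmetic_compiler.py', 'arithmetic.ebnf'):
#         ...  # the compiled parser script is outdated and should be regenerated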


def get_ebnf_grammar() -> EBNFGrammar:
    global thread_local_ebnf_grammar_singleton
    try:
        grammar = thread_local_ebnf_grammar_singleton
        return grammar
    except NameError:
        thread_local_ebnf_grammar_singleton = EBNFGrammar()
        return thread_local_ebnf_grammar_singleton
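

# Illustrative usage sketch (not part of the module's public interface): a
# Grammar object is callable with an EBNF source text and returns the root
# Node of the concrete syntax tree.
def _ebnf_parsing_example() -> Node:
    ebnf_source = 'key = "value"\n'       # a minimal one-rule grammar
    return get_ebnf_grammar()(ebnf_source)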


########################################################################
#
# EBNF concrete to abstract syntax tree transformation and validation
#
########################################################################


EBNF_AST_transformation_table = {
    # AST Transformations for EBNF-grammar
    "+":
        remove_expendables,
    "syntax":
        [],  # otherwise '"*": replace_by_single_child' would be applied
    "directive, definition":
        remove_tokens('@', '='),
    "expression":
        [replace_by_single_child, flatten, remove_tokens('|')],  # remove_infix_operator],
    "term":
        [replace_by_single_child, flatten],  # supports both idioms:  "{ factor }+" and "factor { factor }"
    "factor, flowmarker, retrieveop":
        replace_by_single_child,
    "group":
        [remove_brackets, replace_by_single_child],
    "unordered":
        remove_brackets,
    "oneormore, repetition, option":
        [reduce_single_child, remove_brackets,
         forbid('repetition', 'option', 'oneormore'), assert_content(r'(?!§)')],
    "symbol, literal, regexp":
        reduce_single_child,
    (TOKEN_PTYPE, WHITESPACE_PTYPE):
        reduce_single_child,
    "list_":
        [flatten, remove_infix_operator],
    "*":
        replace_by_single_child
}


def EBNFTransform() -> TransformationFunc:
    return partial(traverse, processing_table=EBNF_AST_transformation_table.copy())

def get_ebnf_transformer() -> TransformationFunc:
    global thread_local_EBNF_transformer_singleton
    try:
        transformer = thread_local_EBNF_transformer_singleton
    except NameError:
        thread_local_EBNF_transformer_singleton = EBNFTransform()
        transformer = thread_local_EBNF_transformer_singleton
    return transformer
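

# Illustrative sketch (not part of the module's public interface): the callable
# returned by get_ebnf_transformer() applies the AST transformation table above
# to a concrete syntax tree in place.
def _ebnf_ast_example() -> Node:
    syntax_tree = get_ebnf_grammar()('key = "value"\n')
    get_ebnf_transformer()(syntax_tree)   # in-place CST -> AST transformation
    return syntax_tree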


########################################################################
#
# EBNF abstract syntax tree to Python parser compilation
#
########################################################################


PreprocessorFactoryFunc = Callable[[], PreprocessorFunc]
ParserFactoryFunc = Callable[[], Grammar]
TransformerFactoryFunc = Callable[[], TransformationFunc]
CompilerFactoryFunc = Callable[[], Compiler]

PREPROCESSOR_FACTORY = '''
def get_preprocessor() -> PreprocessorFunc:
    return {NAME}Preprocessor
'''


GRAMMAR_FACTORY = '''
def get_grammar() -> {NAME}Grammar:
    global thread_local_{NAME}_grammar_singleton
    try:
        grammar = thread_local_{NAME}_grammar_singleton
    except NameError:
        thread_local_{NAME}_grammar_singleton = {NAME}Grammar()
        grammar = thread_local_{NAME}_grammar_singleton
    return grammar
'''


TRANSFORMER_FACTORY = '''
def {NAME}Transform() -> TransformationDict:
    return partial(traverse, processing_table={NAME}_AST_transformation_table.copy())

def get_transformer() -> TransformationFunc:
    global thread_local_{NAME}_transformer_singleton
    try:
        transformer = thread_local_{NAME}_transformer_singleton
    except NameError:
        thread_local_{NAME}_transformer_singleton = {NAME}Transform()
        transformer = thread_local_{NAME}_transformer_singleton
    return transformer
'''


COMPILER_FACTORY = '''
def get_compiler(grammar_name="{NAME}", grammar_source="") -> {NAME}Compiler:
    global thread_local_{NAME}_compiler_singleton
    try:
        compiler = thread_local_{NAME}_compiler_singleton
        compiler.set_grammar_name(grammar_name, grammar_source)
    except NameError:
        thread_local_{NAME}_compiler_singleton = \\
            {NAME}Compiler(grammar_name, grammar_source)
        compiler = thread_local_{NAME}_compiler_singleton
    return compiler
'''
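

# Note: the factory templates above are filled in via str.format by the
# gen_*_skeleton methods of EBNFCompiler below, for example (the grammar
# name "Arithmetic" is hypothetical):
#
#     factory_code = GRAMMAR_FACTORY.format(NAME="Arithmetic")
#
# which yields the source of a get_grammar() function returning a singleton
# of the generated ArithmeticGrammar class.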


class EBNFCompilerError(Exception):
    """Error raised by `EBNFCompiler` class. (Not compilation errors
    in the strict sense, see `CompilationError` in module ``dsl.py``)"""
    pass
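

# End-to-end sketch of the EBNF -> Python compilation pipeline provided by this
# module (illustrative only; 'arithmetic.ebnf' and the grammar name 'Arithmetic'
# are hypothetical):
#
#     ebnf_src = load_if_file('arithmetic.ebnf')         # EBNF source text
#     syntax_tree = get_ebnf_grammar()(ebnf_src)         # concrete syntax tree
#     get_ebnf_transformer()(syntax_tree)                # CST -> AST, in place
#     python_src = EBNFCompiler('Arithmetic', ebnf_src)(syntax_tree)
#
# `python_src` then contains the source code of a Grammar class for the
# language defined in 'arithmetic.ebnf' (see the EBNFCompiler docstring below).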


class EBNFCompiler(Compiler):
    """
    Generates a Parser from an abstract syntax tree of a grammar specified
    in EBNF-Notation.

    Instances of this class must be called with the root-node of the
    abstract syntax tree from an EBNF-specification of a formal language.
    The returned value is the Python-source-code of a Grammar class for
    this language that can be used to parse texts in this language.
    See classes `parser.Compiler` and `parser.Grammar` for more information.

    Additionally, class EBNFCompiler provides helper methods to generate
    code-skeletons for a preprocessor, AST-transformation and full
    compilation of the formal language. These methods' names start with
    the prefix `gen_`.

    Attributes:
        current_symbols:  During compilation, a list containing the root
                node of the currently compiled definition as first element
                and then the nodes of the symbols that are referred to in
                the currently compiled definition.

        rules:  Dictionary that maps rule names to a list of Nodes that
                contain symbol-references in the definition of the rule.
                The first item in the list is the node of the rule-
                definition itself. Example:

                           `alternative = a | b`

                Now `[str(node) for node in self.rules['alternative']]`
                yields `['alternative = a | b', 'a', 'b']`

        symbols:  A mapping of symbol names to their first usage (not
                their definition!) in the EBNF source.

        variables:  A set of symbols names that are used with the
                Pop or Retrieve operator. Because the values of these
                symbols need to be captured they are called variables.
                See `test_parser.TestPopRetrieve` for an example.

        recursive:  A set of symbols that are used recursively and
                therefore require a `Forward`-operator.

        definitions:  A dictionary of definitions. Other than `rules`
                this maps the symbols to their compiled definienda.

        deferred_tasks:  A list of callables that is filled during
                compilation, but that will be executed only after
                compilation has finished. Typically, it contains
                semantic checks that require information that
                is only available upon completion of compilation.

        root_symbol:  The name of the root symbol.

        directives:  A dictionary of all directives and their default
                values.

        re_flags:  A set of regular expression flags to be added to all
                regular expressions found in the current parsing process.
    """
    COMMENT_KEYWORD = "COMMENT__"
    WHITESPACE_KEYWORD = "WSP__"
    RAW_WS_KEYWORD = "WHITESPACE__"
    RESERVED_SYMBOLS = {WHITESPACE_KEYWORD, RAW_WS_KEYWORD, COMMENT_KEYWORD}
    AST_ERROR = "Badly structured syntax tree. " \
                "Potentially due to erroneous AST transformation."
    PREFIX_TABLE = {'§': 'Required',
                    '&': 'Lookahead', '!': 'NegativeLookahead',
                    '-&': 'Lookbehind', '-!': 'NegativeLookbehind',
                    '::': 'Pop', ':': 'Retrieve'}
    WHITESPACE = {'horizontal': r'[\t ]*',  # default: horizontal
                  'linefeed': r'[ \t]*\n?(?!\s*\n)[ \t]*',
                  'vertical': r'\s*'}
    REPEATABLE_DIRECTIVES = {'tokens'}


    def __init__(self, grammar_name="", grammar_source=""):
        super(EBNFCompiler, self).__init__(grammar_name, grammar_source)
        self._reset()


    def _reset(self):
        super(EBNFCompiler, self)._reset()
        self._result = ''           # type: str
        self.re_flags = set()       # type: Set[str]
        self.rules = OrderedDict()  # type: OrderedDict[str, List[Node]]
        self.current_symbols = []   # type: List[Node]
        self.symbols = {}           # type: Dict[str, Node]
        self.variables = set()      # type: Set[str]
        self.recursive = set()      # type: Set[str]
        self.definitions = {}       # type: Dict[str, str]
        self.deferred_tasks = []    # type: List[Callable]
        self.root_symbol = ""       # type: str
        self.directives = {'whitespace': self.WHITESPACE['horizontal'],
                           'comment': '',
                           'literalws': {'right'},
                           'tokens': set(),  # alt. 'preprocessor_tokens'
                           'filter': dict()}  # alt. 'filter'
        # self.directives['ignorecase']: False
        self.defined_directives = set()  # type: Set[str]

    @property
    def result(self) -> str:
        return self._result

    # methods for generating skeleton code for preprocessor, transformer, and compiler

    def gen_preprocessor_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for a preprocessor-function for
        the previously compiled formal language.
        """
        name = self.grammar_name + "Preprocessor"
        return "def %s(text):\n    return text\n" % name \
               + PREPROCESSOR_FACTORY.format(NAME=self.grammar_name)


    def gen_transformer_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for the AST-transformation for the
        previously compiled formal language.
        """
        if not self.rules:
            raise EBNFCompilerError('Compiler must be run before calling '
                                    '"gen_transformer_Skeleton()"!')
        tt_name = self.grammar_name + '_AST_transformation_table'
        transtable = [tt_name + ' = {',
                      '    # AST Transformations for the ' + self.grammar_name + '-grammar']
        transtable.append('    "+": remove_empty,')
        for name in self.rules:
            transformations = '[]'
            rule = self.definitions[name]
            if rule.startswith('Alternative'):
                transformations = '[replace_or_reduce]'
            elif rule.startswith('Synonym'):
                transformations = '[reduce_single_child]'
            transtable.append('    "' + name + '": %s,' % transformations)
        transtable.append('    ":Token, :RE": reduce_single_child,')
        transtable += ['    "*": replace_by_single_child', '}', '']
        transtable += [TRANSFORMER_FACTORY.format(NAME=self.grammar_name)]
        return '\n'.join(transtable)


    def gen_compiler_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for a Compiler-class for the
        previously compiled formal language.
        """
        if not self.rules:
            raise EBNFCompilerError('Compiler has not been run before calling '
                                    '"gen_Compiler_Skeleton()"!')
        compiler = ['class ' + self.grammar_name + 'Compiler(Compiler):',
                    '    """Compiler for the abstract-syntax-tree of a ' +
                    self.grammar_name + ' source file.',
                    '    """', '',
                    '    def __init__(self, grammar_name="' +
                    self.grammar_name + '", grammar_source=""):',
                    '        super(' + self.grammar_name +
                    'Compiler, self).__init__(grammar_name, grammar_source)',
                    r"        assert re.match('\w+\Z', grammar_name)", '']
        for name in self.rules:
            method_name = Compiler.method_name(name)
            if name == self.root_symbol:
                compiler += ['    def ' + method_name + '(self, node):',
                             '        return node', '']
            else:
                compiler += ['    def ' + method_name + '(self, node):',
                             '        pass', '']
        compiler += [COMPILER_FACTORY.format(NAME=self.grammar_name)]
        return '\n'.join(compiler)


    def assemble_parser(self, definitions: List[Tuple[str, str]], root_node: Node) -> str:
        """
        Creates the Python code for the parser after compilation of
        the EBNF-Grammar
        """

        # execute deferred tasks, for example semantic checks that cannot
        # be done before the symbol table is complete

        for task in self.deferred_tasks:
            task()

        # provide for capturing of symbols that are variables, i.e. the
        # value of which will be retrieved at some point during the parsing process

        if self.variables:
            for i in range(len(definitions)):
                if definitions[i][0] in self.variables:
                    definitions[i] = (definitions[i][0], 'Capture(%s)' % definitions[i][1])

        # add special fields for Grammar class

        definitions.append(('wspR__', self.WHITESPACE_KEYWORD
                            if 'right' in self.directives['literalws'] else "''"))
        definitions.append(('wspL__', self.WHITESPACE_KEYWORD
                            if 'left' in self.directives['literalws'] else "''"))
        definitions.append((self.WHITESPACE_KEYWORD,
                            ("mixin_comment(whitespace=" + self.RAW_WS_KEYWORD +
                             ", comment=" + self.COMMENT_KEYWORD + ")")))
        definitions.append((self.RAW_WS_KEYWORD, "r'{whitespace}'".format(**self.directives)))
        definitions.append((self.COMMENT_KEYWORD, "r'{comment}'".format(**self.directives)))

        # prepare parser class header and docstring and
        # add EBNF grammar to the doc string of the parser class

        article = 'an ' if self.grammar_name[0:1] in "AaEeIiOoUu" else 'a '  # what about 'hour', 'universe' etc.?
        declarations = ['class ' + self.grammar_name +
                        'Grammar(Grammar):',
                        'r"""Parser for ' + article + self.grammar_name +
                        ' source file' +
                        (', with this grammar:' if self.grammar_source else '.')]
        definitions.append(('parser_initialization__', '"upon instantiation"'))
        if self.grammar_source:
            definitions.append(('source_hash__',
                                '"%s"' % md5(self.grammar_source, __version__)))
            declarations.append('')
            declarations += [line for line in self.grammar_source.split('\n')]
            while declarations[-1].strip() == '':
                declarations = declarations[:-1]
        declarations.append('"""')

        # turn definitions into declarations in reverse order

        self.root_symbol = definitions[0][0] if definitions else ""
        definitions.reverse()
        declarations += [symbol + ' = Forward()'
                         for symbol in sorted(list(self.recursive))]
        for symbol, statement in definitions:
            if symbol in self.recursive:
                declarations += [symbol + '.set(' + statement + ')']
            else:
                declarations += [symbol + ' = ' + statement]

        # check for symbols used but never defined

        defined_symbols = set(self.rules.keys()) | self.RESERVED_SYMBOLS
        for symbol in self.symbols:
            if symbol not in defined_symbols:
                self.symbols[symbol].add_error("Missing definition for symbol '%s'" % symbol)
                # root_node.error_flag = True

        # check for unconnected rules

        defined_symbols.difference_update(self.RESERVED_SYMBOLS)

        def remove_connections(symbol):
            """Recursively removes all symbols which appear in the
            definiens of a particular symbol."""
            if symbol in defined_symbols:
                defined_symbols.remove(symbol)
                for related in self.rules[symbol][1:]:
                    remove_connections(str(related))

        remove_connections(self.root_symbol)
        for leftover in defined_symbols:
            self.rules[leftover][0].add_error(
                'Rule "%s" is not connected to parser root "%s"!' %
                (leftover, self.root_symbol), Error.WARNING)

        # set root_symbol parser and assemble python grammar definition

        if self.root_symbol and 'root__' not in self.rules:
            declarations.append('root__ = ' + self.root_symbol)
        declarations.append('')
        self._result = '\n    '.join(declarations) \
                       + GRAMMAR_FACTORY.format(NAME=self.grammar_name)
        return self._result


    ## compilation methods

    def on_syntax(self, node: Node) -> str:
        definitions = []  # type: List[Tuple[str, str]]

        # drop the wrapping sequence node
        if len(node.children) == 1 and not node.children[0].parser.name:
            node = node.children[0]

        # compile definitions and directives and collect definitions
        for nd in node.children:
            if nd.parser.name == "definition":
                definitions.append(self.compile(nd))
            else:
                assert nd.parser.name == "directive", nd.as_sxpr()
                self.compile(nd)
            node.error_flag = max(node.error_flag, nd.error_flag)
        self.definitions.update(definitions)

        return self.assemble_parser(definitions, node)


    def on_definition(self, node: Node) -> Tuple[str, str]:
        rule = str(node.children[0])
        if rule in self.rules:
            first = self.rules[rule][0]
            if not first._errors:
                first.add_error('First definition of rule "%s" '
                                'followed by illegal redefinitions.' % rule)
            node.add_error('A rule with name "%s" has already been defined earlier.' % rule)
        elif rule in EBNFCompiler.RESERVED_SYMBOLS:
            node.add_error('Symbol "%s" is a reserved symbol.' % rule)
        elif not sane_parser_name(rule):
            node.add_error('Illegal symbol "%s". Symbols must not start or '
                           'end with a double underscore "__".' % rule)
        elif rule in self.directives['tokens']:
            node.add_error('Symbol "%s" has already been defined as '
                           'a preprocessor token.' % rule)
        elif keyword.iskeyword(rule):
            node.add_error('Python keyword "%s" may not be used as a symbol. '
                           % rule + '(This may change in the future.)')
        try:
            self.current_symbols = [node]
            self.rules[rule] = self.current_symbols
            defn = self.compile(node.children[1])
            if rule in self.variables:
                defn = 'Capture(%s)' % defn
                self.variables.remove(rule)
            elif defn.find("(") < 0:
                # assume it's a synonym, like 'page = REGEX_PAGE_NR'
                defn = 'Synonym(%s)' % defn
        except TypeError as error:
            from traceback import extract_tb
            trace = str(extract_tb(error.__traceback__)[-1])
            errmsg = "%s (TypeError: %s; %s)\n%s" \
                     % (EBNFCompiler.AST_ERROR, str(error), trace, node.as_sxpr())
            node.add_error(errmsg)
            rule, defn = rule + ':error', '"' + errmsg + '"'
        return rule, defn


    def _check_rx(self, node: Node, rx: str) -> str:
        """
        Checks whether the string `rx` represents a valid regular
        expression. Makes sure that multiline regular expressions are
        prepended by the multiline-flag. Returns the regular expression string.
        """
        flags = self.re_flags | {'x'} if rx.find('\n') >= 0 else self.re_flags
        if flags:  rx = "(?%s)%s" % ("".join(flags), rx)
        try:
            re.compile(rx)
        except Exception as re_error:
            node.add_error("malformed regular expression %s: %s" %
                           (repr(rx), str(re_error)))
        return rx


    def on_directive(self, node: Node) -> str:
        key = str(node.children[0]).lower()
        assert key not in self.directives['tokens']

        if key not in self.REPEATABLE_DIRECTIVES:
            if key in self.defined_directives:
                node.add_error('Directive "%s" has already been defined earlier. ' % key + \
                               'Later definition will be ignored!',
                               code=Error.REDEFINED_DIRECTIVE_WARNING)
                return ""
            self.defined_directives.add(key)

        if key in {'comment', 'whitespace'}:
            if node.children[1].parser.name == "list_":
                if len(node.children[1].result) != 1:
                    node.add_error('Directive "%s" must have exactly one value, not %i.' %
                                   (key, len(node.children[1].result)))
                value = self.compile(node.children[1]).pop()
                if key == 'whitespace' and value in EBNFCompiler.WHITESPACE:
                    value = EBNFCompiler.WHITESPACE[value]  # replace whitespace-name by regex
                else:
                    node.add_error('Value "%s" not allowed for directive "%s".' % (value, key))
            else:
                value = str(node.children[1]).strip("~")  # cast(str, node.children[1].result).strip("~")
                if value != str(node.children[1]):  # cast(str, node.children[1].result):
                    node.add_error("Whitespace marker '~' not allowed in definition of "
                                   "%s regular expression." % key)
                if value[0] + value[-1] in {'""', "''"}:
                    value = escape_re(value[1:-1])
                elif value[0] + value[-1] == '//':
                    value = self._check_rx(node, value[1:-1])
                if key == 'whitespace' and not re.match(value, ''):
                    node.add_error("Implicit whitespace should always match the empty string, "
                                   "/%s/ does not." % value)
            self.directives[key] = value

        elif key == 'ignorecase':
            if str(node.children[1]).lower() not in {"off", "false", "no"}:
                self.re_flags.add('i')

        # elif key == 'testing':
        #     value = str(node.children[1])
        #     self.directives['testing'] = value.lower() not in {"off", "false", "no"}

        elif key == 'literalws':
            value = {item.lower() for item in self.compile(node.children[1])}
            if ((value - {'left', 'right', 'both', 'none'})
                    or ('none' in value and len(value) > 1)):
                node.add_error('Directive "literalws" allows the values '
                               '`left`, `right`, `both` or `none`, '
                               'but not `%s`' % ", ".join(value))
            wsp = {'left', 'right'} if 'both' in value \
                else {} if 'none' in value else value
            self.directives[key] = list(wsp)

        elif key in {'tokens', 'preprocessor_tokens'}:
            tokens = self.compile(node.children[1])
            redeclared = self.directives['tokens'] & tokens
            if redeclared:
                node.add_error('Tokens %s have already been declared earlier. '
                               % str(redeclared) + 'Later declaration will be ignored',
                               code=Error.REDECLARED_TOKEN_WARNING)
            self.directives['tokens'] |= tokens - redeclared

        elif key.endswith('_filter'):
            filter_set = self.compile(node.children[1])
            if not isinstance(filter_set, set) or len(filter_set) != 1:
                node.add_error('Directive "%s" accepts exactly one symbol, not %s'
                               % (key, str(filter_set)))
            self.directives['filter'][key[:-7]] = filter_set.pop()

        else:
            node.add_error('Unknown directive %s! (Known ones are %s.)' %
                           (key, ', '.join(list(self.directives.keys()))))
        return ""


    def non_terminal(self, node: Node, parser_class: str, custom_args: List[str]=[]) -> str:
        """
        Compiles any non-terminal, where `parser_class` indicates the Parser class
        name for the particular non-terminal.
        """
        arguments = [self.compile(r) for r in node.children] + custom_args
        node.error_flag = max(node.error_flag, max(t.error_flag for t in node.children))
        return parser_class + '(' + ', '.join(arguments) + ')'


    def on_expression(self, node) -> str:
        # TODO: Add check for errors like "a" | "ab" (which will always yield a, even for ab)
        return self.non_terminal(node, 'Alternative')


    def on_term(self, node) -> str:
        # Basically, the following code does only this:
        #       return self.non_terminal(node, 'Series')
        # What makes it (look) more complicated is the handling of the
        # mandatory §-operator
        mandatory_marker = []
        filtered_children = []
        i = 0
        for nd in node.children:
            if nd.parser.ptype == TOKEN_PTYPE and str(nd) == "§":
                mandatory_marker.append(i)
                if i == 0:
                    nd.add_error('First item of a series should not be mandatory.',
                                 Error.WARNING)
                elif len(mandatory_marker) > 1:
                    nd.add_error('One mandatory marker (§) is sufficient to declare the '
                                 'rest of the series as mandatory.', Error.WARNING)
            else:
                filtered_children.append(nd)
                i += 1
        saved_result = node.result
        node.result = tuple(filtered_children)
        custom_args = ['mandatory=%i' % mandatory_marker[0]] if mandatory_marker else []
        compiled = self.non_terminal(node, 'Series', custom_args)
        node.result = saved_result
        return compiled


    def on_factor(self, node: Node) -> str:
        assert node.children
        assert len(node.children) >= 2, node.as_sxpr()
        prefix = str(node.children[0])  # cast(str, node.children[0].result)
        custom_args = []  # type: List[str]

        if prefix in {'::', ':'}:
            assert len(node.children) == 2
            arg = node.children[-1]
            if arg.parser.name != 'symbol':
                node.add_error(('Retrieve Operator "%s" requires a symbol, '
                                'and not a %s.') % (prefix, str(arg.parser)))
                return str(arg.result)
            if str(arg) in self.directives['filter']:
                custom_args = ['rfilter=%s' % self.directives['filter'][str(arg)]]
            self.variables.add(str(arg))  # cast(str, arg.result)

        elif len(node.children) > 2:
            # shift = (Node(node.parser, node.result[1].result),)
            # node.result[1].result = shift + node.result[2:]
            node.children[1].result = (Node(node.children[1].parser, node.children[1].result),) \
                                    + node.children[2:]
            node.children[1].parser = node.parser
            node.result = (node.children[0], node.children[1])

        node.result = node.children[1:]
        try:
            parser_class = self.PREFIX_TABLE[prefix]
            result = self.non_terminal(node, parser_class, custom_args)
            if prefix[:1] == '-':
                def check(node):
                    nd = node
                    if len(nd.children) >= 1:
                        nd = nd.children[0]
                    while nd.parser.name == "symbol":
                        symlist = self.rules.get(str(nd), [])
                        if len(symlist) == 2:
                            nd = symlist[1]
                        else:
                            if len(symlist) == 1:
                                nd = symlist[0].children[1]
                            break
                    if (nd.parser.name != "regexp" or str(nd)[:1] != '/'
                        or str(nd)[-1:] != '/'):
                        node.add_error("Lookbehind-parser can only be used with plain RegExp-"
                                       "parsers, not with: " + nd.parser.name + nd.parser.ptype)

                if not result.startswith('RegExp('):
                    self.deferred_tasks.append(lambda: check(node))
            return result
        except KeyError:
            node.add_error('Unknown prefix "%s".' % prefix)
        return ""


    def on_option(self, node) -> str:
        return self.non_terminal(node, 'Option')


    def on_repetition(self, node) -> str:
        return self.non_terminal(node, 'ZeroOrMore')


    def on_oneormore(self, node) -> str:
        return self.non_terminal(node, 'OneOrMore')


    def on_regexchain(self, node) -> str:
        raise EBNFCompilerError("Not yet implemented!")


    def on_group(self, node) -> str:
        raise EBNFCompilerError("Group nodes should have been eliminated by "
                                "AST transformation!")

    def on_unordered(self, node) -> str:
        # return self.non_terminal(node, 'Unordered')
        assert len(node.children) == 1
        nd = node.children[0]
        for child in nd.children:
            if child.parser.ptype == TOKEN_PTYPE and str(child) == "§":
                node.add_error("Unordered parser lists cannot contain mandatory (§) items.")
        args = ', '.join(self.compile(child) for child in nd.children)
        if nd.parser.name == "term":
            return "AllOf(" + args + ")"
        elif nd.parser.name == "expression":
            return "SomeOf(" + args + ")"
        else:
            node.add_error("Unordered sequence or alternative requires at least two elements.")
            return ""

    def on_symbol(self, node: Node) -> str:     # called only for symbols on the right hand side!
        symbol = str(node)  # ; assert result == cast(str, node.result)
        if symbol in self.directives['tokens']:
            return 'PreprocessorToken("' + symbol + '")'
        else:
            self.current_symbols.append(node)
            if symbol not in self.symbols:
                self.symbols[symbol] = node  # remember first use of symbol
            if symbol in self.rules:
                self.recursive.add(symbol)
            if symbol in EBNFCompiler.RESERVED_SYMBOLS:  # (EBNFCompiler.WHITESPACE_KEYWORD, EBNFCompiler.COMMENT_KEYWORD):
                return "RegExp(%s)" % symbol
            return symbol