The expiration time for new job artifacts in CI/CD pipelines is now 30 days (GitLab default). Previously generated artifacts in already completed jobs will not be affected by the change. The latest artifacts for all jobs in the latest successful pipelines will be kept. More information: https://gitlab.lrz.de/help/user/admin_area/settings/continuous_integration.html#default-artifacts-expiration

ebnf.py 39.4 KB
Newer Older
1
"""ebnf.py - EBNF -> Python-Parser compilation for DHParser
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18

Copyright 2016  by Eckhart Arnold (arnold@badw.de)
                Bavarian Academy of Sciences an Humanities (badw.de)

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied.  See the License for the specific language governing
permissions and limitations under the License.
"""

19
import keyword
20
from collections import OrderedDict
21
from functools import partial
22

23
from DHParser.error import Error
24
from DHParser.parsers import Grammar, mixin_comment, nil_preprocessor, Forward, RegExp, RE, \
25
    NegativeLookahead, Alternative, Series, Option, OneOrMore, ZeroOrMore, Token, \
26
    Compiler, PreprocessorFunc
27
from DHParser.syntaxtree import Node, TransformationFunc, WHITESPACE_PTYPE, TOKEN_PTYPE
di68kap's avatar
di68kap committed
28
from DHParser.toolkit import load_if_file, escape_re, md5, sane_parser_name, re, typing
29
from DHParser.transform import traverse, remove_brackets, \
Eckhart Arnold's avatar
Eckhart Arnold committed
30
    reduce_single_child, replace_by_single_child, remove_expendables, \
31
    remove_tokens, flatten, forbid, assert_content, remove_infix_operator
32
from DHParser.versionnumber import __version__
di68kap's avatar
di68kap committed
33
from typing import Callable, Dict, List, Set, Tuple
34

35
# Public API of this module.
__all__ = ('get_ebnf_preprocessor',
           'get_ebnf_grammar',
           'get_ebnf_transformer',
           'get_ebnf_compiler',
           'EBNFGrammar',
           'EBNFTransform',
           'EBNFCompilerError',
           'EBNFCompiler',
           'grammar_changed',
           'PreprocessorFactoryFunc',
           'ParserFactoryFunc',
           'TransformerFactoryFunc',
           'CompilerFactoryFunc')
48
49


Eckhart Arnold's avatar
Eckhart Arnold committed
50
51
52
53
54
55
56
########################################################################
#
# EBNF scanning
#
########################################################################


57
58
def get_ebnf_preprocessor() -> PreprocessorFunc:
    """Returns the preprocessor function for EBNF sources.  This is simply
    the do-nothing `nil_preprocessor`, i.e. EBNF sources are parsed as-is."""
    return nil_preprocessor
Eckhart Arnold's avatar
Eckhart Arnold committed
59
60
61
62
63
64
65
66


########################################################################
#
# EBNF parsing
#
########################################################################

67

di68kap's avatar
di68kap committed
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
class EBNFGrammar(Grammar):
    r"""Parser for an EBNF source file, with this grammar:

    # EBNF-Grammar in EBNF

    @ comment    =  /#.*(?:\n|$)/                    # comments start with '#' and eat all chars up to and including '\n'
    @ whitespace =  /\s*/                            # whitespace includes linefeed
    @ literalws  =  right                            # trailing whitespace of literals will be ignored tacitly

    syntax     =  [~//] { definition | directive } §EOF
    definition =  symbol §"=" expression
    directive  =  "@" §symbol "=" ( regexp | literal | list_ )

    expression =  term { "|" term }
    term       =  { ["§"] factor }+                       # "§" means all following factors mandatory
    factor     =  [flowmarker] [retrieveop] symbol !"="   # negative lookahead to be sure it's not a definition
                | [flowmarker] literal
                | [flowmarker] regexp
                | [flowmarker] oneormore
                | [flowmarker] group
                | [flowmarker] unordered
                | repetition
                | option

    flowmarker =  "!"  | "&"                         # '!' negative lookahead, '&' positive lookahead
                | "-!" | "-&"                        # '-' negative lookbehind, '-&' positive lookbehind
    retrieveop =  "::" | ":"                         # '::' pop, ':' retrieve

    group      =  "(" §expression ")"
    unordered  =  "<" §expression ">"                # elements of expression in arbitrary order
    oneormore  =  "{" expression "}+"
    repetition =  "{" §expression "}"
    option     =  "[" §expression "]"

    symbol     =  /(?!\d)\w+/~                       # e.g. expression, factor, parameter_list
    literal    =  /"(?:[^"]|\\")*?"/~                # e.g. "(", '+', 'while'
                | /'(?:[^']|\\')*?'/~                # whitespace following literals will be ignored tacitly.
    regexp     =  /~?\/(?:\\\/|[^\/])*?\/~?/~        # e.g. /\w+/, ~/#.*(?:\n|$)/~
                                                     # '~' is a whitespace-marker, if present leading or trailing
                                                     # whitespace of a regular expression will be ignored tacitly.
    list_      =  /\w+/~ { "," /\w+/~ }              # comma separated list of symbols, e.g. BEGIN_LIST, END_LIST,
                                                     # BEGIN_QUOTE, END_QUOTE ; see CommonMark/markdown.py for an example
    EOF =  !/./
    """
    # NOTE(review): the presence of source_hash__ suggests this class was
    # generated from the EBNF source in the docstring above -- if so, it
    # should be regenerated rather than edited by hand (confirm with the
    # DHParser build process).
    expression = Forward()
    source_hash__ = "3fc9f5a340f560e847d9af0b61a68743"
    parser_initialization__ = "upon instantiation"
    COMMENT__ = r'#.*(?:\n|$)'
    WHITESPACE__ = r'\s*'
    WSP__ = mixin_comment(whitespace=WHITESPACE__, comment=COMMENT__)
    wspL__ = ''
    wspR__ = WSP__
    EOF = NegativeLookahead(RegExp('.'))
    list_ = Series(RE('\\w+'), ZeroOrMore(Series(Token(","), RE('\\w+'))))
    regexp = RE('~?/(?:\\\\/|[^/])*?/~?')
    literal = Alternative(RE('"(?:[^"]|\\\\")*?"'), RE("'(?:[^']|\\\\')*?'"))
    symbol = RE('(?!\\d)\\w+')
    option = Series(Token("["), expression, Token("]"), mandatory=1)
    repetition = Series(Token("{"), expression, Token("}"), mandatory=1)
    oneormore = Series(Token("{"), expression, Token("}+"))
    unordered = Series(Token("<"), expression, Token(">"), mandatory=1)
    group = Series(Token("("), expression, Token(")"), mandatory=1)
    retrieveop = Alternative(Token("::"), Token(":"))
    flowmarker = Alternative(Token("!"), Token("&"), Token("-!"), Token("-&"))
    factor = Alternative(
        Series(Option(flowmarker), Option(retrieveop), symbol, NegativeLookahead(Token("="))),
        Series(Option(flowmarker), literal), Series(Option(flowmarker), regexp),
        Series(Option(flowmarker), oneormore), Series(Option(flowmarker), group),
        Series(Option(flowmarker), unordered), repetition, option)
    term = OneOrMore(Series(Option(Token("§")), factor))
    expression.set(Series(term, ZeroOrMore(Series(Token("|"), term))))
    directive = Series(Token("@"), symbol, Token("="), Alternative(regexp, literal, list_),
                       mandatory=1)
    definition = Series(symbol, Token("="), expression, mandatory=1)
    syntax = Series(Option(RE('', wR='', wL=WSP__)), ZeroOrMore(Alternative(definition, directive)),
                    EOF, mandatory=2)
    root__ = syntax


147
def grammar_changed(grammar_class, grammar_source: str) -> bool:
    """
    Returns ``True`` if ``grammar_class`` does not reflect the latest
    changes of ``grammar_source``.

    Parameters:
        grammar_class:  the parser class representing the grammar
            or the file name of a compiler suite containing the grammar
        grammar_source:  File name or string representation of the
            EBNF code of the grammar

    Returns (bool):
        True, if the source text of the grammar is different from the
        source from which the grammar class was generated
    """
    grammar = load_if_file(grammar_source)
    chksum = md5(grammar, __version__)
    if isinstance(grammar_class, str):
        # grammar_class = load_compiler_suite(grammar_class)[1]
        with open(grammar_class, 'r', encoding='utf8') as f:
            pycode = f.read()
        # Raw strings here: '\w' and '\(' are invalid escape sequences in
        # ordinary string literals (deprecated since Python 3.6) and only
        # worked by accident before.
        m = re.search(r'class \w*\(Grammar\)', pycode)
        if m:
            # Compare the stored source hash of the generated class with
            # the hash of the current grammar source.
            m = re.search(r'    source_hash__ *= *"([a-z0-9]*)"',
                          pycode[m.span()[1]:])
            return not (m and m.groups() and m.groups()[-1] == chksum)
        else:
            # no Grammar class found at all => consider the suite outdated
            return True
    else:
        return chksum != grammar_class.source_hash__


179
def get_ebnf_grammar() -> EBNFGrammar:
    """Returns a thread-local singleton instance of :class:`EBNFGrammar`,
    creating it lazily on first use."""
    global thread_local_ebnf_grammar_singleton
    try:
        return thread_local_ebnf_grammar_singleton
    except NameError:
        # first call in this thread: create and memoize the instance
        thread_local_ebnf_grammar_singleton = EBNFGrammar()
        return thread_local_ebnf_grammar_singleton


########################################################################
#
# EBNF concrete to abstract syntax tree transformation and validation
#
########################################################################


196
# Transformation table that turns the concrete syntax tree of an EBNF
# source into an abstract syntax tree.  Keys are node names (comma-
# separated lists apply the same transformations to several node types;
# "+" runs first on every node, "*" is the catch-all).
EBNF_AST_transformation_table = {
    # AST Transformations for EBNF-grammar
    "+":
        remove_expendables,
    "syntax":
        [],  # otherwise '"*": replace_by_single_child' would be applied
    "directive, definition":
        remove_tokens('@', '='),
    "expression":
        [replace_by_single_child, flatten, remove_tokens('|')],  # remove_infix_operator],
    "term":
        [replace_by_single_child, flatten],  # supports both idioms:  "{ factor }+" and "factor { factor }"
    "factor, flowmarker, retrieveop":
        replace_by_single_child,
    "group":
        [remove_brackets, replace_by_single_child],
    "unordered":
        remove_brackets,
    "oneormore, repetition, option":
        [reduce_single_child, remove_brackets,
         forbid('repetition', 'option', 'oneormore'), assert_content(r'(?!§)')],
    "symbol, literal, regexp":
        reduce_single_child,
    (TOKEN_PTYPE, WHITESPACE_PTYPE):
        reduce_single_child,
    "list_":
        [flatten, remove_infix_operator],
    "*":
        replace_by_single_child
}

227

Eckhart Arnold's avatar
Eckhart Arnold committed
228
def EBNFTransform() -> TransformationFunc:
    """Creates a fresh EBNF-AST-transformation function from a copy of the
    module-level transformation table."""
    table_copy = EBNF_AST_transformation_table.copy()
    return partial(traverse, processing_table=table_copy)
di68kap's avatar
di68kap committed
230

231
def get_ebnf_transformer() -> TransformationFunc:
    """Returns a thread-local singleton EBNF-AST-transformation function,
    creating it lazily on first use."""
    global thread_local_EBNF_transformer_singleton
    try:
        return thread_local_EBNF_transformer_singleton
    except NameError:
        # first call in this thread: build and memoize the transformer
        thread_local_EBNF_transformer_singleton = EBNFTransform()
        return thread_local_EBNF_transformer_singleton
Eckhart Arnold's avatar
Eckhart Arnold committed
239
240
241
242
243
244
245
246


########################################################################
#
# EBNF abstract syntax tree to Python parser compilation
#
########################################################################

247

248
PreprocessorFactoryFunc = Callable[[], PreprocessorFunc]
249
ParserFactoryFunc = Callable[[], Grammar]
250
TransformerFactoryFunc = Callable[[], TransformationFunc]
251
252
CompilerFactoryFunc = Callable[[], Compiler]

253
254
255
PREPROCESSOR_FACTORY = '''
def get_preprocessor() -> PreprocessorFunc:
    return {NAME}Preprocessor
256
257
258
259
'''


GRAMMAR_FACTORY = '''
260
def get_grammar() -> {NAME}Grammar:
261
262
263
264
265
    global thread_local_{NAME}_grammar_singleton
    try:
        grammar = thread_local_{NAME}_grammar_singleton
    except NameError:
        thread_local_{NAME}_grammar_singleton = {NAME}Grammar()
266
267
        grammar = thread_local_{NAME}_grammar_singleton
    return grammar
268
269
270
271
'''


TRANSFORMER_FACTORY = '''
272
273
274
def {NAME}Transform() -> TransformationDict:
    return partial(traverse, processing_table={NAME}_AST_transformation_table.copy())

275
def get_transformer() -> TransformationFunc:
276
277
278
279
280
281
282
    global thread_local_{NAME}_transformer_singleton
    try:
        transformer = thread_local_{NAME}_transformer_singleton
    except NameError:
        thread_local_{NAME}_transformer_singleton = {NAME}Transform()
        transformer = thread_local_{NAME}_transformer_singleton
    return transformer
283
284
285
286
'''


COMPILER_FACTORY = '''
287
def get_compiler(grammar_name="{NAME}", grammar_source="") -> {NAME}Compiler:
288
289
290
291
292
293
294
    global thread_local_{NAME}_compiler_singleton
    try:
        compiler = thread_local_{NAME}_compiler_singleton
        compiler.set_grammar_name(grammar_name, grammar_source)
    except NameError:
        thread_local_{NAME}_compiler_singleton = \\
            {NAME}Compiler(grammar_name, grammar_source)
295
296
        compiler = thread_local_{NAME}_compiler_singleton
    return compiler
297
298
'''

Eckhart Arnold's avatar
Eckhart Arnold committed
299

300
301
class EBNFCompilerError(Exception):
    """Error raised by `EBNFCompiler` class. (Not compilation errors
    in the strict sense, see `CompilationError` in module ``dsl.py``)

    Signals misuse of the compiler API, e.g. requesting skeleton code
    before a compilation run."""
    pass


306
class EBNFCompiler(Compiler):
    """
    Generates a Parser from an abstract syntax tree of a grammar specified
    in EBNF-Notation.

    Instances of this class must be called with the root-node of the
    abstract syntax tree from an EBNF-specification of a formal language.
    The returned value is the Python-source-code of a Grammar class for
    this language that can be used to parse texts in this language.
    See classes `parser.Compiler` and `parser.Grammar` for more information.

    Additionally, class EBNFCompiler provides helper methods to generate
    code-skeletons for a preprocessor, AST-transformation and full
    compilation of the formal language. These method's names start with
    the prefix `gen_`.

    Attributes:
        current_symbols:  During compilation, a list containing the root
                node of the currently compiled definition as first element
                and then the nodes of the symbols that are referred to in
                the currently compiled definition.

        rules:  Dictionary that maps rule names to a list of Nodes that
                contain symbol-references in the definition of the rule.
                The first item in the list is the node of the rule-
                definition itself. Example:

                           `alternative = a | b`

                Now `[str(node) for node in self.rules['alternative']]`
                yields `['alternative = a | b', 'a', 'b']`

        symbols:  A mapping of symbol names to their first usage (not
                their definition!) in the EBNF source.

        variables:  A set of symbol names that are used with the
                Pop or Retrieve operator. Because the values of these
                symbols need to be captured they are called variables.
                See `test_parser.TestPopRetrieve` for an example.

        recursive:  A set of symbols that are used recursively and
                therefore require a `Forward`-operator.

        definitions:  A dictionary of definitions. Other than `rules`
                this maps the symbols to their compiled definienda.

        deferred_tasks:  A list of callables that is filled during
                compilation, but that will be executed only after
                compilation has finished. Typically, it contains
                semantic checks that require information that
                is only available upon completion of compilation.

        root_symbol:  The name of the root symbol.

        directives:  A dictionary of all directives and their default
                values.

        re_flags:  A set of regular expression flags to be added to all
                regular expressions found in the current parsing process
    """

    # names of the special symbols that are implicitly defined in every
    # generated grammar class
    COMMENT_KEYWORD = "COMMENT__"
    WHITESPACE_KEYWORD = "WSP__"
    RAW_WS_KEYWORD = "WHITESPACE__"
    RESERVED_SYMBOLS = {WHITESPACE_KEYWORD, RAW_WS_KEYWORD, COMMENT_KEYWORD}
    AST_ERROR = "Badly structured syntax tree. " \
                "Potentially due to erroneous AST transformation."
    # maps EBNF prefix operators to the names of the parser classes that
    # implement them in the generated code
    PREFIX_TABLE = {'§': 'Required',
                    '&': 'Lookahead', '!': 'NegativeLookahead',
                    '-&': 'Lookbehind', '-!': 'NegativeLookbehind',
                    '::': 'Pop', ':': 'Retrieve'}
    # predefined whitespace regexes selectable via the @whitespace directive
    WHITESPACE = {'horizontal': r'[\t ]*',  # default: horizontal
                  'linefeed': r'[ \t]*\n?(?!\s*\n)[ \t]*',
                  'vertical': r'\s*'}
    # directives that may legally appear more than once in a grammar
    REPEATABLE_DIRECTIVES = {'tokens'}
380

381

382
    def __init__(self, grammar_name="", grammar_source=""):
        """Initializes the compiler with the grammar's name and source text
        and resets all compilation state (see `_reset`)."""
        super(EBNFCompiler, self).__init__(grammar_name, grammar_source)
        self._reset()

386

387
    def _reset(self):
        """Resets all state variables to prepare for a fresh compilation run."""
        super(EBNFCompiler, self)._reset()
        self._result = ''           # type: str
        self.re_flags = set()       # type: Set[str]
        self.rules = OrderedDict()  # type: OrderedDict[str, List[Node]]
        self.current_symbols = []   # type: List[Node]
        self.symbols = {}           # type: Dict[str, Node]
        self.variables = set()      # type: Set[str]
        self.recursive = set()      # type: Set[str]
        self.definitions = {}       # type: Dict[str, str]
        self.deferred_tasks = []    # type: List[Callable]
        self.root_symbol = ""       # type: str
        # default values for the grammar directives; overridden by
        # on_directive() when the EBNF source contains '@'-directives
        self.directives = {'whitespace': self.WHITESPACE['horizontal'],
                           'comment': '',
                           'literalws': {'right'},
                           'tokens': set(),  # alt. 'preprocessor_tokens'
                           'filter': dict()}  # alt. 'filter'
        # self.directives['ignorecase']: False
        self.defined_directives = set()  # type: Set[str]
406

Eckhart Arnold's avatar
Eckhart Arnold committed
407
    @property
    def result(self) -> str:
        """The Python source code produced by the last compilation run
        (the empty string before any compilation has taken place)."""
        return self._result

411
    # methods for generating skeleton code for preprocessor, transformer, and compiler
412

413
    def gen_preprocessor_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for a preprocessor-function for
        the previously compiled formal language.
        """
        func_name = self.grammar_name + "Preprocessor"
        stub = "def %s(text):\n    return text\n" % func_name
        factory = PREPROCESSOR_FACTORY.format(NAME=self.grammar_name)
        return stub + factory
421

422

423
    def gen_transformer_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for the AST-transformation for the
        previously compiled formal language.

        Raises:
            EBNFCompilerError: if no compilation run has filled `self.rules` yet.
        """
        if not self.rules:
            raise EBNFCompilerError('Compiler must be run before calling '
                                    '"gen_transformer_Skeleton()"!')
        tt_name = self.grammar_name + '_AST_transformation_table'
        transtable = [tt_name + ' = {',
                      '    # AST Transformations for the ' + self.grammar_name + '-grammar']
        transtable.append('    "+": remove_empty,')
        for name in self.rules:
            transformations = '[]'
            # pre-fill plausible transformations based on the shape of the
            # compiled rule's definiens
            rule = self.definitions[name]
            if rule.startswith('Alternative'):
                transformations = '[replace_or_reduce]'
            elif rule.startswith('Synonym'):
                transformations = '[reduce_single_child]'
            transtable.append('    "' + name + '": %s,' % transformations)
        transtable.append('    ":Token, :RE": reduce_single_child,')
        transtable += ['    "*": replace_by_single_child', '}', '']
        transtable += [TRANSFORMER_FACTORY.format(NAME=self.grammar_name)]
        return '\n'.join(transtable)

448

449
    def gen_compiler_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for a Compiler-class for the
        previously compiled formal language.

        Raises:
            EBNFCompilerError: if no compilation run has filled `self.rules` yet.
        """
        if not self.rules:
            raise EBNFCompilerError('Compiler has not been run before calling '
                                    '"gen_Compiler_Skeleton()"!')
        compiler = ['class ' + self.grammar_name + 'Compiler(Compiler):',
                    '    """Compiler for the abstract-syntax-tree of a ' +
                    self.grammar_name + ' source file.',
                    '    """', '',
                    '    def __init__(self, grammar_name="' +
                    self.grammar_name + '", grammar_source=""):',
                    '        super(' + self.grammar_name +
                    'Compiler, self).__init__(grammar_name, grammar_source)',
                    r"        assert re.match('\w+\Z', grammar_name)", '']
        # one compile-method stub per rule; the root rule's stub returns
        # the node so that the generated compiler works out of the box
        for name in self.rules:
            method_name = Compiler.method_name(name)
            if name == self.root_symbol:
                compiler += ['    def ' + method_name + '(self, node):',
                             '        return node', '']
            else:
                compiler += ['    def ' + method_name + '(self, node):',
                             '        pass', '']
        compiler += [COMPILER_FACTORY.format(NAME=self.grammar_name)]
        return '\n'.join(compiler)
476

477

478
479
480
481
482
    def assemble_parser(self, definitions: List[Tuple[str, str]], root_node: Node) -> str:
        """
        Creates the Python code for the parser after compilation of
        the EBNF-Grammar.

        Parameters:
            definitions:  (symbol, python_definiens) pairs in definition
                order, as collected by `on_syntax`; mutated in place.
            root_node:  the root node of the compiled syntax tree.
                NOTE(review): currently unused except for the commented-out
                error_flag line below.

        Returns the assembled Python source of the grammar class and
        stores it in `self._result`.
        """

        # execute deferred tasks, for example semantic checks that cannot
        # be done before the symbol table is complete

        for task in self.deferred_tasks:
            task()

        # provide for capturing of symbols that are variables, i.e. the
        # value of will be retrieved at some point during the parsing process

        if self.variables:
            for i in range(len(definitions)):
                if definitions[i][0] in self.variables:
                    definitions[i] = (definitions[i][0], 'Capture(%s)' % definitions[i][1])

        # add special fields for Grammar class

        definitions.append(('wspR__', self.WHITESPACE_KEYWORD
                            if 'right' in self.directives['literalws'] else "''"))
        definitions.append(('wspL__', self.WHITESPACE_KEYWORD
                            if 'left' in self.directives['literalws'] else "''"))
        definitions.append((self.WHITESPACE_KEYWORD,
                            ("mixin_comment(whitespace=" + self.RAW_WS_KEYWORD +
                             ", comment=" + self.COMMENT_KEYWORD + ")")))
        definitions.append((self.RAW_WS_KEYWORD, "r'{whitespace}'".format(**self.directives)))
        definitions.append((self.COMMENT_KEYWORD, "r'{comment}'".format(**self.directives)))

        # prepare parser class header and docstring and
        # add EBNF grammar to the doc string of the parser class

        article = 'an ' if self.grammar_name[0:1] in "AaEeIiOoUu" else 'a '  # what about 'hour', 'universe' etc.?
        declarations = ['class ' + self.grammar_name +
                        'Grammar(Grammar):',
                        'r"""Parser for ' + article + self.grammar_name +
                        ' source file' +
                        (', with this grammar:' if self.grammar_source else '.')]
        definitions.append(('parser_initialization__', '"upon instantiation"'))
        if self.grammar_source:
            definitions.append(('source_hash__',
                                '"%s"' % md5(self.grammar_source, __version__)))
            declarations.append('')
            declarations += [line for line in self.grammar_source.split('\n')]
            # strip trailing blank lines from the embedded grammar source
            while declarations[-1].strip() == '':
                declarations = declarations[:-1]
        declarations.append('"""')

        # turn definitions into declarations in reverse order

        self.root_symbol = definitions[0][0] if definitions else ""
        definitions.reverse()
        # recursive symbols need a Forward() declaration before first use
        declarations += [symbol + ' = Forward()'
                         for symbol in sorted(list(self.recursive))]
        for symbol, statement in definitions:
            if symbol in self.recursive:
                declarations += [symbol + '.set(' + statement + ')']
            else:
                declarations += [symbol + ' = ' + statement]

        # check for symbols used but never defined

        defined_symbols = set(self.rules.keys()) | self.RESERVED_SYMBOLS
        for symbol in self.symbols:
            if symbol not in defined_symbols:
                self.symbols[symbol].add_error("Missing definition for symbol '%s'" % symbol)
                # root_node.error_flag = True

        # check for unconnected rules

        defined_symbols.difference_update(self.RESERVED_SYMBOLS)

        def remove_connections(symbol):
            """Recursively removes all symbols which appear in the
            definiens of a particular symbol."""
            if symbol in defined_symbols:
                defined_symbols.remove(symbol)
                for related in self.rules[symbol][1:]:
                    remove_connections(str(related))

        # whatever is left in defined_symbols after the walk from the root
        # is unreachable from the root rule and only warned about
        remove_connections(self.root_symbol)
        for leftover in defined_symbols:
            self.rules[leftover][0].add_error(
                ('Rule "%s" is not connected to parser root "%s" !') %
                (leftover, self.root_symbol), Error.WARNING)

        # set root_symbol parser and assemble python grammar definition

        if self.root_symbol and 'root__' not in self.rules:
            declarations.append('root__ = ' + self.root_symbol)
        declarations.append('')
        self._result = '\n    '.join(declarations) \
                       + GRAMMAR_FACTORY.format(NAME=self.grammar_name)
        return self._result
575

576
577
578

    ## compilation methods

579
    def on_syntax(self, node: Node) -> str:
        """
        Compiles the top-level 'syntax' node: compiles all definitions
        and directives and delegates to `assemble_parser` to produce the
        Python source of the grammar class.
        """
        definitions = []  # type: List[Tuple[str, str]]

        # drop the wrapping sequence node
        if len(node.children) == 1 and not node.children[0].parser.name:
            node = node.children[0]

        # compile definitions and directives and collect definitions
        for nd in node.children:
            if nd.parser.name == "definition":
                definitions.append(self.compile(nd))
            else:
                assert nd.parser.name == "directive", nd.as_sxpr()
                self.compile(nd)
            # propagate the worst error level of any child to the parent
            node.error_flag = max(node.error_flag, nd.error_flag)
        self.definitions.update(definitions)

        return self.assemble_parser(definitions, node)
597

598

599
    def on_definition(self, node: Node) -> Tuple[str, str]:
        """
        Compiles a single rule definition (`symbol = expression`).

        Returns a (rule_name, python_definiens) tuple, where the second
        element is the Python source code of the parser for the rule.
        Adds errors to `node` for redefined, reserved, ill-formed or
        otherwise illegal rule names.
        """
        rule = str(node.children[0])
        if rule in self.rules:
            first = self.rules[rule][0]
            if not first._errors:
                first.add_error('First definition of rule "%s" '
                                'followed by illegal redefinitions.' % rule)
            node.add_error('A rule with name "%s" has already been defined earlier.' % rule)
        elif rule in EBNFCompiler.RESERVED_SYMBOLS:
            node.add_error('Symbol "%s" is a reserved symbol.' % rule)
        elif not sane_parser_name(rule):
            # fixed error message: the original read "or  end with a doube
            # underscore" (double space and typo)
            node.add_error('Illegal symbol "%s". Symbols must not start or '
                           'end with a double underscore "__".' % rule)
        elif rule in self.directives['tokens']:
            node.add_error('Symbol "%s" has already been defined as '
                           'a preprocessor token.' % rule)
        elif keyword.iskeyword(rule):
            node.add_error('Python keyword "%s" may not be used as a symbol. '
                           % rule + '(This may change in the future.)')
        try:
            self.current_symbols = [node]
            self.rules[rule] = self.current_symbols
            defn = self.compile(node.children[1])
            if rule in self.variables:
                defn = 'Capture(%s)' % defn
                self.variables.remove(rule)
            elif defn.find("(") < 0:
                # assume it's a synonym, like 'page = REGEX_PAGE_NR'
                defn = 'Synonym(%s)' % defn
        except TypeError as error:
            # a TypeError here indicates a badly structured syntax tree;
            # report it with a traceback snippet instead of crashing
            from traceback import extract_tb
            trace = str(extract_tb(error.__traceback__)[-1])
            errmsg = "%s (TypeError: %s; %s)\n%s" \
                     % (EBNFCompiler.AST_ERROR, str(error), trace, node.as_sxpr())
            node.add_error(errmsg)
            rule, defn = rule + ':error', '"' + errmsg + '"'
        return rule, defn
636

637

638
    def _check_rx(self, node: Node, rx: str) -> str:
        """
        Validates that `rx` is a well-formed regular expression and reports
        malformed expressions as errors on `node`. Multi-line expressions
        additionally receive the verbose-flag. Returns the (possibly
        flag-prefixed) regular expression string.
        """
        flags = self.re_flags | {'x'} if '\n' in rx else self.re_flags
        if flags:
            rx = "(?%s)%s" % ("".join(flags), rx)
        try:
            re.compile(rx)
        except Exception as re_error:
            node.add_error("malformed regular expression %s: %s" %
                           (repr(rx), str(re_error)))
        return rx

653

654
    def on_directive(self, node: Node) -> str:
        """
        Compiles a grammar directive (e.g. ``@ whitespace = horizontal``) by
        updating ``self.directives`` and related compiler state.

        Returns the empty string, because directives never emit parser code.
        """
        key = str(node.children[0]).lower()
        assert key not in self.directives['tokens']

        # non-repeatable directives may be given only once per grammar
        if key not in self.REPEATABLE_DIRECTIVES:
            if key in self.defined_directives:
                node.add_error('Directive "%s" has already been defined earlier. ' % key + \
                               'Later definition will be ignored!',
                               code=Error.REDEFINED_DIRECTIVE_WARNING)
                return ""
            self.defined_directives.add(key)

        if key in {'comment', 'whitespace'}:
            if node.children[1].parser.name == "list_":
                if len(node.children[1].result) != 1:
                    node.add_error('Directive "%s" must have one, but not %i values.' %
                                   (key, len(node.children[1].result)))
                value = self.compile(node.children[1]).pop()
                if key == 'whitespace' and value in EBNFCompiler.WHITESPACE:
                    value = EBNFCompiler.WHITESPACE[value]  # replace whitespace-name by regex
                else:
                    node.add_error('Value "%s" not allowed for directive "%s".' % (value, key))
            else:
                value = str(node.children[1]).strip("~")  # cast(str, node.children[1].result).strip("~")
                if value != str(node.children[1]):  # cast(str, node.children[1].result):
                    node.add_error("Whitespace marker '~' not allowed in definition of "
                                   "%s regular expression." % key)
                if value[0] + value[-1] in {'""', "''"}:
                    value = escape_re(value[1:-1])
                elif value[0] + value[-1] == '//':
                    value = self._check_rx(node, value[1:-1])
                if key == 'whitespace' and not re.match(value, ''):
                    node.add_error("Implicit whitespace should always match the empty string, "
                                   "/%s/ does not." % value)
            self.directives[key] = value

        elif key == 'ignorecase':
            if str(node.children[1]).lower() not in {"off", "false", "no"}:
                self.re_flags.add('i')

        elif key == 'literalws':
            value = {item.lower() for item in self.compile(node.children[1])}
            if ((value - {'left', 'right', 'both', 'none'})
                    or ('none' in value and len(value) > 1)):
                node.add_error('Directive "literalws" allows the values '
                               '`left`, `right`, `both` or `none`, '
                               'but not `%s`' % ", ".join(value))
            # fixed: use an empty set instead of the empty-dict literal {}
            wsp = {'left', 'right'} if 'both' in value \
                else set() if 'none' in value else value
            self.directives[key] = list(wsp)

        elif key in {'tokens', 'preprocessor_tokens'}:
            tokens = self.compile(node.children[1])
            # fixed: key was misspelled 'tokes', which raised a KeyError
            redeclared = self.directives['tokens'] & tokens
            if redeclared:
                node.add_error('Tokens %s have already been declared earlier. '
                               % str(redeclared) + 'Later declaration will be ignored',
                               code=Error.REDECLARED_TOKEN_WARNING)
            self.directives['tokens'] |= tokens - redeclared

        elif key.endswith('_filter'):
            filter_set = self.compile(node.children[1])
            if not isinstance(filter_set, set) or len(filter_set) != 1:
                # fixed message typo: "exactly on" -> "exactly one"
                node.add_error('Directive "%s" accepts exactly one symbol, not %s'
                               % (key, str(filter_set)))
            self.directives['filter'][key[:-7]] = filter_set.pop()

        else:
            node.add_error('Unknown directive %s ! (Known ones are %s .)' %
                           (key, ', '.join(list(self.directives.keys()))))
        return ""

730

731
    def non_terminal(self, node: Node, parser_class: str, custom_args: List[str] = None) -> str:
        """
        Compiles any non-terminal, where `parser_class` indicates the Parser class
        name for the particular non-terminal.

        `custom_args` optionally carries additional keyword-argument strings
        (e.g. ``'mandatory=1'``) to append to the compiled argument list.
        """
        # fixed: mutable default argument [] replaced by None-sentinel
        arguments = [self.compile(r) for r in node.children] + (custom_args or [])
        # propagate the worst error level of any child up to this node
        node.error_flag = max(node.error_flag, max(t.error_flag for t in node.children))
        return parser_class + '(' + ', '.join(arguments) + ')'

740

741
    def on_expression(self, node) -> str:
        """Compiles an alternative (``a | b | ...``) into an Alternative-parser."""
        # TODO: Add check for errors like "a" | "ab" (which will always yield a, even for ab)
        return self.non_terminal(node, 'Alternative')

745

746
    def on_term(self, node) -> str:
        """
        Compiles a sequence of items into a Series-parser.

        Essentially this is ``self.non_terminal(node, 'Series')``; the extra
        work handles the mandatory-operator ``§``: the marker tokens are
        filtered out of the children and only the position of the first
        marker is passed on via the ``mandatory``-argument of Series.
        """
        marker_positions = []
        children_without_markers = []
        position = 0
        for child in node.children:
            is_marker = child.parser.ptype == TOKEN_PTYPE and str(child) == "§"
            if not is_marker:
                children_without_markers.append(child)
                position += 1
                continue
            marker_positions.append(position)
            if position == 0:
                child.add_error('First item of a series should not be mandatory.',
                                Error.WARNING)
            elif len(marker_positions) > 1:
                child.add_error('One mandatory marker (§) sufficient to declare the '
                                'rest of the series as mandatory.', Error.WARNING)
        # temporarily swap in the marker-free children for compilation
        saved_result = node.result
        node.result = tuple(children_without_markers)
        if marker_positions:
            custom_args = ['mandatory=%i' % marker_positions[0]]
        else:
            custom_args = []
        compiled = self.non_terminal(node, 'Series', custom_args)
        node.result = saved_result
        return compiled
772

773

774
    def on_factor(self, node: Node) -> str:
        """Compiles a prefixed factor (e.g. ``&a``, ``!a``, ``::a``) into the
        parser class looked up in ``self.PREFIX_TABLE``. Retrieve-operators
        (``:``, ``::``) additionally register the symbol as a variable."""
        assert node.children
        assert len(node.children) >= 2, node.as_sxpr()
        prefix = str(node.children[0])  # cast(str, node.children[0].result)
        custom_args = []  # type: List[str]

        if prefix in {'::', ':'}:
            # retrieve/pop operators only work on plain symbols
            assert len(node.children) == 2
            arg = node.children[-1]
            if arg.parser.name != 'symbol':
                node.add_error(('Retrieve Operator "%s" requires a symbol, '
                                'and not a %s.') % (prefix, str(arg.parser)))
                return str(arg.result)
            if str(arg) in self.directives['filter']:
                # attach the retrieve-filter configured via an xxx_filter directive
                custom_args = ['rfilter=%s' % self.directives['filter'][str(arg)]]
            self.variables.add(str(arg))  # cast(str, arg.result)

        elif len(node.children) > 2:
            # fold the trailing operands into a single (nested) second child,
            # so that below the factor has exactly one argument node
            # shift = (Node(node.parser, node.result[1].result),)
            # node.result[1].result = shift + node.result[2:]
            node.children[1].result = (Node(node.children[1].parser, node.children[1].result),) \
                                    + node.children[2:]
            node.children[1].parser = node.parser
            node.result = (node.children[0], node.children[1])

        # drop the prefix token; only the operand(s) get compiled
        node.result = node.children[1:]
        try:
            parser_class = self.PREFIX_TABLE[prefix]
            result = self.non_terminal(node, parser_class, custom_args)
            if prefix[:1] == '-':
                # lookbehind-operators: verify (later, once all rules are
                # known) that the operand boils down to a plain RegExp
                def check(node):
                    # follow symbol references down to their definition
                    nd = node
                    if len(nd.children) >= 1:
                        nd = nd.children[0]
                    while nd.parser.name == "symbol":
                        symlist = self.rules.get(str(nd), [])
                        if len(symlist) == 2:
                            nd = symlist[1]
                        else:
                            if len(symlist) == 1:
                                nd = symlist[0].children[1]
                            break
                    if (nd.parser.name != "regexp" or str(nd)[:1] != '/'
                        or str(nd)[-1:] != '/'):
                        node.add_error("Lookbehind-parser can only be used with plain RegExp-"
                                       "parsers, not with: " + nd.parser.name + nd.parser.ptype)

                if not result.startswith('RegExp('):
                    # defer the check until all symbols have been compiled
                    self.deferred_tasks.append(lambda: check(node))
            return result
        except KeyError:
            node.add_error('Unknown prefix "%s".' % prefix)
        return ""
827

828

829
    def on_option(self, node) -> str:
        """Compiles an optional item (``[a]``) into an Option-parser."""
        return self.non_terminal(node, 'Option')
831

832

833
    def on_repetition(self, node) -> str:
        """Compiles a repetition (``{a}``) into a ZeroOrMore-parser."""
        return self.non_terminal(node, 'ZeroOrMore')

836

837
    def on_oneormore(self, node) -> str:
        """Compiles a one-or-more repetition (``{a}+``) into a OneOrMore-parser."""
        return self.non_terminal(node, 'OneOrMore')

840

841
    def on_regexchain(self, node) -> str:
        """Not implemented; always raises an EBNFCompilerError."""
        raise EBNFCompilerError("Not yet implemented!")

844

845
    def on_group(self, node) -> str:
        """Group nodes must never reach the compiler; the AST transformation
        is expected to have removed them already."""
        raise EBNFCompilerError("Group nodes should have been eliminated by "
                                "AST transformation!")

849
    def on_unordered(self, node) -> str:
        """
        Compiles an unordered group into an AllOf- or SomeOf-parser,
        depending on whether the group wraps a sequence ("term") or an
        alternative ("expression"). Mandatory (§) items are not allowed
        inside unordered groups and are reported as errors.
        """
        # return self.non_terminal(node, 'Unordered')
        assert len(node.children) == 1
        nd = node.children[0]
        for child in nd.children:
            # fixed: original compared str(nd) (the parent) instead of
            # str(child), so mandatory markers were never detected here
            if child.parser.ptype == TOKEN_PTYPE and str(child) == "§":
                node.add_error("Unordered parser lists cannot contain mandatory (§) items.")
        args = ', '.join(self.compile(child) for child in nd.children)
        if nd.parser.name == "term":
            return "AllOf(" + args + ")"
        elif nd.parser.name == "expression":
            return "SomeOf(" + args + ")"
        else:
            node.add_error("Unordered sequence or alternative requires at least two elements.")
            return ""
864

865
866
867
    def on_symbol(self, node: Node) -> str:     # called only for symbols on the right hand side!
        symbol = str(node)  # ; assert result == cast(str, node.result)
        if symbol in self.directives['tokens']:
868
            return 'PreprocessorToken("' + symbol + '")'
869
        else:
870
871
            self.current_symbols.append(node)
            if symbol not in self.symbols: