Currently job artifacts in CI/CD pipelines on LRZ GitLab never expire. Starting from Wed 26.1.2022 the default expiration time will be 30 days (GitLab default). Currently existing artifacts in already completed jobs will not be affected by the change. The latest artifacts for all jobs in the latest successful pipelines will be kept. More information: https://gitlab.lrz.de/help/user/admin_area/settings/continuous_integration.html#default-artifacts-expiration

ebnf.py 40 KB
Newer Older
1
"""ebnf.py - EBNF -> Python-Parser compilation for DHParser
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18

Copyright 2016  by Eckhart Arnold (arnold@badw.de)
                Bavarian Academy of Sciences and Humanities (badw.de)

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied.  See the License for the specific language governing
permissions and limitations under the License.
"""

19
import keyword
20
from collections import OrderedDict
21
from functools import partial
22
from typing import Callable, Dict, List, Set, Tuple
23

24
from DHParser.error import Error
25
from DHParser.parse import Grammar, mixin_comment, Forward, RegExp, RE, \
26
    NegativeLookahead, Alternative, Series, Option, OneOrMore, ZeroOrMore, Token, \
27
28
    Compiler
from DHParser.preprocess import nil_preprocessor, PreprocessorFunc
29
from DHParser.syntaxtree import Node, TransformationFunc, WHITESPACE_PTYPE, TOKEN_PTYPE
30
from DHParser.toolkit import load_if_file, escape_re, md5, sane_parser_name, re, expand_table
31
from DHParser.transform import traverse, remove_brackets, \
Eckhart Arnold's avatar
Eckhart Arnold committed
32
    reduce_single_child, replace_by_single_child, remove_expendables, \
33
    remove_tokens, flatten, forbid, assert_content, remove_infix_operator
34
from DHParser.versionnumber import __version__
35

36
# Public API of this module: factory functions, the EBNF parser/transformer/
# compiler classes, and the type aliases for compiler-suite factory functions.
__all__ = ('get_ebnf_preprocessor',
           'get_ebnf_grammar',
           'get_ebnf_transformer',
           'get_ebnf_compiler',
           'EBNFGrammar',
           'EBNFTransform',
           'EBNFCompilerError',
           'EBNFCompiler',
           'grammar_changed',
           'PreprocessorFactoryFunc',
           'ParserFactoryFunc',
           'TransformerFactoryFunc',
           'CompilerFactoryFunc')
49
50


Eckhart Arnold's avatar
Eckhart Arnold committed
51
52
53
54
55
56
57
########################################################################
#
# EBNF scanning
#
########################################################################


58
59
def get_ebnf_preprocessor() -> PreprocessorFunc:
    """Returns the preprocessor function for EBNF sources.

    EBNF sources require no preprocessing, so this is simply the
    no-op `nil_preprocessor`.
    """
    return nil_preprocessor
Eckhart Arnold's avatar
Eckhart Arnold committed
60
61
62
63
64
65
66
67


########################################################################
#
# EBNF parsing
#
########################################################################

68

di68kap's avatar
di68kap committed
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
class EBNFGrammar(Grammar):
    r"""Parser for an EBNF source file, with this grammar:

    # EBNF-Grammar in EBNF

    @ comment    =  /#.*(?:\n|$)/                    # comments start with '#' and eat all chars up to and including '\n'
    @ whitespace =  /\s*/                            # whitespace includes linefeed
    @ literalws  =  right                            # trailing whitespace of literals will be ignored tacitly

    syntax     =  [~//] { definition | directive } §EOF
    definition =  symbol §"=" expression
    directive  =  "@" §symbol "=" ( regexp | literal | list_ )

    expression =  term { "|" term }
    term       =  { ["§"] factor }+                       # "§" means all following factors mandatory
    factor     =  [flowmarker] [retrieveop] symbol !"="   # negative lookahead to be sure it's not a definition
                | [flowmarker] literal
                | [flowmarker] regexp
                | [flowmarker] oneormore
                | [flowmarker] group
                | [flowmarker] unordered
                | repetition
                | option

    flowmarker =  "!"  | "&"                         # '!' negative lookahead, '&' positive lookahead
                | "-!" | "-&"                        # '-' negative lookbehind, '-&' positive lookbehind
    retrieveop =  "::" | ":"                         # '::' pop, ':' retrieve

    group      =  "(" §expression ")"
    unordered  =  "<" §expression ">"                # elements of expression in arbitrary order
    oneormore  =  "{" expression "}+"
    repetition =  "{" §expression "}"
    option     =  "[" §expression "]"

    symbol     =  /(?!\d)\w+/~                       # e.g. expression, factor, parameter_list
    literal    =  /"(?:[^"]|\\")*?"/~                # e.g. "(", '+', 'while'
                | /'(?:[^']|\\')*?'/~                # whitespace following literals will be ignored tacitly.
    regexp     =  /~?\/(?:\\\/|[^\/])*?\/~?/~        # e.g. /\w+/, ~/#.*(?:\n|$)/~
                                                     # '~' is a whitespace-marker, if present leading or trailing
                                                     # whitespace of a regular expression will be ignored tacitly.
    list_      =  /\w+/~ { "," /\w+/~ }              # comma separated list of symbols, e.g. BEGIN_LIST, END_LIST,
                                                     # BEGIN_QUOTE, END_QUOTE ; see CommonMark/markdown.py for an example
    EOF =  !/./
    """
    # NOTE(review): the parser definitions below appear to be auto-generated
    # from the grammar in the docstring (cf. `source_hash__`) — change the
    # grammar and regenerate rather than editing the definitions by hand.
    expression = Forward()
    source_hash__ = "3fc9f5a340f560e847d9af0b61a68743"
    parser_initialization__ = "upon instantiation"
    COMMENT__ = r'#.*(?:\n|$)'
    WHITESPACE__ = r'\s*'
    WSP__ = mixin_comment(whitespace=WHITESPACE__, comment=COMMENT__)
    wspL__ = ''
    wspR__ = WSP__
    EOF = NegativeLookahead(RegExp('.'))
    list_ = Series(RE('\\w+'), ZeroOrMore(Series(Token(","), RE('\\w+'))))
    regexp = RE('~?/(?:\\\\/|[^/])*?/~?')
    literal = Alternative(RE('"(?:[^"]|\\\\")*?"'), RE("'(?:[^']|\\\\')*?'"))
    symbol = RE('(?!\\d)\\w+')
    option = Series(Token("["), expression, Token("]"), mandatory=1)
    repetition = Series(Token("{"), expression, Token("}"), mandatory=1)
    oneormore = Series(Token("{"), expression, Token("}+"))
    unordered = Series(Token("<"), expression, Token(">"), mandatory=1)
    group = Series(Token("("), expression, Token(")"), mandatory=1)
    retrieveop = Alternative(Token("::"), Token(":"))
    flowmarker = Alternative(Token("!"), Token("&"), Token("-!"), Token("-&"))
    factor = Alternative(
        Series(Option(flowmarker), Option(retrieveop), symbol, NegativeLookahead(Token("="))),
        Series(Option(flowmarker), literal), Series(Option(flowmarker), regexp),
        Series(Option(flowmarker), oneormore), Series(Option(flowmarker), group),
        Series(Option(flowmarker), unordered), repetition, option)
    term = OneOrMore(Series(Option(Token("§")), factor))
    expression.set(Series(term, ZeroOrMore(Series(Token("|"), term))))
    directive = Series(Token("@"), symbol, Token("="), Alternative(regexp, literal, list_),
                       mandatory=1)
    definition = Series(symbol, Token("="), expression, mandatory=1)
    syntax = Series(Option(RE('', wR='', wL=WSP__)), ZeroOrMore(Alternative(definition, directive)),
                    EOF, mandatory=2)
    root__ = syntax


148
def grammar_changed(grammar_class, grammar_source: str) -> bool:
    """
    Returns ``True`` if ``grammar_class`` does not reflect the latest
    changes of ``grammar_source``

    Parameters:
        grammar_class:  the parser class representing the grammar
            or the file name of a compiler suite containing the grammar
        grammar_source:  File name or string representation of the
            EBNF code of the grammar

    Returns (bool):
        True, if the source text of the grammar is different from the
        source from which the grammar class was generated
    """
    grammar = load_if_file(grammar_source)
    chksum = md5(grammar, __version__)
    if isinstance(grammar_class, str):
        # grammar_class = load_compiler_suite(grammar_class)[1]
        with open(grammar_class, 'r', encoding='utf8') as f:
            pycode = f.read()
        # raw string literals: '\w' and '\(' are regex escapes, not string
        # escapes (non-raw form raises DeprecationWarning/SyntaxWarning)
        m = re.search(r'class \w*\(Grammar\)', pycode)
        if m:
            m = re.search(r'    source_hash__ *= *"([a-z0-9]*)"',
                          pycode[m.span()[1]:])
            return not (m and m.groups() and m.groups()[-1] == chksum)
        else:
            # no Grammar class found in the file => regeneration required
            return True
    else:
        return chksum != grammar_class.source_hash__


180
def get_ebnf_grammar() -> EBNFGrammar:
    """Returns a lazily created singleton instance of `EBNFGrammar`.

    The instance is created on first call and reused afterwards.
    """
    global thread_local_ebnf_grammar_singleton
    try:
        return thread_local_ebnf_grammar_singleton
    except NameError:
        # first call: no singleton exists yet
        thread_local_ebnf_grammar_singleton = EBNFGrammar()
    return thread_local_ebnf_grammar_singleton


########################################################################
#
# EBNF concrete to abstract syntax tree transformation and validation
#
########################################################################


197
# Table mapping node names (or comma separated groups of names) of the EBNF
# concrete syntax tree to the transformations that turn it into an AST.
# Keys "+" and "*" are the catch-all entries applied before/after the rest.
EBNF_AST_transformation_table = {
    # AST Transformations for EBNF-grammar
    "+":
        remove_expendables,
    "syntax":
        [],  # otherwise '"*": replace_by_single_child' would be applied
    "directive, definition":
        remove_tokens('@', '='),
    "expression":
        [replace_by_single_child, flatten, remove_tokens('|')],  # remove_infix_operator],
    "term":
        [replace_by_single_child, flatten],  # supports both idioms:  "{ factor }+" and "factor { factor }"
    "factor, flowmarker, retrieveop":
        replace_by_single_child,
    "group":
        [remove_brackets, replace_by_single_child],
    "unordered":
        remove_brackets,
    "oneormore, repetition, option":
        [reduce_single_child, remove_brackets,
         forbid('repetition', 'option', 'oneormore'), assert_content(r'(?!§)')],
    "symbol, literal, regexp":
        reduce_single_child,
    (TOKEN_PTYPE, WHITESPACE_PTYPE):
        reduce_single_child,
    "list_":
        [flatten, remove_infix_operator],
    "*":
        replace_by_single_child
}

228

Eckhart Arnold's avatar
Eckhart Arnold committed
229
def EBNFTransform() -> TransformationFunc:
    """Creates a transformation function for EBNF concrete syntax trees.

    A copy of the transformation table is bound, so later mutations of the
    module level table do not affect transformers that already exist.
    """
    table_copy = EBNF_AST_transformation_table.copy()
    return partial(traverse, processing_table=table_copy)
di68kap's avatar
di68kap committed
231

232
def get_ebnf_transformer() -> TransformationFunc:
    """Returns a lazily created singleton EBNF AST transformation function."""
    global thread_local_EBNF_transformer_singleton
    try:
        return thread_local_EBNF_transformer_singleton
    except NameError:
        # first call: create and memoize the transformer
        thread_local_EBNF_transformer_singleton = EBNFTransform()
        return thread_local_EBNF_transformer_singleton
Eckhart Arnold's avatar
Eckhart Arnold committed
240
241
242
243
244
245
246
247


########################################################################
#
# EBNF abstract syntax tree to Python parser compilation
#
########################################################################

248

249
PreprocessorFactoryFunc = Callable[[], PreprocessorFunc]
250
ParserFactoryFunc = Callable[[], Grammar]
251
TransformerFactoryFunc = Callable[[], TransformationFunc]
252
253
CompilerFactoryFunc = Callable[[], Compiler]

254
255
256
PREPROCESSOR_FACTORY = '''
def get_preprocessor() -> PreprocessorFunc:
    return {NAME}Preprocessor
257
258
259
260
'''


GRAMMAR_FACTORY = '''
261
def get_grammar() -> {NAME}Grammar:
262
263
264
265
266
    global thread_local_{NAME}_grammar_singleton
    try:
        grammar = thread_local_{NAME}_grammar_singleton
    except NameError:
        thread_local_{NAME}_grammar_singleton = {NAME}Grammar()
267
268
        grammar = thread_local_{NAME}_grammar_singleton
    return grammar
269
270
271
272
'''


TRANSFORMER_FACTORY = '''
273
274
275
def {NAME}Transform() -> TransformationDict:
    return partial(traverse, processing_table={NAME}_AST_transformation_table.copy())

276
def get_transformer() -> TransformationFunc:
277
278
279
280
281
282
283
    global thread_local_{NAME}_transformer_singleton
    try:
        transformer = thread_local_{NAME}_transformer_singleton
    except NameError:
        thread_local_{NAME}_transformer_singleton = {NAME}Transform()
        transformer = thread_local_{NAME}_transformer_singleton
    return transformer
284
285
286
287
'''


COMPILER_FACTORY = '''
288
def get_compiler(grammar_name="{NAME}", grammar_source="") -> {NAME}Compiler:
289
290
291
292
293
294
295
    global thread_local_{NAME}_compiler_singleton
    try:
        compiler = thread_local_{NAME}_compiler_singleton
        compiler.set_grammar_name(grammar_name, grammar_source)
    except NameError:
        thread_local_{NAME}_compiler_singleton = \\
            {NAME}Compiler(grammar_name, grammar_source)
296
297
        compiler = thread_local_{NAME}_compiler_singleton
    return compiler
298
299
'''

Eckhart Arnold's avatar
Eckhart Arnold committed
300

301
302
class EBNFCompilerError(Exception):
    """Exception raised by the `EBNFCompiler` class itself, e.g. on misuse
    of its API. (These are not compilation errors in the strict sense;
    for those see `CompilationError` in module ``dsl.py``.)"""


307
class EBNFCompiler(Compiler):
308
309
    """
    Generates a Parser from an abstract syntax tree of a grammar specified
310
    in EBNF-Notation.
311
312
313
314
315
316
317
318
319
320
321
322
323

    Instances of this class must be called with the root-node of the
    abstract syntax tree from an EBNF-specification of a formal language.
    The returned value is the Python-source-code of a Grammar class for
    this language that can be used to parse texts in this language.
    See classes `parser.Compiler` and `parser.Grammar` for more information.

    Addionally, class EBNFCompiler provides helper methods to generate
    code-skeletons for a preprocessor, AST-transformation and full
    compilation of the formal language. These method's names start with
    the prefix `gen_`.

    Attributes:
324
        current_symbols:  During compilation, a list containing the root
325
326
327
328
                node of the currently compiled definition as first element
                and then the nodes of the symbols that are referred to in
                the currently compiled definition.

329
        rules:  Dictionary that maps rule names to a list of Nodes that
330
331
332
333
334
335
                contain symbol-references in the definition of the rule.
                The first item in the list is the node of the rule-
                definition itself. Example:

                           `alternative = a | b`

336
                Now `[node.content for node in self.rules['alternative']]`
337
338
                yields `['alternative = a | b', 'a', 'b']`

339
        symbols:  A mapping of symbol names to their first usage (not
340
341
                their definition!) in the EBNF source.

342
        variables:  A set of symbols names that are used with the
343
344
345
346
                Pop or Retrieve operator. Because the values of these
                symbols need to be captured they are called variables.
                See `test_parser.TestPopRetrieve` for an example.

347
        recursive:  A set of symbols that are used recursively and
348
349
                therefore require a `Forward`-operator.

350
        definitions:  A dictionary of definitions. Other than `rules`
351
352
                this maps the symbols to their compiled definienda.

353
        deferred_taks:  A list of callables that is filled during
354
355
356
357
358
                compilatation, but that will be executed only after
                compilation has finished. Typically, it contains
                sementatic checks that require information that
                is only available upon completion of compilation.

359
        root:   The name of the root symbol.
360

361
        directives:  A dictionary of all directives and their default
362
                values.
363
364
365

        re_flags:  A set of regular expression flags to be added to all
                regular expressions found in the current parsing process
366
367
    """
    # Names of the special symbols that the generated Grammar class defines
    # for comment and whitespace handling; these may not be redefined by a
    # grammar (see RESERVED_SYMBOLS).
    COMMENT_KEYWORD = "COMMENT__"
    WHITESPACE_KEYWORD = "WSP__"
    RAW_WS_KEYWORD = "WHITESPACE__"
    RESERVED_SYMBOLS = {WHITESPACE_KEYWORD, RAW_WS_KEYWORD, COMMENT_KEYWORD}
    # Error message fragment for TypeErrors caught during compilation.
    AST_ERROR = "Badly structured syntax tree. " \
                "Potentially due to erroneous AST transformation."
    # Maps EBNF prefix operators to the names of the parser classes that
    # implement them in the generated code.
    PREFIX_TABLE = {'§': 'Required',
                    '&': 'Lookahead', '!': 'NegativeLookahead',
                    '-&': 'Lookbehind', '-!': 'NegativeLookbehind',
                    '::': 'Pop', ':': 'Retrieve'}
    # Predefined regular expressions selectable via the @whitespace directive.
    WHITESPACE = {'horizontal': r'[\t ]*',  # default: horizontal
                  'linefeed': r'[ \t]*\n?(?!\s*\n)[ \t]*',
                  'vertical': r'\s*'}
    # Directives that may legally appear more than once in a grammar.
    REPEATABLE_DIRECTIVES = {'tokens'}
381

382

383
    def __init__(self, grammar_name="", grammar_source=""):
        """Initializes the compiler for the given grammar name and source
        and resets all internal compilation state."""
        super(EBNFCompiler, self).__init__(grammar_name, grammar_source)
        self._reset()

387

388
    def _reset(self):
        """Resets all internal state so the compiler instance can be reused
        for compiling another grammar."""
        super(EBNFCompiler, self)._reset()
        self._result = ''           # type: str
        self.re_flags = set()       # type: Set[str]
        self.rules = OrderedDict()  # type: OrderedDict[str, List[Node]]
        self.current_symbols = []   # type: List[Node]
        self.symbols = {}           # type: Dict[str, Node]
        self.variables = set()      # type: Set[str]
        self.recursive = set()      # type: Set[str]
        self.definitions = {}       # type: Dict[str, str]
        self.deferred_tasks = []    # type: List[Callable]
        self.root_symbol = ""       # type: str
        # directive defaults; overridden by @-directives in the grammar
        self.directives = {'whitespace': self.WHITESPACE['horizontal'],
                           'comment': '',
                           'literalws': {'right'},
                           'tokens': set(),  # alt. 'preprocessor_tokens'
                           'filter': dict()}  # alt. 'filter'
        # self.directives['ignorecase']: False
        self.defined_directives = set()  # type: Set[str]
407

Eckhart Arnold's avatar
Eckhart Arnold committed
408
    @property
    def result(self) -> str:
        """The Python source code of the grammar class produced by the last
        compilation run (empty string before any compilation)."""
        return self._result

412
    # methods for generating skeleton code for preprocessor, transformer, and compiler
413

414
    def gen_preprocessor_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for a preprocessor-function for
        the previously compiled formal language.
        """
        func_name = self.grammar_name + "Preprocessor"
        stub = "def %s(text):\n    return text, lambda i: i\n" % func_name
        factory = PREPROCESSOR_FACTORY.format(NAME=self.grammar_name)
        return stub + factory
422

423

424
    def gen_transformer_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for the AST-transformation for the
        previously compiled formal language.

        Raises:
            EBNFCompilerError: if no grammar has been compiled yet.
        """
        if not self.rules:
            raise EBNFCompilerError('Compiler must be run before calling '
                                    '"gen_transformer_Skeleton()"!')
        tt_name = self.grammar_name + '_AST_transformation_table'
        lines = [tt_name + ' = {',
                 '    # AST Transformations for the ' + self.grammar_name + '-grammar',
                 '    "+": remove_empty,']
        # one table entry per rule; pre-fill with a plausible transformation
        # derived from the shape of the rule's compiled definition
        for name in self.rules:
            rule = self.definitions[name]
            if rule.startswith('Alternative'):
                transformations = '[replace_or_reduce]'
            elif rule.startswith('Synonym'):
                transformations = '[reduce_single_child]'
            else:
                transformations = '[]'
            lines.append('    "' + name + '": %s,' % transformations)
        lines.append('    ":Token, :RE": reduce_single_child,')
        lines.extend(['    "*": replace_by_single_child', '}', ''])
        lines.append(TRANSFORMER_FACTORY.format(NAME=self.grammar_name))
        return '\n'.join(lines)

449

450
    def gen_compiler_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for a Compiler-class for the
        previously compiled formal language.

        Raises:
            EBNFCompilerError: if no grammar has been compiled yet.
        """
        if not self.rules:
            raise EBNFCompilerError('Compiler has not been run before calling '
                                    '"gen_Compiler_Skeleton()"!')
        compiler = ['class ' + self.grammar_name + 'Compiler(Compiler):',
                    '    """Compiler for the abstract-syntax-tree of a ' +
                    self.grammar_name + ' source file.',
                    '    """', '',
                    '    def __init__(self, grammar_name="' +
                    self.grammar_name + '", grammar_source=""):',
                    '        super(' + self.grammar_name +
                    'Compiler, self).__init__(grammar_name, grammar_source)',
                    r"        assert re.match('\w+\Z', grammar_name)", '']
        # one compilation method per rule; only the root symbol's method is
        # generated active, the others are emitted commented-out
        for name in self.rules:
            method_name = Compiler.method_name(name)
            if name == self.root_symbol:
                compiler += ['    def ' + method_name + '(self, node):',
                             '        return node', '']
            else:
                compiler += ['    # def ' + method_name + '(self, node):',
                             '    #     return node', '']
        compiler += [COMPILER_FACTORY.format(NAME=self.grammar_name)]
        return '\n'.join(compiler)
477

478
479
480
481
482
483
484
485
486
487
488
489
    def verify_transformation_table(self, transtable):
        """
        Checks a transformation table for entries that do not correspond to
        any symbol defined in the compiled grammar. Returns a list of error
        objects, one per unknown symbol.
        """
        assert self._dirty_flag
        known_symbols = self.rules.keys()
        candidates = set(expand_table(transtable).keys()) - {'*', '+', '~'}
        messages = []
        for key in candidates:
            # keys beginning with ':' address parser types, not symbols
            if not key.startswith(":") and key not in known_symbols:
                messages.append(Error(('Symbol "%s" is not defined in grammar %s but appears in '
                                       'the transformation table!') % (key, self.grammar_name),
                                      Error.UNDEFINED_SYMBOL_IN_TRANSFORMATION_TABLE))
        return messages

490

491
492
493
494
495
    def assemble_parser(self, definitions: List[Tuple[str, str]], root_node: Node) -> str:
        """
        Creates the Python code for the parser after compilation of
        the EBNF-Grammar.

        Note: `definitions` is mutated in place (Capture-wrapping and the
        special grammar fields are appended, then the list is reversed).
        The assembled source code is stored in `self._result` and returned.
        """

        # execute deferred tasks, for example semantic checks that cannot
        # be done before the symbol table is complete

        for task in self.deferred_tasks:
            task()

        # provide for capturing of symbols that are variables, i.e. the
        # value of will be retrieved at some point during the parsing process

        if self.variables:
            for i in range(len(definitions)):
                if definitions[i][0] in self.variables:
                    definitions[i] = (definitions[i][0], 'Capture(%s)' % definitions[i][1])

        # add special fields for Grammar class

        definitions.append(('wspR__', self.WHITESPACE_KEYWORD
                            if 'right' in self.directives['literalws'] else "''"))
        definitions.append(('wspL__', self.WHITESPACE_KEYWORD
                            if 'left' in self.directives['literalws'] else "''"))
        definitions.append((self.WHITESPACE_KEYWORD,
                            ("mixin_comment(whitespace=" + self.RAW_WS_KEYWORD +
                             ", comment=" + self.COMMENT_KEYWORD + ")")))
        definitions.append((self.RAW_WS_KEYWORD, "r'{whitespace}'".format(**self.directives)))
        definitions.append((self.COMMENT_KEYWORD, "r'{comment}'".format(**self.directives)))

        # prepare parser class header and docstring and
        # add EBNF grammar to the doc string of the parser class

        article = 'an ' if self.grammar_name[0:1] in "AaEeIiOoUu" else 'a '  # what about 'hour', 'universe' etc.?
        declarations = ['class ' + self.grammar_name +
                        'Grammar(Grammar):',
                        'r"""Parser for ' + article + self.grammar_name +
                        ' source file' +
                        (', with this grammar:' if self.grammar_source else '.')]
        definitions.append(('parser_initialization__', '"upon instantiation"'))
        if self.grammar_source:
            definitions.append(('source_hash__',
                                '"%s"' % md5(self.grammar_source, __version__)))
            declarations.append('')
            declarations += [line for line in self.grammar_source.split('\n')]
            # strip trailing blank lines from the embedded grammar source
            while declarations[-1].strip() == '':
                declarations = declarations[:-1]
        declarations.append('"""')

        # turn definitions into declarations in reverse order

        # the first definition in source order becomes the root symbol
        self.root_symbol = definitions[0][0] if definitions else ""
        definitions.reverse()
        declarations += [symbol + ' = Forward()'
                         for symbol in sorted(list(self.recursive))]
        for symbol, statement in definitions:
            if symbol in self.recursive:
                declarations += [symbol + '.set(' + statement + ')']
            else:
                declarations += [symbol + ' = ' + statement]

        # check for symbols used but never defined

        defined_symbols = set(self.rules.keys()) | self.RESERVED_SYMBOLS
        for symbol in self.symbols:
            if symbol not in defined_symbols:
                self.symbols[symbol].add_error("Missing definition for symbol '%s'" % symbol)
                # root_node.error_flag = True

        # check for unconnected rules

        defined_symbols.difference_update(self.RESERVED_SYMBOLS)

        def remove_connections(symbol):
            """Recursively removes all symbols which appear in the
            definiens of a particular symbol."""
            if symbol in defined_symbols:
                defined_symbols.remove(symbol)
                for related in self.rules[symbol][1:]:
                    remove_connections(str(related))

        remove_connections(self.root_symbol)
        for leftover in defined_symbols:
            self.rules[leftover][0].add_error(
                ('Rule "%s" is not connected to parser root "%s" !') % 
                (leftover, self.root_symbol), Error.WARNING)

        # set root_symbol parser and assemble python grammar definition

        if self.root_symbol and 'root__' not in self.rules:
            declarations.append('root__ = ' + self.root_symbol)
        declarations.append('')
        self._result = '\n    '.join(declarations) \
                       + GRAMMAR_FACTORY.format(NAME=self.grammar_name)
        return self._result
588

589
590
591

    ## compilation methods

592
    def on_syntax(self, node: Node) -> str:
        """Compiles the root node of the EBNF syntax tree: compiles all
        definitions and directives and assembles the parser source code."""
        definitions = []  # type: List[Tuple[str, str]]

        # drop the wrapping sequence node
        if len(node.children) == 1 and not node.children[0].parser.name:
            node = node.children[0]

        # compile definitions and directives and collect definitions
        for nd in node.children:
            if nd.parser.name == "definition":
                definitions.append(self.compile(nd))
            else:
                assert nd.parser.name == "directive", nd.as_sxpr()
                self.compile(nd)
            # propagate the worst error level of any child to the root
            node.error_flag = max(node.error_flag, nd.error_flag)
        self.definitions.update(definitions)

        return self.assemble_parser(definitions, node)
610

611

612
    def on_definition(self, node: Node) -> Tuple[str, str]:
        """Compiles a single rule definition.

        Checks the rule name for redefinitions, reserved symbols, illegal
        names, clashes with preprocessor tokens and Python keywords, then
        compiles the rule's expression.

        Returns:
            A (rule name, Python parser expression) tuple; on a TypeError
            during compilation, an error marker pair instead.
        """
        rule = node.children[0].content
        if rule in self.rules:
            first = self.rules[rule][0]
            if not first._errors:
                first.add_error('First definition of rule "%s" '
                                'followed by illegal redefinitions.' % rule)
            node.add_error('A rule with name "%s" has already been defined earlier.' % rule)
        elif rule in EBNFCompiler.RESERVED_SYMBOLS:
            node.add_error('Symbol "%s" is a reserved symbol.' % rule)
        elif not sane_parser_name(rule):
            # fixed typo ("doube") and doubled space in this error message
            node.add_error('Illegal symbol "%s". Symbols must not start or '
                           'end with a double underscore "__".' % rule)
        elif rule in self.directives['tokens']:
            node.add_error('Symbol "%s" has already been defined as '
                           'a preprocessor token.' % rule)
        elif keyword.iskeyword(rule):
            node.add_error('Python keyword "%s" may not be used as a symbol. '
                           % rule + '(This may change in the future.)')
        try:
            self.current_symbols = [node]
            self.rules[rule] = self.current_symbols
            defn = self.compile(node.children[1])
            if rule in self.variables:
                defn = 'Capture(%s)' % defn
                self.variables.remove(rule)
            elif defn.find("(") < 0:
                # assume it's a synonym, like 'page = REGEX_PAGE_NR'
                defn = 'Synonym(%s)' % defn
        except TypeError as error:
            from traceback import extract_tb
            trace = str(extract_tb(error.__traceback__)[-1])
            errmsg = "%s (TypeError: %s; %s)\n%s" \
                     % (EBNFCompiler.AST_ERROR, str(error), trace, node.as_sxpr())
            node.add_error(errmsg)
            rule, defn = rule + ':error', '"' + errmsg + '"'
        return rule, defn
649

650

651
    def _check_rx(self, node: Node, rx: str) -> str:
        """
        Checks whether the string `rx` represents a valid regular
        expression. Multi-line regular expressions are prefixed with the
        verbose-flag `(?x)` so that line breaks do not break compilation.
        Returns the (possibly flag-prefixed) regular expression string.
        """
        flags = set(self.re_flags)
        if '\n' in rx:
            # allow insignificant whitespace in multi-line regular expressions
            flags.add('x')
        if flags:
            rx = "(?%s)%s" % ("".join(flags), rx)
        try:
            re.compile(rx)
        except Exception as re_error:
            node.add_error("malformed regular expression %s: %s" %
                           (repr(rx), str(re_error)))
        return rx

666

667
    def on_directive(self, node: Node) -> str:
        """
        Compiles a directive node (e.g. ``@ whitespace = ...``) and records
        its value in `self.directives`. Always returns the empty string,
        since directives do not contribute parser code of their own.
        """
        key = node.children[0].content.lower()
        assert key not in self.directives['tokens']

        # non-repeatable directives may appear only once; later definitions
        # are reported and ignored
        if key not in self.REPEATABLE_DIRECTIVES:
            if key in self.defined_directives:
                node.add_error('Directive "%s" has already been defined earlier. ' % key + \
                               'Later definition will be ignored!',
                               code=Error.REDEFINED_DIRECTIVE_WARNING)
                return ""
            self.defined_directives.add(key)

        if key in {'comment', 'whitespace'}:
            if node.children[1].parser.name == "list_":
                if len(node.children[1].result) != 1:
                    node.add_error('Directive "%s" must have one, but not %i values.' %
                                   (key, len(node.children[1].result)))
                value = self.compile(node.children[1]).pop()
                if key == 'whitespace' and value in EBNFCompiler.WHITESPACE:
                    value = EBNFCompiler.WHITESPACE[value]  # replace whitespace-name by regex
                else:
                    node.add_error('Value "%s" not allowed for directive "%s".' % (value, key))
            else:
                # a literal or regex value; the whitespace marker '~' makes no
                # sense inside the definition of whitespace/comments itself
                value = node.children[1].content.strip("~")
                if value != node.children[1].content:
                    node.add_error("Whitespace marker '~' not allowed in definition of "
                                   "%s regular expression." % key)
                # NOTE(review): value[0]/value[-1] assume a non-empty value —
                # an empty directive value would raise IndexError here; confirm
                # the grammar rules this out.
                if value[0] + value[-1] in {'""', "''"}:
                    value = escape_re(value[1:-1])
                elif value[0] + value[-1] == '//':
                    value = self._check_rx(node, value[1:-1])
                if key == 'whitespace' and not re.match(value, ''):
                    node.add_error("Implicit whitespace should always match the empty string, "
                                   "/%s/ does not." % value)
            self.directives[key] = value

        elif key == 'ignorecase':
            if node.children[1].content.lower() not in {"off", "false", "no"}:
                self.re_flags.add('i')

        elif key == 'literalws':
            value = {item.lower() for item in self.compile(node.children[1])}
            if ((value - {'left', 'right', 'both', 'none'})
                    or ('none' in value and len(value) > 1)):
                node.add_error('Directive "literalws" allows the values '
                               '`left`, `right`, `both` or `none`, '
                               'but not `%s`' % ", ".join(value))
            # fixed: `{}` is an empty dict, not an empty set (harmless under
            # list(), but wrong idiom)
            wsp = {'left', 'right'} if 'both' in value \
                else set() if 'none' in value else value
            self.directives[key] = list(wsp)

        elif key in {'tokens', 'preprocessor_tokens'}:
            tokens = self.compile(node.children[1])
            redeclared = self.directives['tokens'] & tokens
            if redeclared:
                node.add_error('Tokens %s have already been declared earlier. '
                               % str(redeclared) + 'Later declaration will be ignored',
                               code=Error.REDECLARED_TOKEN_WARNING)
            self.directives['tokens'] |= tokens - redeclared

        elif key.endswith('_filter'):
            filter_set = self.compile(node.children[1])
            if not isinstance(filter_set, set) or len(filter_set) != 1:
                # fixed message: was "accepts exactly on symbol"
                node.add_error('Directive "%s" accepts exactly one symbol, not %s'
                               % (key, str(filter_set)))
            self.directives['filter'][key[:-7]] = filter_set.pop()

        else:
            node.add_error('Unknown directive %s ! (Known ones are %s .)' %
                           (key, ', '.join(list(self.directives.keys()))))
        return ""

744

745
    def non_terminal(self, node: Node, parser_class: str, custom_args: List[str] = None) -> str:
        """
        Compiles any non-terminal, where `parser_class` indicates the Parser class
        name for the particular non-terminal.

        `custom_args` are extra keyword-argument strings appended to the
        compiled argument list (e.g. ``mandatory=1``).
        """
        # fixed: the default used to be a mutable `[]` shared between calls
        if custom_args is None:
            custom_args = []
        arguments = [self.compile(r) for r in node.children] + custom_args
        # propagate the worst child error code; `default` guards against a
        # (theoretical) childless node, which previously raised ValueError
        node.error_flag = max(node.error_flag,
                              max((t.error_flag for t in node.children), default=0))
        return parser_class + '(' + ', '.join(arguments) + ')'

754

755
    def on_expression(self, node) -> str:
        """Compiles an 'expression'-node into an Alternative-parser."""
        # TODO: Add check for errors like "a" | "ab" (which will always yield a, even for ab)
        compiled = self.non_terminal(node, 'Alternative')
        return compiled

759

760
    def on_term(self, node) -> str:
        """
        Compiles a 'term'-node into a Series-parser.

        Basically this is just `self.non_terminal(node, 'Series')`; the extra
        work handles the mandatory §-operator: §-tokens are filtered out of
        the children and their position is passed on via the `mandatory`
        keyword argument instead.
        """
        mandatory_marker = []
        filtered_children = []
        for child in node.children:
            if child.parser.ptype == TOKEN_PTYPE and child.content == "§":
                # position of the marker within the filtered series
                pos = len(filtered_children)
                mandatory_marker.append(pos)
                if pos == 0:
                    child.add_error('First item of a series should not be mandatory.',
                                    Error.WARNING)
                elif len(mandatory_marker) > 1:
                    child.add_error('One mandatory marker (§) sufficient to declare the '
                                    'rest of the series as mandatory.', Error.WARNING)
            else:
                filtered_children.append(child)
        # temporarily swap in the filtered children for compilation
        saved_result = node.result
        node.result = tuple(filtered_children)
        if mandatory_marker:
            custom_args = ['mandatory=%i' % mandatory_marker[0]]
        else:
            custom_args = []
        compiled = self.non_terminal(node, 'Series', custom_args)
        node.result = saved_result
        return compiled
786

787

788
    def on_factor(self, node: Node) -> str:
        """
        Compiles a 'factor'-node, i.e. a prefixed expression such as a
        retrieve-operator (``::``/``:``), a lookahead/lookbehind or a flow
        operator, by dispatching on the prefix via `self.PREFIX_TABLE`.
        """
        assert node.children
        assert len(node.children) >= 2, node.as_sxpr()
        prefix = node.children[0].content
        custom_args = []  # type: List[str]

        if prefix in {'::', ':'}:
            # retrieve/pop operators require a plain symbol as their argument
            assert len(node.children) == 2
            arg = node.children[-1]
            if arg.parser.name != 'symbol':
                node.add_error(('Retrieve Operator "%s" requires a symbol, '
                                'and not a %s.') % (prefix, str(arg.parser)))
                return str(arg.result)
            if str(arg) in self.directives['filter']:
                # attach the symbol-specific retrieve-filter, if one was declared
                custom_args = ['rfilter=%s' % self.directives['filter'][str(arg)]]
            # remember the symbol so its definition gets wrapped in Capture()
            self.variables.add(str(arg))  # cast(str, arg.result)

        elif len(node.children) > 2:
            # more than one child after the prefix: fold children[1:] into a
            # single node so that the prefix applies to the whole group
            # shift = (Node(node.parser, node.result[1].result),)
            # node.result[1].result = shift + node.result[2:]
            node.children[1].result = (Node(node.children[1].parser, node.children[1].result),) \
                                    + node.children[2:]
            node.children[1].parser = node.parser
            node.result = (node.children[0], node.children[1])

        # drop the prefix token; only the operand(s) get compiled as arguments
        node.result = node.children[1:]
        try:
            parser_class = self.PREFIX_TABLE[prefix]
            result = self.non_terminal(node, parser_class, custom_args)
            if prefix[:1] == '-':
                # lookbehind operators: verify (after all rules are known) that
                # the operand ultimately resolves to a plain regexp
                def check(node):
                    nd = node
                    if len(nd.children) >= 1:
                        nd = nd.children[0]
                    # follow chains of symbol references to the defining node
                    while nd.parser.name == "symbol":
                        symlist = self.rules.get(nd.content, [])
                        if len(symlist) == 2:
                            nd = symlist[1]
                        else:
                            if len(symlist) == 1:
                                nd = symlist[0].children[1]
                            break
                    if (nd.parser.name != "regexp" or nd.content[:1] != '/'
                            or nd.content[-1:] != '/'):
                        node.add_error("Lookbehind-parser can only be used with plain RegExp-"
                                       "parsers, not with: " + nd.parser.name + nd.parser.ptype)

                # defer the check until all symbol definitions have been compiled
                if not result.startswith('RegExp('):
                    self.deferred_tasks.append(lambda: check(node))
            return result
        except KeyError:
            node.add_error('Unknown prefix "%s".' % prefix)
        return ""
841

842

843
    def on_option(self, node) -> str:
        """Compiles an 'option'-node into an Option-parser."""
        compiled = self.non_terminal(node, 'Option')
        return compiled
845

846

847
    def on_repetition(self, node) -> str:
        """Compiles a 'repetition'-node into a ZeroOrMore-parser."""
        compiled = self.non_terminal(node, 'ZeroOrMore')
        return compiled

850

851
    def on_oneormore(self, node) -> str:
        """Compiles a 'oneormore'-node into a OneOrMore-parser."""
        compiled = self.non_terminal(node, 'OneOrMore')
        return compiled

854

855
    def on_group(self, node) -> str:
        """Groups must never reach the compiler; they are dissolved earlier."""
        message = ("Group nodes should have been eliminated by "
                   "AST transformation!")
        raise EBNFCompilerError(message)

859
    def on_unordered(self, node) -> str:
860
861
862
        # return self.non_terminal(node, 'Unordered')
        assert len(node.children) == 1
        nd = node.children[0]
eckhart's avatar
eckhart committed
863
        for child in nd.children:
864
            if child.parser.ptype == TOKEN_PTYPE and nd.content == "§":
865
                node.add_error("Unordered parser lists cannot contain mandatory (§) items.")