# ebnf.py - EBNF -> Python-Parser compilation for DHParser
#
# Copyright 2016  by Eckhart Arnold (arnold@badw.de)
#                 Bavarian Academy of Sciences and Humanities (badw.de)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.  See the License for the specific language governing
# permissions and limitations under the License.


"""
Module ``ebnf`` provides a self-hosting parser for EBNF-Grammars as
well as an EBNF-compiler that compiles an EBNF-Grammar into a
DHParser-based Grammar class that can be executed to parse source text
conforming to this grammar into concrete syntax trees.
"""

import keyword
from collections import OrderedDict
from functools import partial

from DHParser.compile import CompilerError, Compiler
from DHParser.error import Error
from DHParser.parse import Grammar, mixin_comment, Forward, RegExp, Whitespace, RE, \
    NegativeLookahead, Alternative, Series, Option, OneOrMore, ZeroOrMore, Token
from DHParser.preprocess import nil_preprocessor, PreprocessorFunc
from DHParser.syntaxtree import Node, WHITESPACE_PTYPE, TOKEN_PTYPE
from DHParser.toolkit import load_if_file, escape_re, md5, sane_parser_name, re, expand_table, \
    typing
from DHParser.transform import TransformationFunc, traverse, remove_brackets, \
    reduce_single_child, replace_by_single_child, remove_expendables, \
    remove_tokens, flatten, forbid, assert_content, remove_infix_operator
from DHParser.versionnumber import __version__

from typing import Callable, Dict, List, Set, Tuple


__all__ = ('get_ebnf_preprocessor',
           'get_ebnf_grammar',
           'get_ebnf_transformer',
           'get_ebnf_compiler',
           'EBNFGrammar',
           'EBNFTransform',
           'EBNFCompilerError',
           'EBNFCompiler',
           'grammar_changed',
           'PreprocessorFactoryFunc',
           'ParserFactoryFunc',
           'TransformerFactoryFunc',
           'CompilerFactoryFunc')


########################################################################
#
# EBNF scanning
#
########################################################################


def get_ebnf_preprocessor() -> PreprocessorFunc:
    return nil_preprocessor


########################################################################
#
# EBNF parsing
#
########################################################################


class EBNFGrammar(Grammar):
    r"""Parser for an EBNF source file, with this grammar:

    # EBNF-Grammar in EBNF

    @ comment    = /#.*(?:\n|$)/                    # comments start with '#' and eat all chars up to and including '\n'
    @ whitespace = /\s*/                            # whitespace includes linefeed
    @ literalws  = right                            # trailing whitespace of literals will be ignored tacitly

    syntax     = [~//] { definition | directive } §EOF
    definition = symbol §"=" expression
    directive  = "@" §symbol "=" ( regexp | literal | list_ )

    expression = term { "|" term }
    term       = { ["§"] factor }+                       # "§" means all following factors mandatory
    factor     = [flowmarker] [retrieveop] symbol !"="   # negative lookahead to be sure it's not a definition
               | [flowmarker] literal
               | [flowmarker] plaintext
               | [flowmarker] regexp
               | [flowmarker] whitespace
               | [flowmarker] oneormore
               | [flowmarker] group
               | [flowmarker] unordered
               | repetition
               | option

    flowmarker = "!"  | "&"                         # '!' negative lookahead, '&' positive lookahead
               | "-!" | "-&"                        # '-' negative lookbehind, '-&' positive lookbehind
    retrieveop = "::" | ":"                         # '::' pop, ':' retrieve

    group      = "(" §expression ")"
    unordered  = "<" §expression ">"                # elements of expression in arbitrary order
    oneormore  = "{" expression "}+"
    repetition = "{" §expression "}"
    option     = "[" §expression "]"

    symbol     = /(?!\d)\w+/~                       # e.g. expression, factor, parameter_list
    literal    = /"(?:[^"]|\\")*?"/~                # e.g. "(", '+', 'while'
               | /'(?:[^']|\\')*?'/~                # whitespace following literals will be ignored tacitly.
    plaintext  = /`(?:[^"]|\\")*?`/~                # like literal but does not eat whitespace
    regexp     = /~?\/(?:\\\/|[^\/])*?\/~?/~        # e.g. /\w+/, ~/#.*(?:\n|$)/~
                                                    # '~' is a whitespace-marker, if present leading or trailing
                                                    # whitespace of a regular expression will be ignored tacitly.
    whitespace = /~/~                               # implicit or default whitespace
    list_      = /\w+/~ { "," /\w+/~ }              # comma separated list of symbols, e.g. BEGIN_LIST, END_LIST,
                                                    # BEGIN_QUOTE, END_QUOTE ; see CommonMark/markdown.py for an example
    EOF = !/./
    """
    expression = Forward()
    source_hash__ = "3fc9f5a340f560e847d9af0b61a68743"
    parser_initialization__ = "upon instantiation"
    COMMENT__ = r'#.*(?:\n|$)'
    WHITESPACE__ = r'\s*'
    WSP__ = mixin_comment(whitespace=WHITESPACE__, comment=COMMENT__)
    wspL__ = ''
    wspR__ = WSP__
    whitespace__ = Whitespace(WSP__)
    EOF = NegativeLookahead(RegExp('.'))
    list_ = Series(RE('\\w+'), ZeroOrMore(Series(Token(","), RE('\\w+'))))
    whitespace = RE('~')
    regexp = RE('~?/(?:\\\\/|[^/])*?/~?')
    plaintext = RE('`(?:[^"]|\\\\")*?`')
    literal = Alternative(RE('"(?:[^"]|\\\\")*?"'), RE("'(?:[^']|\\\\')*?'"))
    symbol = RE('(?!\\d)\\w+')
    option = Series(Token("["), expression, Token("]"), mandatory=1)
    repetition = Series(Token("{"), expression, Token("}"), mandatory=1)
    oneormore = Series(Token("{"), expression, Token("}+"))
    unordered = Series(Token("<"), expression, Token(">"), mandatory=1)
    group = Series(Token("("), expression, Token(")"), mandatory=1)
    retrieveop = Alternative(Token("::"), Token(":"))
    flowmarker = Alternative(Token("!"), Token("&"), Token("-!"), Token("-&"))
    factor = Alternative(Series(Option(flowmarker), Option(retrieveop), symbol, NegativeLookahead(Token("="))),
                         Series(Option(flowmarker), literal), Series(Option(flowmarker), plaintext),
                         Series(Option(flowmarker), regexp), Series(Option(flowmarker), whitespace),
                         Series(Option(flowmarker), oneormore), Series(Option(flowmarker), group),
                         Series(Option(flowmarker), unordered), repetition, option)
    term = OneOrMore(Series(Option(Token("§")), factor))
    expression.set(Series(term, ZeroOrMore(Series(Token("|"), term))))
    directive = Series(Token("@"), symbol, Token("="), Alternative(regexp, literal, list_), mandatory=1)
    definition = Series(symbol, Token("="), expression, mandatory=1)
    syntax = Series(Option(RE('', wR='', wL=WSP__)), ZeroOrMore(Alternative(definition, directive)), EOF, mandatory=2)
    root__ = syntax
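
# A minimal usage sketch (the EBNF snippet is made up, not taken from this
# module): an EBNFGrammar instance is callable and yields a concrete syntax
# tree for the given EBNF source.
#
#     cst = EBNFGrammar()('key = "value" \n')
#     assert not cst.error_flag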


def grammar_changed(grammar_class, grammar_source: str) -> bool:
    """
    Returns ``True`` if ``grammar_class`` does not reflect the latest
    changes of ``grammar_source``

    Parameters:
        grammar_class:  the parser class representing the grammar
            or the file name of a compiler suite containing the grammar
        grammar_source:  File name or string representation of the
            EBNF code of the grammar

    Returns (bool):
        True, if the source text of the grammar is different from the
        source from which the grammar class was generated
    """
    grammar = load_if_file(grammar_source)
    chksum = md5(grammar, __version__)
    if isinstance(grammar_class, str):
        # grammar_class = load_compiler_suite(grammar_class)[1]
        with open(grammar_class, 'r', encoding='utf8') as f:
            pycode = f.read()
        m = re.search(r'class \w*\(Grammar\)', pycode)
        if m:
            m = re.search('    source_hash__ *= *"([a-z0-9]*)"',
                          pycode[m.span()[1]:])
            return not (m and m.groups() and m.groups()[-1] == chksum)
        else:
            return True
    else:
        return chksum != grammar_class.source_hash__
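
# Usage sketch for `grammar_changed`: decide whether a previously generated
# grammar class is out of date with respect to its EBNF source. The grammar
# class and file name below are hypothetical.
#
#     if grammar_changed(ArithmeticGrammar, "Arithmetic.ebnf"):
#         ...  # recompile the EBNF grammar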


def get_ebnf_grammar() -> EBNFGrammar:
    global thread_local_ebnf_grammar_singleton
    try:
        grammar = thread_local_ebnf_grammar_singleton
        return grammar
    except NameError:
        thread_local_ebnf_grammar_singleton = EBNFGrammar()
        return thread_local_ebnf_grammar_singleton


########################################################################
#
# EBNF concrete to abstract syntax tree transformation and validation
#
########################################################################


EBNF_AST_transformation_table = {
    # AST Transformations for EBNF-grammar
    "+":
        remove_expendables,
    "syntax":
        [],  # otherwise '"*": replace_by_single_child' would be applied
    "directive, definition":
        remove_tokens('@', '='),
    "expression":
        [replace_by_single_child, flatten, remove_tokens('|')],  # remove_infix_operator],
    "term":
        [replace_by_single_child, flatten],  # supports both idioms:
                                             # "{ factor }+" and "factor { factor }"
    "factor, flowmarker, retrieveop":
        replace_by_single_child,
    "group":
        [remove_brackets, replace_by_single_child],
    "unordered":
        remove_brackets,
    "oneormore, repetition, option":
        [reduce_single_child, remove_brackets,
         forbid('repetition', 'option', 'oneormore'), assert_content(r'(?!§)(?:.|\n)*')],
    "symbol, literal, regexp":
        reduce_single_child,
    (TOKEN_PTYPE, WHITESPACE_PTYPE):
        reduce_single_child,
    "list_":
        [flatten, remove_infix_operator],
    "*":
        replace_by_single_child
}


def EBNFTransform() -> TransformationFunc:
    return partial(traverse, processing_table=EBNF_AST_transformation_table.copy())

def get_ebnf_transformer() -> TransformationFunc:
    global thread_local_EBNF_transformer_singleton
    try:
        transformer = thread_local_EBNF_transformer_singleton
    except NameError:
        thread_local_EBNF_transformer_singleton = EBNFTransform()
        transformer = thread_local_EBNF_transformer_singleton
    return transformer
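
# Typical pipeline sketch: parse EBNF source into a concrete syntax tree and
# transform it in place into an abstract syntax tree (`ebnf_source` is assumed
# to hold the text of an EBNF grammar).
#
#     syntax_tree = get_ebnf_grammar()(ebnf_source)
#     get_ebnf_transformer()(syntax_tree)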


########################################################################
#
# EBNF abstract syntax tree to Python parser compilation
#
########################################################################


PreprocessorFactoryFunc = Callable[[], PreprocessorFunc]
ParserFactoryFunc = Callable[[], Grammar]
TransformerFactoryFunc = Callable[[], TransformationFunc]
CompilerFactoryFunc = Callable[[], Compiler]


PREPROCESSOR_FACTORY = '''
def get_preprocessor() -> PreprocessorFunc:
    return {NAME}Preprocessor
'''


GRAMMAR_FACTORY = '''
def get_grammar() -> {NAME}Grammar:
    global thread_local_{NAME}_grammar_singleton
    try:
        grammar = thread_local_{NAME}_grammar_singleton
    except NameError:
        thread_local_{NAME}_grammar_singleton = {NAME}Grammar()
        grammar = thread_local_{NAME}_grammar_singleton
    return grammar
'''


TRANSFORMER_FACTORY = '''
def {NAME}Transform() -> TransformationDict:
    return partial(traverse, processing_table={NAME}_AST_transformation_table.copy())

def get_transformer() -> TransformationFunc:
    global thread_local_{NAME}_transformer_singleton
    try:
        transformer = thread_local_{NAME}_transformer_singleton
    except NameError:
        thread_local_{NAME}_transformer_singleton = {NAME}Transform()
        transformer = thread_local_{NAME}_transformer_singleton
    return transformer
'''


COMPILER_FACTORY = '''
def get_compiler(grammar_name="{NAME}", grammar_source="") -> {NAME}Compiler:
    global thread_local_{NAME}_compiler_singleton
    try:
        compiler = thread_local_{NAME}_compiler_singleton
        compiler.set_grammar_name(grammar_name, grammar_source)
    except NameError:
        thread_local_{NAME}_compiler_singleton = \\
            {NAME}Compiler(grammar_name, grammar_source)
        compiler = thread_local_{NAME}_compiler_singleton
    return compiler
'''
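
# The factory templates above are plain format strings that get specialized for
# a concrete grammar via `str.format`. A sketch with a made-up grammar name:
#
#     grammar_factory_src = GRAMMAR_FACTORY.format(NAME="Arithmetic")
#     # -> source code of a get_grammar() function that lazily creates and
#     #    caches a thread-local ArithmeticGrammar instance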

Eckhart Arnold's avatar
Eckhart Arnold committed
316

class EBNFCompilerError(CompilerError):
    """Error raised by `EBNFCompiler` class. (Not compilation errors
    in the strict sense, see `CompilationError` in module ``dsl.py``)"""
    pass


class EBNFCompiler(Compiler):
    """
    Generates a Parser from an abstract syntax tree of a grammar specified
    in EBNF-Notation.

    Instances of this class must be called with the root-node of the
    abstract syntax tree from an EBNF-specification of a formal language.
    The returned value is the Python-source-code of a Grammar class for
    this language that can be used to parse texts in this language.
    See classes `parser.Compiler` and `parser.Grammar` for more information.

    Additionally, class EBNFCompiler provides helper methods to generate
    code-skeletons for a preprocessor, AST-transformation and full
    compilation of the formal language. These methods' names start with
    the prefix `gen_`.

    Attributes:
        current_symbols:  During compilation, a list containing the root
                node of the currently compiled definition as first element
                and then the nodes of the symbols that are referred to in
                the currently compiled definition.

        rules:  Dictionary that maps rule names to a list of Nodes that
                contain symbol-references in the definition of the rule.
                The first item in the list is the node of the rule-
                definition itself. Example:

                           `alternative = a | b`

                Now `[node.content for node in self.rules['alternative']]`
                yields `['alternative = a | b', 'a', 'b']`

        symbols:  A mapping of symbol names to their first usage (not
                their definition!) in the EBNF source.

        variables:  A set of symbol names that are used with the
                Pop or Retrieve operator. Because the values of these
                symbols need to be captured they are called variables.
                See `test_parser.TestPopRetrieve` for an example.

        recursive:  A set of symbols that are used recursively and
                therefore require a `Forward`-operator.

        definitions:  A dictionary of definitions. In contrast to `rules`,
                this maps the symbols to their compiled definienda.

        deferred_tasks:  A list of callables that is filled during
                compilation, but that will be executed only after
                compilation has finished. Typically, it contains
                semantic checks that require information that
                is only available upon completion of compilation.

        root:   The name of the root symbol.

        directives:  A dictionary of all directives and their default
                values.

        re_flags:  A set of regular expression flags to be added to all
                regular expressions found in the current parsing process.
    """
    COMMENT_KEYWORD = "COMMENT__"
    WHITESPACE_KEYWORD = "WSP__"
    RAW_WS_KEYWORD = "WHITESPACE__"
    WHITESPACE_PARSER_KEYWORD = "whitespace__"
    RESERVED_SYMBOLS = {WHITESPACE_KEYWORD, RAW_WS_KEYWORD, COMMENT_KEYWORD}
    AST_ERROR = "Badly structured syntax tree. " \
                "Potentially due to erroneous AST transformation."
    PREFIX_TABLE = {'§': 'Required',
                    '&': 'Lookahead', '!': 'NegativeLookahead',
                    '-&': 'Lookbehind', '-!': 'NegativeLookbehind',
                    '::': 'Pop', ':': 'Retrieve'}
    WHITESPACE = {'horizontal': r'[\t ]*',  # default: horizontal
                  'linefeed': r'[ \t]*\n?(?!\s*\n)[ \t]*',
                  'vertical': r'\s*'}
    REPEATABLE_DIRECTIVES = {'tokens'}


    def __init__(self, grammar_name="", grammar_source=""):
        super(EBNFCompiler, self).__init__(grammar_name, grammar_source)
        self._reset()


    def _reset(self):
        super(EBNFCompiler, self)._reset()
        self._result = ''           # type: str
        self.re_flags = set()       # type: Set[str]
        self.rules = OrderedDict()  # type: OrderedDict[str, List[Node]]
        self.current_symbols = []   # type: List[Node]
        self.symbols = {}           # type: Dict[str, Node]
        self.variables = set()      # type: Set[str]
        self.recursive = set()      # type: Set[str]
        self.definitions = {}       # type: Dict[str, str]
        self.deferred_tasks = []    # type: List[Callable]
        self.root_symbol = ""       # type: str
        self.directives = {'whitespace': self.WHITESPACE['vertical'],
                           'comment': '',
                           'literalws': {'right'},
                           'tokens': set(),  # alt. 'preprocessor_tokens'
                           'filter': dict()}  # alt. 'filter'
        # self.directives['ignorecase']: False
        self.defined_directives = set()  # type: Set[str]

    @property
    def result(self) -> str:
        return self._result

    # methods for generating skeleton code for preprocessor, transformer, and compiler

    def gen_preprocessor_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for a preprocessor-function for
        the previously compiled formal language.
        """
        name = self.grammar_name + "Preprocessor"
        return "def %s(text):\n    return text, lambda i: i\n" % name \
               + PREPROCESSOR_FACTORY.format(NAME=self.grammar_name)


    def gen_transformer_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for the AST-transformation for the
        previously compiled formal language.
        """
        if not self.rules:
            raise EBNFCompilerError('Compiler must be run before calling '
                                    '"gen_transformer_skeleton()"!')
        tt_name = self.grammar_name + '_AST_transformation_table'
        transtable = [tt_name + ' = {',
                      '    # AST Transformations for the ' + self.grammar_name + '-grammar']
        transtable.append('    "+": remove_empty,')
        for name in self.rules:
            transformations = '[]'
            rule = self.definitions[name]
            if rule.startswith('Alternative'):
                transformations = '[replace_or_reduce]'
            elif rule.startswith('Synonym'):
                transformations = '[reduce_single_child]'
            transtable.append('    "' + name + '": %s,' % transformations)
        transtable.append('    ":Token, :RE": reduce_single_child,')
        transtable += ['    "*": replace_by_single_child', '}', '']
        transtable += [TRANSFORMER_FACTORY.format(NAME=self.grammar_name)]
        return '\n'.join(transtable)


    def gen_compiler_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for a Compiler-class for the
        previously compiled formal language.
        """
        if not self.rules:
            raise EBNFCompilerError('Compiler has not been run before calling '
                                    '"gen_compiler_skeleton()"!')
        compiler = ['class ' + self.grammar_name + 'Compiler(Compiler):',
                    '    """Compiler for the abstract-syntax-tree of a ' +
                    self.grammar_name + ' source file.',
                    '    """', '',
                    '    def __init__(self, grammar_name="' +
                    self.grammar_name + '", grammar_source=""):',
                    '        super(' + self.grammar_name +
                    'Compiler, self).__init__(grammar_name, grammar_source)',
                    r"        assert re.match('\w+\Z', grammar_name)", '']
        for name in self.rules:
            method_name = Compiler.method_name(name)
            if name == self.root_symbol:
                compiler += ['    def ' + method_name + '(self, node):',
                             '        return self.fallback_compiler(node)', '']
            else:
                compiler += ['    # def ' + method_name + '(self, node):',
                             '    #     return node', '']
        compiler += [COMPILER_FACTORY.format(NAME=self.grammar_name)]
        return '\n'.join(compiler)

    def verify_transformation_table(self, transtable):
        """
        Checks for symbols that occur in the transformation-table but have
        never been defined in the grammar. Usually, this kind of
        inconsistency results from an error like a typo in the transformation
        table.
        """
        assert self._dirty_flag
        table_entries = set(expand_table(transtable).keys()) - {'*', '+', '~'}
        symbols = self.rules.keys()
        messages = []
        for entry in table_entries:
            if entry not in symbols and not entry.startswith(":"):
                messages.append(Error(('Symbol "%s" is not defined in grammar %s but appears in '
                                       'the transformation table!') % (entry, self.grammar_name),
                                      Error.UNDEFINED_SYMBOL_IN_TRANSFORMATION_TABLE, 0))
        return messages


    def assemble_parser(self, definitions: List[Tuple[str, str]], root_node: Node) -> str:
        """
        Creates the Python code for the parser after compilation of
        the EBNF-Grammar
        """
519
520
521
522
523
524
525
526
527
528

        # execute deferred tasks, for example semantic checks that cannot
        # be done before the symbol table is complete

        for task in self.deferred_tasks:
            task()

        # provide for capturing of symbols that are variables, i.e. the
        # value of will be retrieved at some point during the parsing process

        if self.variables:
            for i in range(len(definitions)):
                if definitions[i][0] in self.variables:
                    definitions[i] = (definitions[i][0], 'Capture(%s)' % definitions[i][1])

        # add special fields for Grammar class

        definitions.append((self.WHITESPACE_PARSER_KEYWORD,
                            'Whitespace(%s)' % self.WHITESPACE_KEYWORD))
        definitions.append(('wspR__', self.WHITESPACE_KEYWORD
                            if 'right' in self.directives['literalws'] else "''"))
        definitions.append(('wspL__', self.WHITESPACE_KEYWORD
                            if 'left' in self.directives['literalws'] else "''"))
        definitions.append((self.WHITESPACE_KEYWORD,
                            ("mixin_comment(whitespace=" + self.RAW_WS_KEYWORD +
                             ", comment=" + self.COMMENT_KEYWORD + ")")))
        definitions.append((self.RAW_WS_KEYWORD, "r'{whitespace}'".format(**self.directives)))
        definitions.append((self.COMMENT_KEYWORD, "r'{comment}'".format(**self.directives)))

        # prepare parser class header and docstring and
        # add EBNF grammar to the doc string of the parser class

        article = 'an ' if self.grammar_name[0:1] in "AaEeIiOoUu" else 'a '  # what about 'hour', 'universe' etc.?
        declarations = ['class ' + self.grammar_name +
                        'Grammar(Grammar):',
                        'r"""Parser for ' + article + self.grammar_name +
                        ' source file' +
                        (', with this grammar:' if self.grammar_source else '.')]
        definitions.append(('parser_initialization__', '"upon instantiation"'))
        if self.grammar_source:
            definitions.append(('source_hash__',
                                '"%s"' % md5(self.grammar_source, __version__)))
            declarations.append('')
            declarations += [line for line in self.grammar_source.split('\n')]
            while declarations[-1].strip() == '':
                declarations = declarations[:-1]
        declarations.append('"""')

        # turn definitions into declarations in reverse order

        self.root_symbol = definitions[0][0] if definitions else ""
        definitions.reverse()
        declarations += [symbol + ' = Forward()'
                         for symbol in sorted(list(self.recursive))]
        for symbol, statement in definitions:
            if symbol in self.recursive:
                declarations += [symbol + '.set(' + statement + ')']
            else:
                declarations += [symbol + ' = ' + statement]

        # check for symbols used but never defined

        defined_symbols = set(self.rules.keys()) | self.RESERVED_SYMBOLS
        for symbol in self.symbols:
            if symbol not in defined_symbols:
                self.tree.add_error(self.symbols[symbol],
                               "Missing definition for symbol '%s'" % symbol)
                # root_node.error_flag = True

        # check for unconnected rules

        defined_symbols.difference_update(self.RESERVED_SYMBOLS)

        def remove_connections(symbol):
            """Recursively removes all symbols which appear in the
            definiens of a particular symbol."""
            if symbol in defined_symbols:
                defined_symbols.remove(symbol)
                for related in self.rules[symbol][1:]:
                    remove_connections(str(related))

        remove_connections(self.root_symbol)
        for leftover in defined_symbols:
            self.tree.add_error(self.rules[leftover][0],
                ('Rule "%s" is not connected to parser root "%s" !') %
                (leftover, self.root_symbol), Error.WARNING)

        # set root_symbol parser and assemble python grammar definition

        if self.root_symbol and 'root__' not in self.rules:
            declarations.append('root__ = ' + self.root_symbol)
        declarations.append('')
        self._result = '\n    '.join(declarations) \
                       + GRAMMAR_FACTORY.format(NAME=self.grammar_name)
        return self._result


    ## compilation methods

    def on_syntax(self, node: Node) -> str:
        definitions = []  # type: List[Tuple[str, str]]

        # drop the wrapping sequence node
        if len(node.children) == 1 and not node.children[0].parser.name:
            node = node.children[0]

        # compile definitions and directives and collect definitions
        for nd in node.children:
            if nd.parser.name == "definition":
                definitions.append(self.compile(nd))
            else:
                assert nd.parser.name == "directive", nd.as_sxpr()
                self.compile(nd)
            # node.error_flag = max(node.error_flag, nd.error_flag)
        self.definitions.update(definitions)

        return self.assemble_parser(definitions, node)


    def on_definition(self, node: Node) -> Tuple[str, str]:
        rule = node.children[0].content
        if rule in self.rules:
            first = self.rules[rule][0]
            if not first.errors:
                self.tree.add_error(first, 'First definition of rule "%s" '
                               'followed by illegal redefinitions.' % rule)
            self.tree.add_error(node, 'A rule "%s" has already been defined earlier.' % rule)
        elif rule in EBNFCompiler.RESERVED_SYMBOLS:
            self.tree.add_error(node, 'Symbol "%s" is a reserved symbol.' % rule)
        elif not sane_parser_name(rule):
            self.tree.add_error(node, 'Illegal symbol "%s". Symbols must not start or '
                                'end with a double underscore "__".' % rule)
        elif rule in self.directives['tokens']:
            self.tree.add_error(node, 'Symbol "%s" has already been defined as '
                                'a preprocessor token.' % rule)
        elif keyword.iskeyword(rule):
            self.tree.add_error(node, 'Python keyword "%s" may not be used as a symbol. '
                                % rule + '(This may change in the future.)')
        try:
            self.current_symbols = [node]
            self.rules[rule] = self.current_symbols
            defn = self.compile(node.children[1])
            if rule in self.variables:
                defn = 'Capture(%s)' % defn
                self.variables.remove(rule)
            elif defn.find("(") < 0:
                # assume it's a synonym, like 'page = REGEX_PAGE_NR'
                defn = 'Synonym(%s)' % defn
        except TypeError as error:
            from traceback import extract_tb
            trace = str(extract_tb(error.__traceback__)[-1])
            errmsg = "%s (TypeError: %s; %s)\n%s" \
                     % (EBNFCompiler.AST_ERROR, str(error), trace, node.as_sxpr())
            self.tree.add_error(node, errmsg)
            rule, defn = rule + ':error', '"' + errmsg + '"'
        return rule, defn


    def _check_rx(self, node: Node, rx: str) -> str:
        """
        Checks whether the string `rx` represents a valid regular
        expression. Makes sure that multiline regular expressions are
        prepended by the verbose-flag '(?x)'. Returns the regular
        expression string.
        """
        flags = self.re_flags | {'x'} if rx.find('\n') >= 0 else self.re_flags
        if flags:  rx = "(?%s)%s" % ("".join(flags), rx)
        try:
            re.compile(rx)
        except Exception as re_error:
            self.tree.add_error(node, "malformed regular expression %s: %s" %
                                (repr(rx), str(re_error)))
        return rx
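
    # Worked example (sketch): a regular expression taken from the EBNF source
    # that spans several lines - i.e. whose pattern string contains a newline -
    # is returned with the verbose-flag prepended ("(?x)..."), so that `re`
    # treats the layout whitespace inside the pattern as insignificant.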


    def on_directive(self, node: Node) -> str:
        key = node.children[0].content.lower()
        assert key not in self.directives['tokens']

        if key not in self.REPEATABLE_DIRECTIVES:
            if key in self.defined_directives:
                self.tree.add_error(node, 'Directive "%s" has already been defined earlier. '
                                    % key + 'Later definition will be ignored!',
                                    code=Error.REDEFINED_DIRECTIVE_WARNING)
                return ""
            self.defined_directives.add(key)

        if key in {'comment', 'whitespace'}:
            if node.children[1].parser.name == "list_":
                if len(node.children[1].result) != 1:
                    self.tree.add_error(node, 'Directive "%s" must have one, but not %i values.'
                                        % (key, len(node.children[1].result)))
                value = self.compile(node.children[1]).pop()
                if key == 'whitespace' and value in EBNFCompiler.WHITESPACE:
                    value = EBNFCompiler.WHITESPACE[value]  # replace whitespace-name by regex
                else:
                    self.tree.add_error(node, 'Value "%s" not allowed for directive "%s".'
                                        % (value, key))
            else:
                value = node.children[1].content.strip("~")  # cast(str, node.children[
                # 1].result).strip("~")
                if value != node.children[1].content:  # cast(str, node.children[1].result):
                    self.tree.add_error(node, "Whitespace marker '~' not allowed in definition "
                                        "of %s regular expression." % key)
                if value[0] + value[-1] in {'""', "''"}:
                    value = escape_re(value[1:-1])
                elif value[0] + value[-1] == '//':
                    value = self._check_rx(node, value[1:-1])
                if key == 'whitespace' and not re.match(value, ''):
                    self.tree.add_error(node, "Implicit whitespace should always "
                                        "match the empty string, /%s/ does not." % value)
            self.directives[key] = value

        elif key == 'ignorecase':
            if node.children[1].content.lower() not in {"off", "false", "no"}:
                self.re_flags.add('i')

        # elif key == 'testing':
        #     value = node.children[1].content
        #     self.directives['testing'] = value.lower() not in {"off", "false", "no"}

        elif key == 'literalws':
            value = {item.lower() for item in self.compile(node.children[1])}
            if ((value - {'left', 'right', 'both', 'none'})
                    or ('none' in value and len(value) > 1)):
                self.tree.add_error(node, 'Directive "literalws" allows only `left`, `right`, '
                                    '`both` or `none`, not `%s`' % ", ".join(value))
            wsp = {'left', 'right'} if 'both' in value \
                else {} if 'none' in value else value
            self.directives[key] = list(wsp)

        elif key in {'tokens', 'preprocessor_tokens'}:
            tokens = self.compile(node.children[1])
            redeclared = self.directives['tokens'] & tokens
            if redeclared:
                self.tree.add_error(node, 'Tokens %s have already been declared earlier. '
                                    % str(redeclared) + 'Later declaration will be ignored',
                                    code=Error.REDECLARED_TOKEN_WARNING)
            self.directives['tokens'] |= tokens - redeclared

        elif key.endswith('_filter'):
            filter_set = self.compile(node.children[1])
            if not isinstance(filter_set, set) or len(filter_set) != 1:
                self.tree.add_error(node, 'Directive "%s" accepts exactly one symbol, not %s'
                                    % (key, str(filter_set)))
            self.directives['filter'][key[:-7]] = filter_set.pop()

        else:
            self.tree.add_error(node, 'Unknown directive %s ! (Known ones are %s .)' %
                                (key, ', '.join(list(self.directives.keys()))))
        return ""


    def non_terminal(self, node: Node, parser_class: str, custom_args: List[str]=[]) -> str:
        """
        Compiles any non-terminal, where `parser_class` indicates the Parser class
        name for the particular non-terminal.
        """
        arguments = [self.compile(r) for r in node.children] + custom_args
        # node.error_flag = max(node.error_flag, max(t.error_flag for t in node.children))
        return parser_class + '(' + ', '.join(arguments) + ')'


    def on_expression(self, node) -> str:
        # TODO: Add check for errors like "a" | "ab" (which will always yield a, even for ab)
        return self.non_terminal(node, 'Alternative')


    def on_term(self, node) -> str:
        # Basically, the following code does only this:
        #       return self.non_terminal(node, 'Series')
        # What makes it (look) more complicated is the handling of the
        # mandatory §-operator
        mandatory_marker = []
        filtered_children = []  # type: List[Node]
        for nd in node.children:
            if nd.parser.ptype == TOKEN_PTYPE and nd.content == "§":
                mandatory_marker.append(len(filtered_children))
                # if len(filtered_children) == 0:
                #     self.tree.add_error(nd.pos, 'First item of a series should not be mandatory.',
                #                         Error.WARNING)
                if len(mandatory_marker) > 1:
                    self.tree.add_error(nd, 'One mandatory marker (§) is sufficient to declare '
                                        'the rest of the series as mandatory.', Error.WARNING)
            else:
                filtered_children.append(nd)
        saved_result = node.result
        node.result = tuple(filtered_children)
        if len(filtered_children) == 1:
            compiled = self.non_terminal(node, 'Required')
        else:
            custom_args = ['mandatory=%i' % mandatory_marker[0]] if mandatory_marker else []
            compiled = self.non_terminal(node, 'Series', custom_args)
        node.result = saved_result
        return compiled


    def on_factor(self, node: Node) -> str:
        assert node.children
        assert len(node.children) >= 2, node.as_sxpr()
        prefix = node.children[0].content
        custom_args = []  # type: List[str]

        if prefix in {'::', ':'}:
            assert len(node.children) == 2
            arg = node.children[-1]
            if arg.parser.name != 'symbol':
                self.tree.add_error(node, ('Retrieve Operator "%s" requires a symbol, '
                                    'and not a %s.') % (prefix, str(arg.parser)))
                return str(arg.result)
            if str(arg) in self.directives['filter']:
                custom_args = ['rfilter=%s' % self.directives['filter'][str(arg)]]
            self.variables.add(str(arg))  # cast(str, arg.result)

        elif len(node.children) > 2:
            # shift = (Node(node.parser, node.result[1].result),)
            # node.result[1].result = shift + node.result[2:]
            node.children[1].result = (Node(node.children[1].parser, node.children[1].result),) \
                                    + node.children[2:]
            node.children[1].parser = node.parser
            node.result = (node.children[0], node.children[1])

        node.result = node.children[1:]
        try:
            parser_class = self.PREFIX_TABLE[prefix]