Currently job artifacts in CI/CD pipelines on LRZ GitLab never expire. Starting from Wed 26.1.2022 the default expiration time will be 30 days (GitLab default). Currently existing artifacts in already completed jobs will not be affected by the change. The latest artifacts for all jobs in the latest successful pipelines will be kept. More information: https://gitlab.lrz.de/help/user/admin_area/settings/continuous_integration.html#default-artifacts-expiration

ebnf.py 40.8 KB
Newer Older
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
# ebnf.py - EBNF -> Python-Parser compilation for DHParser
#
# Copyright 2016  by Eckhart Arnold (arnold@badw.de)
#                 Bavarian Academy of Sciences and Humanities (badw.de)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.  See the License for the specific language governing
# permissions and limitations under the License.
17
18


19
20
21
22
23
"""
Module ``ebnf`` provides a self-hosting parser for EBNF-Grammars as
well as an EBNF-compiler that compiles an EBNF-Grammar into a
DHParser based Grammar class that can be executed to parse source text
conforming to this grammar into contrete syntax trees.
24
25
"""

26

27
import keyword
28
from collections import OrderedDict
29
from functools import partial
30
from typing import Callable, Dict, List, Set, Tuple
31

32
from DHParser.error import Error
33
from DHParser.parse import Grammar, mixin_comment, Forward, RegExp, RE, \
34
35
    NegativeLookahead, Alternative, Series, Option, OneOrMore, ZeroOrMore, Token
from DHParser import Compiler, TransformationFunc
36
from DHParser.preprocess import nil_preprocessor, PreprocessorFunc
37
from DHParser.syntaxtree import Node, WHITESPACE_PTYPE, TOKEN_PTYPE
38
from DHParser.toolkit import load_if_file, escape_re, md5, sane_parser_name, re, expand_table
39
from DHParser.transform import traverse, remove_brackets, \
Eckhart Arnold's avatar
Eckhart Arnold committed
40
    reduce_single_child, replace_by_single_child, remove_expendables, \
41
    remove_tokens, flatten, forbid, assert_content, remove_infix_operator
42
from DHParser.versionnumber import __version__
43

44
__all__ = ('get_ebnf_preprocessor',
45
46
47
48
           'get_ebnf_grammar',
           'get_ebnf_transformer',
           'get_ebnf_compiler',
           'EBNFGrammar',
49
           'EBNFTransform',
Eckhart Arnold's avatar
Eckhart Arnold committed
50
           'EBNFCompilerError',
51
           'EBNFCompiler',
52
           'grammar_changed',
53
           'PreprocessorFactoryFunc',
54
55
           'ParserFactoryFunc',
           'TransformerFactoryFunc',
56
           'CompilerFactoryFunc')
57
58


Eckhart Arnold's avatar
Eckhart Arnold committed
59
60
61
62
63
64
65
########################################################################
#
# EBNF scanning
#
########################################################################


66
67
def get_ebnf_preprocessor() -> PreprocessorFunc:
    """Returns the preprocessor for EBNF sources. EBNF requires no
    preprocessing, so this is the nil- (i.e. identity-) preprocessor."""
    return nil_preprocessor
Eckhart Arnold's avatar
Eckhart Arnold committed
68
69
70
71
72
73
74
75


########################################################################
#
# EBNF parsing
#
########################################################################

76

di68kap's avatar
di68kap committed
77
class EBNFGrammar(Grammar):
    r"""
    Parser for an EBNF source file, with this grammar::

        # EBNF-Grammar in EBNF

        @ comment    =  /#.*(?:\n|$)/                # comments start with '#' and
                                                     # eat all chars up to and including '\n'
        @ whitespace =  /\s*/                        # whitespace includes linefeed
        @ literalws  =  right                        # trailing whitespace of literals will be
                                                     # ignored tacitly

        syntax     =  [~//] { definition | directive } §EOF
        definition =  symbol §"=" expression
        directive  =  "@" §symbol "=" ( regexp | literal | list_ )

        expression =  term { "|" term }
        term       =  { ["§"] factor }+               # "§" means all following factors mandatory
        factor     =  [flowmarker] [retrieveop] symbol !"="   # negative lookahead to be sure
                                                              # it's not a definition
                    | [flowmarker] literal
                    | [flowmarker] regexp
                    | [flowmarker] oneormore
                    | [flowmarker] group
                    | [flowmarker] unordered
                    | repetition
                    | option

        flowmarker =  "!"  | "&"                     # '!' negative lookahead, '&' positive lookahead
                    | "-!" | "-&"                    # '-' negative lookbehind, '-&' positive lookbehind
        retrieveop =  "::" | ":"                     # '::' pop, ':' retrieve

        group      =  "(" §expression ")"
        unordered  =  "<" §expression ">"            # elements of expression in arbitrary order
        oneormore  =  "{" expression "}+"
        repetition =  "{" §expression "}"
        option     =  "[" §expression "]"

        symbol     =  /(?!\d)\w+/~                   # e.g. expression, factor, parameter_list
        literal    =  /"(?:[^"]|\\")*?"/~            # e.g. "(", '+', 'while'
                    | /'(?:[^']|\\')*?'/~            # whitespace following literals will be ignored
        regexp     =  /~?\/(?:\\\/|[^\/])*?\/~?/~    # e.g. /\w+/, ~/#.*(?:\n|$)/~
                                                     # '~' is a whitespace-marker, if present leading
                                                     # or trailing whitespace of a regular expression
                                                     # will be ignored tacitly.
        list_      =  /\w+/~ { "," /\w+/~ }          # comma separated list of symbols,
                                                     # e.g. BEGIN_LIST, END_LIST,
                                                     # BEGIN_QUOTE, END_QUOTE
                                                     # see CommonMark/markdown.py for an example
        EOF =  !/./
    """
    # NOTE: The parser definitions below look auto-generated (bootstrapped
    # from the EBNF grammar in the docstring above); keep them in sync with
    # the docstring rather than editing ad hoc.

    # forward-declared because `expression` is used recursively (via factor)
    expression = Forward()
    # md5-checksum of the EBNF source this class was generated from
    source_hash__ = "3fc9f5a340f560e847d9af0b61a68743"
    parser_initialization__ = "upon instantiation"
    COMMENT__ = r'#.*(?:\n|$)'
    WHITESPACE__ = r'\s*'
    WSP__ = mixin_comment(whitespace=WHITESPACE__, comment=COMMENT__)
    wspL__ = ''
    wspR__ = WSP__
    EOF = NegativeLookahead(RegExp('.'))
    list_ = Series(RE('\\w+'), ZeroOrMore(Series(Token(","), RE('\\w+'))))
    regexp = RE('~?/(?:\\\\/|[^/])*?/~?')
    literal = Alternative(RE('"(?:[^"]|\\\\")*?"'), RE("'(?:[^']|\\\\')*?'"))
    symbol = RE('(?!\\d)\\w+')
    option = Series(Token("["), expression, Token("]"), mandatory=1)
    repetition = Series(Token("{"), expression, Token("}"), mandatory=1)
    oneormore = Series(Token("{"), expression, Token("}+"))
    unordered = Series(Token("<"), expression, Token(">"), mandatory=1)
    group = Series(Token("("), expression, Token(")"), mandatory=1)
    retrieveop = Alternative(Token("::"), Token(":"))
    flowmarker = Alternative(Token("!"), Token("&"), Token("-!"), Token("-&"))
    factor = Alternative(
        Series(Option(flowmarker), Option(retrieveop), symbol, NegativeLookahead(Token("="))),
        Series(Option(flowmarker), literal), Series(Option(flowmarker), regexp),
        Series(Option(flowmarker), oneormore), Series(Option(flowmarker), group),
        Series(Option(flowmarker), unordered), repetition, option)
    term = OneOrMore(Series(Option(Token("§")), factor))
    expression.set(Series(term, ZeroOrMore(Series(Token("|"), term))))
    directive = Series(Token("@"), symbol, Token("="), Alternative(regexp, literal, list_),
                       mandatory=1)
    definition = Series(symbol, Token("="), expression, mandatory=1)
    syntax = Series(Option(RE('', wR='', wL=WSP__)), ZeroOrMore(Alternative(definition, directive)),
                    EOF, mandatory=2)
    root__ = syntax


163
def grammar_changed(grammar_class, grammar_source: str) -> bool:
    """
    Returns ``True`` if ``grammar_class`` does not reflect the latest
    changes of ``grammar_source``

    Parameters:
        grammar_class:  the parser class representing the grammar
            or the file name of a compiler suite containing the grammar
        grammar_source:  File name or string representation of the
            EBNF code of the grammar

    Returns (bool):
        True, if the source text of the grammar is different from the
        source from which the grammar class was generated
    """
    grammar = load_if_file(grammar_source)
    chksum = md5(grammar, __version__)
    if isinstance(grammar_class, str):
        # grammar_class = load_compiler_suite(grammar_class)[1]
        with open(grammar_class, 'r', encoding='utf8') as f:
            pycode = f.read()
        # raw strings: '\w' and '\(' are invalid escape sequences in
        # ordinary string literals (DeprecationWarning since Python 3.6)
        m = re.search(r'class \w*\(Grammar\)', pycode)
        if m:
            # search only *after* the class header, so an unrelated
            # source_hash__ earlier in the file cannot match
            m = re.search(r'    source_hash__ *= *"([a-z0-9]*)"',
                          pycode[m.span()[1]:])
            return not (m and m.groups() and m.groups()[-1] == chksum)
        else:
            # no Grammar class found at all => consider it outdated
            return True
    else:
        return chksum != grammar_class.source_hash__


195
def get_ebnf_grammar() -> EBNFGrammar:
    """Returns the (module-wide) singleton instance of the EBNF grammar,
    creating it lazily on first use."""
    global thread_local_ebnf_grammar_singleton
    if 'thread_local_ebnf_grammar_singleton' not in globals():
        thread_local_ebnf_grammar_singleton = EBNFGrammar()
    return thread_local_ebnf_grammar_singleton


########################################################################
#
# EBNF concrete to abstract syntax tree transformation and validation
#
########################################################################


212
# Maps node names (or comma-separated groups of names) of the EBNF concrete
# syntax tree to the transformations that turn it into an abstract syntax
# tree.  Consumed via `traverse()` in `EBNFTransform` below.
EBNF_AST_transformation_table = {
    # AST Transformations for EBNF-grammar
    "+":
        remove_expendables,
    "syntax":
        [],  # otherwise '"*": replace_by_single_child' would be applied
    "directive, definition":
        remove_tokens('@', '='),
    "expression":
        [replace_by_single_child, flatten, remove_tokens('|')],  # remove_infix_operator],
    "term":
        [replace_by_single_child, flatten],  # supports both idioms:
                                             # "{ factor }+" and "factor { factor }"
    "factor, flowmarker, retrieveop":
        replace_by_single_child,
    "group":
        [remove_brackets, replace_by_single_child],
    "unordered":
        remove_brackets,
    "oneormore, repetition, option":
        [reduce_single_child, remove_brackets,
         forbid('repetition', 'option', 'oneormore'), assert_content(r'(?!§)')],
    "symbol, literal, regexp":
        reduce_single_child,
    (TOKEN_PTYPE, WHITESPACE_PTYPE):
        reduce_single_child,
    "list_":
        [flatten, remove_infix_operator],
    "*":
        replace_by_single_child
}

244

Eckhart Arnold's avatar
Eckhart Arnold committed
245
def EBNFTransform() -> TransformationFunc:
    """Creates a fresh AST-transformation function for EBNF concrete
    syntax trees, based on a copy of the module-level table."""
    table = EBNF_AST_transformation_table.copy()
    return partial(traverse, processing_table=table)
di68kap's avatar
di68kap committed
247

248
def get_ebnf_transformer() -> TransformationFunc:
    """Returns the (module-wide) singleton EBNF-AST transformation
    function, creating it lazily on first use."""
    global thread_local_EBNF_transformer_singleton
    if 'thread_local_EBNF_transformer_singleton' not in globals():
        thread_local_EBNF_transformer_singleton = EBNFTransform()
    return thread_local_EBNF_transformer_singleton
Eckhart Arnold's avatar
Eckhart Arnold committed
256
257
258
259
260
261
262
263


########################################################################
#
# EBNF abstract syntax tree to Python parser compilation
#
########################################################################

264

265
PreprocessorFactoryFunc = Callable[[], PreprocessorFunc]
266
ParserFactoryFunc = Callable[[], Grammar]
267
TransformerFactoryFunc = Callable[[], TransformationFunc]
268
269
CompilerFactoryFunc = Callable[[], Compiler]

270
271
272
PREPROCESSOR_FACTORY = '''
def get_preprocessor() -> PreprocessorFunc:
    return {NAME}Preprocessor
273
274
275
276
'''


GRAMMAR_FACTORY = '''
277
def get_grammar() -> {NAME}Grammar:
278
279
280
281
282
    global thread_local_{NAME}_grammar_singleton
    try:
        grammar = thread_local_{NAME}_grammar_singleton
    except NameError:
        thread_local_{NAME}_grammar_singleton = {NAME}Grammar()
283
284
        grammar = thread_local_{NAME}_grammar_singleton
    return grammar
285
286
287
288
'''


TRANSFORMER_FACTORY = '''
289
290
291
def {NAME}Transform() -> TransformationDict:
    return partial(traverse, processing_table={NAME}_AST_transformation_table.copy())

292
def get_transformer() -> TransformationFunc:
293
294
295
296
297
298
299
    global thread_local_{NAME}_transformer_singleton
    try:
        transformer = thread_local_{NAME}_transformer_singleton
    except NameError:
        thread_local_{NAME}_transformer_singleton = {NAME}Transform()
        transformer = thread_local_{NAME}_transformer_singleton
    return transformer
300
301
302
303
'''


COMPILER_FACTORY = '''
304
def get_compiler(grammar_name="{NAME}", grammar_source="") -> {NAME}Compiler:
305
306
307
308
309
310
311
    global thread_local_{NAME}_compiler_singleton
    try:
        compiler = thread_local_{NAME}_compiler_singleton
        compiler.set_grammar_name(grammar_name, grammar_source)
    except NameError:
        thread_local_{NAME}_compiler_singleton = \\
            {NAME}Compiler(grammar_name, grammar_source)
312
313
        compiler = thread_local_{NAME}_compiler_singleton
    return compiler
314
315
'''

Eckhart Arnold's avatar
Eckhart Arnold committed
316

317
318
class EBNFCompilerError(Exception):
    """Error raised by `EBNFCompiler` class. (Not compilation errors
    in the strict sense, see `CompilationError` in module ``dsl.py``)"""
    pass


323
class EBNFCompiler(Compiler):
    """
    Generates a Parser from an abstract syntax tree of a grammar specified
    in EBNF-Notation.

    Instances of this class must be called with the root-node of the
    abstract syntax tree from an EBNF-specification of a formal language.
    The returned value is the Python-source-code of a Grammar class for
    this language that can be used to parse texts in this language.
    See classes `parser.Compiler` and `parser.Grammar` for more information.

    Additionally, class EBNFCompiler provides helper methods to generate
    code-skeletons for a preprocessor, AST-transformation and full
    compilation of the formal language. These method's names start with
    the prefix `gen_`.

    Attributes:
        current_symbols:  During compilation, a list containing the root
                node of the currently compiled definition as first element
                and then the nodes of the symbols that are referred to in
                the currently compiled definition.

        rules:  Dictionary that maps rule names to a list of Nodes that
                contain symbol-references in the definition of the rule.
                The first item in the list is the node of the rule-
                definition itself. Example:

                           `alternative = a | b`

                Now `[node.content for node in self.rules['alternative']]`
                yields `['alternative = a | b', 'a', 'b']`

        symbols:  A mapping of symbol names to their first usage (not
                their definition!) in the EBNF source.

        variables:  A set of symbols names that are used with the
                Pop or Retrieve operator. Because the values of these
                symbols need to be captured they are called variables.
                See `test_parser.TestPopRetrieve` for an example.

        recursive:  A set of symbols that are used recursively and
                therefore require a `Forward`-operator.

        definitions:  A dictionary of definitions. Other than `rules`
                this maps the symbols to their compiled definienda.

        deferred_tasks:  A list of callables that is filled during
                compilation, but that will be executed only after
                compilation has finished. Typically, it contains
                semantic checks that require information that
                is only available upon completion of compilation.

        root:   The name of the root symbol.

        directives:  A dictionary of all directives and their default
                values.

        re_flags:  A set of regular expression flags to be added to all
                regular expressions found in the current parsing process
    """
    # names of the special symbols that are injected into every
    # generated Grammar class and therefore may not be redefined
    COMMENT_KEYWORD = "COMMENT__"
    WHITESPACE_KEYWORD = "WSP__"
    RAW_WS_KEYWORD = "WHITESPACE__"
    RESERVED_SYMBOLS = {WHITESPACE_KEYWORD, RAW_WS_KEYWORD, COMMENT_KEYWORD}
    AST_ERROR = "Badly structured syntax tree. " \
                "Potentially due to erroneous AST transformation."
    # maps EBNF prefix operators to the parser classes they compile to
    PREFIX_TABLE = {'§': 'Required',
                    '&': 'Lookahead', '!': 'NegativeLookahead',
                    '-&': 'Lookbehind', '-!': 'NegativeLookbehind',
                    '::': 'Pop', ':': 'Retrieve'}
    # predefined whitespace regimes selectable via the @whitespace directive
    WHITESPACE = {'horizontal': r'[\t ]*',  # default: horizontal
                  'linefeed': r'[ \t]*\n?(?!\s*\n)[ \t]*',
                  'vertical': r'\s*'}
    # directives that may legally occur more than once in a grammar
    REPEATABLE_DIRECTIVES = {'tokens'}
397

398

399
    def __init__(self, grammar_name="", grammar_source=""):
        """Initializes the compiler for the given grammar name and source
        and resets all per-compilation state (see `_reset`)."""
        super(EBNFCompiler, self).__init__(grammar_name, grammar_source)
        self._reset()

403

404
    def _reset(self):
        """Resets all per-compilation state, so the same compiler instance
        can be reused for compiling another grammar."""
        super(EBNFCompiler, self)._reset()
        self._result = ''           # type: str
        self.re_flags = set()       # type: Set[str]
        self.rules = OrderedDict()  # type: OrderedDict[str, List[Node]]
        self.current_symbols = []   # type: List[Node]
        self.symbols = {}           # type: Dict[str, Node]
        self.variables = set()      # type: Set[str]
        self.recursive = set()      # type: Set[str]
        self.definitions = {}       # type: Dict[str, str]
        self.deferred_tasks = []    # type: List[Callable]
        self.root_symbol = ""       # type: str
        # default directive values; may be overridden by @-directives
        self.directives = {'whitespace': self.WHITESPACE['horizontal'],
                           'comment': '',
                           'literalws': {'right'},
                           'tokens': set(),  # alt. 'preprocessor_tokens'
                           'filter': dict()}  # alt. 'filter'
        # self.directives['ignorecase']: False
        self.defined_directives = set()  # type: Set[str]
423

Eckhart Arnold's avatar
Eckhart Arnold committed
424
    @property
    def result(self) -> str:
        """The generated Python source code of the Grammar class
        (empty string before compilation has run)."""
        return self._result

428
    # methods for generating skeleton code for preprocessor, transformer, and compiler
429

430
    def gen_preprocessor_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for a preprocessor-function for
        the previously compiled formal language.
        """
        func_name = self.grammar_name + "Preprocessor"
        stub = "def %s(text):\n    return text, lambda i: i\n" % func_name
        factory = PREPROCESSOR_FACTORY.format(NAME=self.grammar_name)
        return stub + factory
438

439

440
    def gen_transformer_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for the AST-transformation for the
        previously compiled formal language.
        """
        if not self.rules:
            raise EBNFCompilerError('Compiler must be run before calling '
                                    '"gen_transformer_Skeleton()"!')
        tt_name = self.grammar_name + '_AST_transformation_table'
        # table header
        lines = [tt_name + ' = {',
                 '    # AST Transformations for the ' + self.grammar_name + '-grammar',
                 '    "+": remove_empty,']
        # one (commented) entry per grammar rule, with a sensible default
        # transformation guessed from the shape of the compiled definition
        for name in self.rules:
            rule = self.definitions[name]
            if rule.startswith('Alternative'):
                transformations = '[replace_or_reduce]'
            elif rule.startswith('Synonym'):
                transformations = '[reduce_single_child]'
            else:
                transformations = '[]'
            lines.append('    "' + name + '": %s,' % transformations)
        # catch-all entries and table footer
        lines.append('    ":Token, :RE": reduce_single_child,')
        lines.extend(['    "*": replace_by_single_child', '}', ''])
        lines.append(TRANSFORMER_FACTORY.format(NAME=self.grammar_name))
        return '\n'.join(lines)

465

466
    def gen_compiler_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for a Compiler-class for the
        previously compiled formal language.
        """
        if not self.rules:
            raise EBNFCompilerError('Compiler has not been run before calling '
                                    '"gen_Compiler_Skeleton()"!')
        gname = self.grammar_name
        # class header and constructor of the generated compiler class
        lines = ['class ' + gname + 'Compiler(Compiler):',
                 '    """Compiler for the abstract-syntax-tree of a ' +
                 gname + ' source file.',
                 '    """', '',
                 '    def __init__(self, grammar_name="' +
                 gname + '", grammar_source=""):',
                 '        super(' + gname +
                 'Compiler, self).__init__(grammar_name, grammar_source)',
                 r"        assert re.match('\w+\Z', grammar_name)", '']
        # one visitor-method stub per rule; only the root symbol's method
        # is emitted uncommented, the rest are left as commented templates
        for name in self.rules:
            method_name = Compiler.method_name(name)
            if name == self.root_symbol:
                stub = ['    def ' + method_name + '(self, node):',
                        '        return node', '']
            else:
                stub = ['    # def ' + method_name + '(self, node):',
                        '    #     return node', '']
            lines.extend(stub)
        lines.append(COMPILER_FACTORY.format(NAME=self.grammar_name))
        return '\n'.join(lines)
493

494
495
496
497
498
499
500
501
502
503
504
505
    def verify_transformation_table(self, transtable):
        """Checks which entries of `transtable` do not correspond to any
        rule of the compiled grammar and returns a list of Error objects
        for every such spurious entry."""
        assert self._dirty_flag
        # wildcard and anonymous-node keys are always legitimate
        table_entries = set(expand_table(transtable).keys()) - {'*', '+', '~'}
        symbols = self.rules.keys()
        return [Error(('Symbol "%s" is not defined in grammar %s but appears in '
                       'the transformation table!') % (entry, self.grammar_name),
                      Error.UNDEFINED_SYMBOL_IN_TRANSFORMATION_TABLE)
                for entry in table_entries
                if entry not in symbols and not entry.startswith(":")]

506

507
508
509
510
511
    def assemble_parser(self, definitions: List[Tuple[str, str]], root_node: Node) -> str:
        """
        Creates the Python code for the parser after compilation of
        the EBNF-Grammar.

        Parameters:
            definitions:  (symbol, compiled-parser-expression) pairs in
                the order of definition in the EBNF source; this list is
                modified in place.
            root_node:  the root node of the compiled syntax tree (used
                for error reporting).

        Returns:
            The complete Python source code of the generated Grammar
            class (also stored in ``self._result``).
        """

        # execute deferred tasks, for example semantic checks that cannot
        # be done before the symbol table is complete

        for task in self.deferred_tasks:
            task()

        # provide for capturing of symbols that are variables, i.e. the
        # value of will be retrieved at some point during the parsing process

        if self.variables:
            for i in range(len(definitions)):
                if definitions[i][0] in self.variables:
                    definitions[i] = (definitions[i][0], 'Capture(%s)' % definitions[i][1])

        # add special fields for Grammar class

        definitions.append(('wspR__', self.WHITESPACE_KEYWORD
                            if 'right' in self.directives['literalws'] else "''"))
        definitions.append(('wspL__', self.WHITESPACE_KEYWORD
                            if 'left' in self.directives['literalws'] else "''"))
        definitions.append((self.WHITESPACE_KEYWORD,
                            ("mixin_comment(whitespace=" + self.RAW_WS_KEYWORD +
                             ", comment=" + self.COMMENT_KEYWORD + ")")))
        definitions.append((self.RAW_WS_KEYWORD, "r'{whitespace}'".format(**self.directives)))
        definitions.append((self.COMMENT_KEYWORD, "r'{comment}'".format(**self.directives)))

        # prepare parser class header and docstring and
        # add EBNF grammar to the doc string of the parser class

        article = 'an ' if self.grammar_name[0:1] in "AaEeIiOoUu" else 'a '  # what about 'hour', 'universe' etc.?
        declarations = ['class ' + self.grammar_name +
                        'Grammar(Grammar):',
                        'r"""Parser for ' + article + self.grammar_name +
                        ' source file' +
                        (', with this grammar:' if self.grammar_source else '.')]
        definitions.append(('parser_initialization__', '"upon instantiation"'))
        if self.grammar_source:
            definitions.append(('source_hash__',
                                '"%s"' % md5(self.grammar_source, __version__)))
            declarations.append('')
            declarations += [line for line in self.grammar_source.split('\n')]
            # strip trailing blank lines from the embedded grammar source
            while declarations[-1].strip() == '':
                declarations = declarations[:-1]
        declarations.append('"""')

        # turn definitions into declarations in reverse order
        # (reversed so that symbols are defined before they are used)

        self.root_symbol = definitions[0][0] if definitions else ""
        definitions.reverse()
        declarations += [symbol + ' = Forward()'
                         for symbol in sorted(list(self.recursive))]
        for symbol, statement in definitions:
            if symbol in self.recursive:
                declarations += [symbol + '.set(' + statement + ')']
            else:
                declarations += [symbol + ' = ' + statement]

        # check for symbols used but never defined

        defined_symbols = set(self.rules.keys()) | self.RESERVED_SYMBOLS
        for symbol in self.symbols:
            if symbol not in defined_symbols:
                self.symbols[symbol].add_error("Missing definition for symbol '%s'" % symbol)
                # root_node.error_flag = True

        # check for unconnected rules

        defined_symbols.difference_update(self.RESERVED_SYMBOLS)

        def remove_connections(symbol):
            """Recursively removes all symbols which appear in the
            definiens of a particular symbol."""
            if symbol in defined_symbols:
                defined_symbols.remove(symbol)
                for related in self.rules[symbol][1:]:
                    remove_connections(str(related))

        # whatever survives in defined_symbols is unreachable from the root
        remove_connections(self.root_symbol)
        for leftover in defined_symbols:
            self.rules[leftover][0].add_error(
                ('Rule "%s" is not connected to parser root "%s" !') %
                (leftover, self.root_symbol), Error.WARNING)

        # set root_symbol parser and assemble python grammar definition

        if self.root_symbol and 'root__' not in self.rules:
            declarations.append('root__ = ' + self.root_symbol)
        declarations.append('')
        self._result = '\n    '.join(declarations) \
                       + GRAMMAR_FACTORY.format(NAME=self.grammar_name)
        return self._result
604

605
606
607

    ## compilation methods

608
    def on_syntax(self, node: Node) -> str:
        """Compiles the top-level `syntax` node: compiles all contained
        definitions and directives and assembles the resulting parser
        source code via `assemble_parser`."""
        definitions = []  # type: List[Tuple[str, str]]

        # drop the wrapping sequence node
        if len(node.children) == 1 and not node.children[0].parser.name:
            node = node.children[0]

        # compile definitions and directives and collect definitions
        for nd in node.children:
            if nd.parser.name == "definition":
                definitions.append(self.compile(nd))
            else:
                assert nd.parser.name == "directive", nd.as_sxpr()
                self.compile(nd)
            # propagate any error flags upwards to the syntax node
            node.error_flag = max(node.error_flag, nd.error_flag)
        self.definitions.update(definitions)

        return self.assemble_parser(definitions, node)
626

627

628
    def on_definition(self, node: Node) -> Tuple[str, str]:
        """
        Compiles a single rule definition and returns it as a
        (rule name, python parser expression) pair.

        Redefinitions, reserved names, names with illegal double
        underscores, clashes with preprocessor tokens and python
        keywords are reported as errors on `node` before compilation.
        """
        rule = node.children[0].content
        if rule in self.rules:
            # flag the first definition as well, but only once
            first = self.rules[rule][0]
            if not first._errors:
                first.add_error('First definition of rule "%s" '
                                'followed by illegal redefinitions.' % rule)
            node.add_error('A rule with name "%s" has already been defined earlier.' % rule)
        elif rule in EBNFCompiler.RESERVED_SYMBOLS:
            node.add_error('Symbol "%s" is a reserved symbol.' % rule)
        elif not sane_parser_name(rule):
            # fixed typo in former message: "doube" -> "double"
            node.add_error('Illegal symbol "%s". Symbols must not start or '
                           'end with a double underscore "__".' % rule)
        elif rule in self.directives['tokens']:
            node.add_error('Symbol "%s" has already been defined as '
                           'a preprocessor token.' % rule)
        elif keyword.iskeyword(rule):
            node.add_error('Python keyword "%s" may not be used as a symbol. '
                           % rule + '(This may change in the future.)')
        try:
            # register the rule *before* compiling its body, so that
            # recursive references to the rule can already be resolved
            self.current_symbols = [node]
            self.rules[rule] = self.current_symbols
            defn = self.compile(node.children[1])
            if rule in self.variables:
                defn = 'Capture(%s)' % defn
                self.variables.remove(rule)
            elif defn.find("(") < 0:
                # assume it's a synonym, like 'page = REGEX_PAGE_NR'
                defn = 'Synonym(%s)' % defn
        except TypeError as error:
            # most likely a malformed AST; report with traceback location
            from traceback import extract_tb
            trace = str(extract_tb(error.__traceback__)[-1])
            errmsg = "%s (TypeError: %s; %s)\n%s" \
                     % (EBNFCompiler.AST_ERROR, str(error), trace, node.as_sxpr())
            node.add_error(errmsg)
            rule, defn = rule + ':error', '"' + errmsg + '"'
        return rule, defn
665

666

667
    def _check_rx(self, node: Node, rx: str) -> str:
668
669
        """
        Checks whether the string `rx` represents a valid regular
670
671
672
        expression. Makes sure that multiline regular expressions are
        prepended by the multiline-flag. Returns the regular expression string.
        """
673
        flags = self.re_flags | {'x'} if rx.find('\n') >= 0 else self.re_flags
674
        if flags:  rx = "(?%s)%s" % ("".join(flags), rx)
675
676
677
678
679
680
681
        try:
            re.compile(rx)
        except Exception as re_error:
            node.add_error("malformed regular expression %s: %s" %
                           (repr(rx), str(re_error)))
        return rx

682

683
    def on_directive(self, node: Node) -> str:
        """
        Compiles a '@'-directive by updating `self.directives` (and
        related state such as `self.re_flags`). Directives never yield
        parser code, so the empty string is always returned.
        """
        key = node.children[0].content.lower()
        assert key not in self.directives['tokens']

        # non-repeatable directives may only be defined once
        if key not in self.REPEATABLE_DIRECTIVES:
            if key in self.defined_directives:
                node.add_error('Directive "%s" has already been defined earlier. ' % key + \
                               'Later definition will be ignored!',
                               code=Error.REDEFINED_DIRECTIVE_WARNING)
                return ""
            self.defined_directives.add(key)

        if key in {'comment', 'whitespace'}:
            if node.children[1].parser.name == "list_":
                # a named whitespace-model, e.g. "horizontal"
                if len(node.children[1].result) != 1:
                    node.add_error('Directive "%s" must have one, but not %i values.' %
                                   (key, len(node.children[1].result)))
                value = self.compile(node.children[1]).pop()
                if key == 'whitespace' and value in EBNFCompiler.WHITESPACE:
                    value = EBNFCompiler.WHITESPACE[value]  # replace whitespace-name by regex
                else:
                    node.add_error('Value "%s" not allowed for directive "%s".' % (value, key))
            else:
                # a literal or a regular expression
                value = node.children[1].content.strip("~")  # cast(str, node.children[
                # 1].result).strip("~")
                if value != node.children[1].content:  # cast(str, node.children[1].result):
                    node.add_error("Whitespace marker '~' not allowed in definition of "
                                   "%s regular expression." % key)
                if value[0] + value[-1] in {'""', "''"}:
                    value = escape_re(value[1:-1])
                elif value[0] + value[-1] == '//':
                    value = self._check_rx(node, value[1:-1])
                if key == 'whitespace' and not re.match(value, ''):
                    node.add_error("Implicit whitespace should always match the empty string, "
                                   "/%s/ does not." % value)
            self.directives[key] = value

        elif key == 'ignorecase':
            if node.children[1].content.lower() not in {"off", "false", "no"}:
                self.re_flags.add('i')

        # elif key == 'testing':
        #     value = node.children[1].content
        #     self.directives['testing'] = value.lower() not in {"off", "false", "no"}

        elif key == 'literalws':
            value = {item.lower() for item in self.compile(node.children[1])}
            if ((value - {'left', 'right', 'both', 'none'})
                    or ('none' in value and len(value) > 1)):
                node.add_error('Directive "literalws" allows the values '
                               '`left`, `right`, `both` or `none`, '
                               'but not `%s`' % ", ".join(value))
            # use set() here, not the empty *dict* literal `{}` that
            # was used before (behavior was equivalent, type was wrong)
            wsp = {'left', 'right'} if 'both' in value \
                else set() if 'none' in value else value
            self.directives[key] = list(wsp)

        elif key in {'tokens', 'preprocessor_tokens'}:
            tokens = self.compile(node.children[1])
            redeclared = self.directives['tokens'] & tokens
            if redeclared:
                node.add_error('Tokens %s have already been declared earlier. '
                               % str(redeclared) + 'Later declaration will be ignored',
                               code=Error.REDECLARED_TOKEN_WARNING)
            self.directives['tokens'] |= tokens - redeclared

        elif key.endswith('_filter'):
            filter_set = self.compile(node.children[1])
            if not isinstance(filter_set, set) or len(filter_set) != 1:
                # fixed typo in former message: "exactly on symbol"
                node.add_error('Directive "%s" accepts exactly one symbol, not %s'
                               % (key, str(filter_set)))
            self.directives['filter'][key[:-7]] = filter_set.pop()

        else:
            node.add_error('Unknown directive %s ! (Known ones are %s .)' %
                           (key, ', '.join(list(self.directives.keys()))))
        return ""

760

761
    def non_terminal(self, node: Node, parser_class: str, custom_args: List[str]=[]) -> str:
762
763
        """
        Compiles any non-terminal, where `parser_class` indicates the Parser class
764
765
        name for the particular non-terminal.
        """
766
        arguments = [self.compile(r) for r in node.children] + custom_args
767
        node.error_flag = max(node.error_flag, max(t.error_flag for t in node.children))
768
769
        return parser_class + '(' + ', '.join(arguments) + ')'

770

771
    def on_expression(self, node) -> str:
        """Compiles an 'expression'-node (alternatives separated by '|')
        into an `Alternative`-parser call."""
        # TODO: Add check for errors like "a" | "ab" (which will always yield a, even for ab)
        return self.non_terminal(node, 'Alternative')

775

776
    def on_term(self, node) -> str:
        """
        Compiles a 'term'-node (a sequence of factors) into a
        `Series`-parser call, translating a mandatory-marker "§" in the
        sequence into the `mandatory`-keyword-argument of `Series`.
        """
        # Basically, the following code does only this:
        #       return self.non_terminal(node, 'Series')
        # What makes it (look) more complicated is the handling of the
        # mandatory §-operator
        mandatory_marker = []  # type: List[int]  # positions of "§" among the non-"§" children
        filtered_children = []  # type: List[Node]  # children with the "§"-tokens removed
        i = 0  # index of the next non-"§" child
        for nd in node.children:
            if nd.parser.ptype == TOKEN_PTYPE and nd.content == "§":
                mandatory_marker.append(i)
                if i == 0:
                    # "§" before the first item is pointless
                    nd.add_error('First item of a series should not be mandatory.',
                                 Error.WARNING)
                elif len(mandatory_marker) > 1:
                    # only the first "§" matters; additional ones are redundant
                    nd.add_error('One mandatory marker (§) sufficient to declare the '
                                 'rest of the series as mandatory.', Error.WARNING)
            else:
                filtered_children.append(nd)
                i += 1
        # temporarily strip the "§"-tokens from the node so that
        # non_terminal() only compiles the real factors
        saved_result = node.result
        node.result = tuple(filtered_children)
        custom_args = ['mandatory=%i' % mandatory_marker[0]] if mandatory_marker else []
        compiled = self.non_terminal(node, 'Series', custom_args)
        node.result = saved_result  # restore the original children
        return compiled
802

803

804
    def on_factor(self, node: Node) -> str:
        """
        Compiles a 'factor'-node, i.e. a term with a prefix-operator
        (e.g. flowing/negative lookahead or -behind, retrieve/pop), into
        the parser-class call that `self.PREFIX_TABLE` maps the prefix to.
        """
        assert node.children
        assert len(node.children) >= 2, node.as_sxpr()
        prefix = node.children[0].content  # the operator, e.g. '::', ':', '-!', ...
        custom_args = []  # type: List[str]

        if prefix in {'::', ':'}:
            # pop-/retrieve-operators require a single symbol as argument
            assert len(node.children) == 2
            arg = node.children[-1]
            if arg.parser.name != 'symbol':
                node.add_error(('Retrieve Operator "%s" requires a symbol, '
                                'and not a %s.') % (prefix, str(arg.parser)))
                return str(arg.result)
            if str(arg) in self.directives['filter']:
                # attach a retrieve-filter declared via a "..._filter"-directive
                custom_args = ['rfilter=%s' % self.directives['filter'][str(arg)]]
            self.variables.add(str(arg))  # cast(str, arg.result)

        elif len(node.children) > 2:
            # more than one argument after the prefix: wrap the trailing
            # children into the second child, so that below the node has
            # exactly (prefix, argument) shape
            # shift = (Node(node.parser, node.result[1].result),)
            # node.result[1].result = shift + node.result[2:]
            node.children[1].result = (Node(node.children[1].parser, node.children[1].result),) \
                                    + node.children[2:]
            node.children[1].parser = node.parser
            node.result = (node.children[0], node.children[1])

        # drop the prefix-token; only the argument(s) get compiled
        node.result = node.children[1:]
        try:
            parser_class = self.PREFIX_TABLE[prefix]
            result = self.non_terminal(node, parser_class, custom_args)
            if prefix[:1] == '-':
                # lookbehind-operators ('-...') only work with plain
                # RegExp-parsers; `check` verifies this, following chains
                # of symbol-definitions through self.rules
                def check(node):
                    nd = node
                    if len(nd.children) >= 1:
                        nd = nd.children[0]
                    while nd.parser.name == "symbol":
                        symlist = self.rules.get(nd.content, [])
                        if len(symlist) == 2:
                            nd = symlist[1]
                        else:
                            if len(symlist) == 1:
                                nd = symlist[0].children[1]
                            break
                    if (nd.parser.name != "regexp" or nd.content[:1] != '/'
                            or nd.content[-1:] != '/'):
                        node.add_error("Lookbehind-parser can only be used with plain RegExp-"
                                       "parsers, not with: " + nd.parser.name + nd.parser.ptype)

                # defer the check: referenced rules may not be compiled yet
                if not result.startswith('RegExp('):
                    self.deferred_tasks.append(lambda: check(node))
            return result
        except KeyError:
            node.add_error('Unknown prefix "%s".' % prefix)
        return ""
857

858

859
    def on_option(self, node) -> str:
        """Compiles an 'option'-node ("[ ... ]") into an `Option`-parser call."""
        return self.non_terminal(node, 'Option')
861

862

863
    def on_repetition(self, node) -> str:
        """Compiles a 'repetition'-node ("{ ... }") into a `ZeroOrMore`-parser call."""
        return self.non_terminal(node, 'ZeroOrMore')

866

867
    def on_oneormore(self, node) -> str:
        """Compiles a 'oneormore'-node ("{ ... }+") into a `OneOrMore`-parser call."""
        return self.non_terminal(node, 'OneOrMore')

870

871
    def on_group(self, node) -> str:
        """Group-nodes ("( ... )") must already have been dissolved during
        AST-transformation; this method only guards against a broken
        transformation pipeline."""
        raise EBNFCompilerError("Group nodes should have been eliminated by "
                                "AST transformation!")

875
    def on_unordered(self, node) -> str:
876
877
878
        # return self.non_terminal(node, 'Unordered')
        assert len(node.children) == 1
        nd = node.children[0]