# ebnf.py - EBNF -> Python-Parser compilation for DHParser
#
# Copyright 2016  by Eckhart Arnold (arnold@badw.de)
#                 Bavarian Academy of Sciences and Humanities (badw.de)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.  See the License for the specific language governing
# permissions and limitations under the License.


"""
Module ``ebnf`` provides a self-hosting parser for EBNF-Grammars as
well as an EBNF-compiler that compiles an EBNF-Grammar into a
DHParser-based Grammar class that can be executed to parse source text
conforming to this grammar into concrete syntax trees.
"""


import keyword
from collections import OrderedDict
from functools import partial

from DHParser.compile import CompilerError, Compiler
from DHParser.error import Error
from DHParser.parse import Grammar, mixin_comment, Forward, RegExp, RE, \
    NegativeLookahead, Alternative, Series, Option, OneOrMore, ZeroOrMore, Token
from DHParser.preprocess import nil_preprocessor, PreprocessorFunc
from DHParser.syntaxtree import Node, WHITESPACE_PTYPE, TOKEN_PTYPE
from DHParser.toolkit import load_if_file, escape_re, md5, sane_parser_name, re, expand_table, \
    typing
from DHParser.transform import TransformationFunc, traverse, remove_brackets, \
    reduce_single_child, replace_by_single_child, remove_expendables, \
    remove_tokens, flatten, forbid, assert_content, remove_infix_operator
from DHParser.versionnumber import __version__
from typing import Callable, Dict, List, Set, Tuple


__all__ = ('get_ebnf_preprocessor',
           'get_ebnf_grammar',
           'get_ebnf_transformer',
           'get_ebnf_compiler',
           'EBNFGrammar',
           'EBNFTransform',
           'EBNFCompilerError',
           'EBNFCompiler',
           'grammar_changed',
           'PreprocessorFactoryFunc',
           'ParserFactoryFunc',
           'TransformerFactoryFunc',
           'CompilerFactoryFunc')


########################################################################
#
# EBNF scanning
#
########################################################################


def get_ebnf_preprocessor() -> PreprocessorFunc:
    return nil_preprocessor


########################################################################
#
# EBNF parsing
#
########################################################################


class EBNFGrammar(Grammar):
    r"""
    Parser for an EBNF source file, with this grammar::

        # EBNF-Grammar in EBNF

        @ comment    =  /#.*(?:\n|$)/                # comments start with '#' and
                                                     # eat all chars up to and including '\n'
        @ whitespace =  /\s*/                        # whitespace includes linefeed
        @ literalws  =  right                        # trailing whitespace of literals will be
                                                     # ignored tacitly

        syntax     =  [~//] { definition | directive } §EOF
        definition =  symbol §"=" expression
        directive  =  "@" §symbol "=" ( regexp | literal | list_ )

        expression =  term { "|" term }
        term       =  { ["§"] factor }+               # "§" means all following factors mandatory
        factor     =  [flowmarker] [retrieveop] symbol !"="   # negative lookahead to be sure
                                                              # it's not a definition
                    | [flowmarker] literal
                    | [flowmarker] regexp
                    | [flowmarker] oneormore
                    | [flowmarker] group
                    | [flowmarker] unordered
                    | repetition
                    | option

        flowmarker =  "!"  | "&"                     # '!' negative lookahead, '&' positive lookahead
                    | "-!" | "-&"                    # '-' negative lookbehind, '-&' positive lookbehind
        retrieveop =  "::" | ":"                     # '::' pop, ':' retrieve

        group      =  "(" §expression ")"
        unordered  =  "<" §expression ">"            # elements of expression in arbitrary order
        oneormore  =  "{" expression "}+"
        repetition =  "{" §expression "}"
        option     =  "[" §expression "]"

        symbol     =  /(?!\d)\w+/~                   # e.g. expression, factor, parameter_list
        literal    =  /"(?:[^"]|\\")*?"/~            # e.g. "(", '+', 'while'
                    | /'(?:[^']|\\')*?'/~            # whitespace following literals will be ignored
        regexp     =  /~?\/(?:\\\/|[^\/])*?\/~?/~    # e.g. /\w+/, ~/#.*(?:\n|$)/~
                                                     # '~' is a whitespace-marker, if present leading
                                                     # or trailing whitespace of a regular expression
                                                     # will be ignored tacitly.
        list_      =  /\w+/~ { "," /\w+/~ }          # comma separated list of symbols,
                                                     # e.g. BEGIN_LIST, END_LIST,
                                                     # BEGIN_QUOTE, END_QUOTE
                                                     # see CommonMark/markdown.py for an example
        EOF =  !/./
"""
    expression = Forward()
    source_hash__ = "3fc9f5a340f560e847d9af0b61a68743"
    parser_initialization__ = "upon instantiation"
    COMMENT__ = r'#.*(?:\n|$)'
    WHITESPACE__ = r'\s*'
    WSP__ = mixin_comment(whitespace=WHITESPACE__, comment=COMMENT__)
    wspL__ = ''
    wspR__ = WSP__
    EOF = NegativeLookahead(RegExp('.'))
    list_ = Series(RE('\\w+'), ZeroOrMore(Series(Token(","), RE('\\w+'))))
    regexp = RE('~?/(?:\\\\/|[^/])*?/~?')
    literal = Alternative(RE('"(?:[^"]|\\\\")*?"'), RE("'(?:[^']|\\\\')*?'"))
    symbol = RE('(?!\\d)\\w+')
    option = Series(Token("["), expression, Token("]"), mandatory=1)
    repetition = Series(Token("{"), expression, Token("}"), mandatory=1)
    oneormore = Series(Token("{"), expression, Token("}+"))
    unordered = Series(Token("<"), expression, Token(">"), mandatory=1)
    group = Series(Token("("), expression, Token(")"), mandatory=1)
    retrieveop = Alternative(Token("::"), Token(":"))
    flowmarker = Alternative(Token("!"), Token("&"), Token("-!"), Token("-&"))
    factor = Alternative(
        Series(Option(flowmarker), Option(retrieveop), symbol, NegativeLookahead(Token("="))),
        Series(Option(flowmarker), literal), Series(Option(flowmarker), regexp),
        Series(Option(flowmarker), oneormore), Series(Option(flowmarker), group),
        Series(Option(flowmarker), unordered), repetition, option)
    term = OneOrMore(Series(Option(Token("§")), factor))
    expression.set(Series(term, ZeroOrMore(Series(Token("|"), term))))
    directive = Series(Token("@"), symbol, Token("="), Alternative(regexp, literal, list_),
                       mandatory=1)
    definition = Series(symbol, Token("="), expression, mandatory=1)
    syntax = Series(Option(RE('', wR='', wL=WSP__)), ZeroOrMore(Alternative(definition, directive)),
                    EOF, mandatory=2)
    root__ = syntax


def grammar_changed(grammar_class, grammar_source: str) -> bool:
    """
    Returns ``True`` if ``grammar_class`` does not reflect the latest
    changes of ``grammar_source``.

    Parameters:
        grammar_class:  the parser class representing the grammar
            or the file name of a compiler suite containing the grammar
        grammar_source:  File name or string representation of the
            EBNF code of the grammar

    Returns (bool):
        True, if the source text of the grammar is different from the
        source from which the grammar class was generated
    """
    grammar = load_if_file(grammar_source)
    chksum = md5(grammar, __version__)
    if isinstance(grammar_class, str):
        # grammar_class = load_compiler_suite(grammar_class)[1]
        with open(grammar_class, 'r', encoding='utf8') as f:
            pycode = f.read()
        m = re.search('class \w*\(Grammar\)', pycode)
        if m:
            m = re.search('    source_hash__ *= *"([a-z0-9]*)"',
                          pycode[m.span()[1]:])
            return not (m and m.groups() and m.groups()[-1] == chksum)
        else:
            return True
    else:
        return chksum != grammar_class.source_hash__
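
# Illustration (hypothetical file name): check whether a previously generated
# parser still matches its EBNF source:
#
#     if grammar_changed(EBNFGrammar, 'EBNF.ebnf'):
#         print('EBNF.ebnf has changed since the parser class was generated.')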


def get_ebnf_grammar() -> EBNFGrammar:
    global thread_local_ebnf_grammar_singleton
    try:
        grammar = thread_local_ebnf_grammar_singleton
        return grammar
    except NameError:
        thread_local_ebnf_grammar_singleton = EBNFGrammar()
        return thread_local_ebnf_grammar_singleton


########################################################################
#
# EBNF concrete to abstract syntax tree transformation and validation
#
########################################################################


EBNF_AST_transformation_table = {
    # AST Transformations for EBNF-grammar
    "+":
        remove_expendables,
    "syntax":
        [],  # otherwise '"*": replace_by_single_child' would be applied
    "directive, definition":
        remove_tokens('@', '='),
    "expression":
        [replace_by_single_child, flatten, remove_tokens('|')],  # remove_infix_operator],
    "term":
        [replace_by_single_child, flatten],  # supports both idioms:
                                             # "{ factor }+" and "factor { factor }"
    "factor, flowmarker, retrieveop":
        replace_by_single_child,
    "group":
        [remove_brackets, replace_by_single_child],
    "unordered":
        remove_brackets,
    "oneormore, repetition, option":
        [reduce_single_child, remove_brackets,
         forbid('repetition', 'option', 'oneormore'), assert_content(r'(?!§)(?:.|\n)*')],
    "symbol, literal, regexp":
        reduce_single_child,
    (TOKEN_PTYPE, WHITESPACE_PTYPE):
        reduce_single_child,
    "list_":
        [flatten, remove_infix_operator],
    "*":
        replace_by_single_child
}
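
# Reading note on the special keys above (to the best of my reading of
# DHParser.transform.traverse): "+" lists transformations applied to all
# nodes in addition to any tag-specific entry, "*" is the fallback for node
# types without an entry of their own, and comma-separated keys such as
# "directive, definition" apply to each of the listed node types.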


def EBNFTransform() -> TransformationFunc:
    return partial(traverse, processing_table=EBNF_AST_transformation_table.copy())

def get_ebnf_transformer() -> TransformationFunc:
    global thread_local_EBNF_transformer_singleton
    try:
        transformer = thread_local_EBNF_transformer_singleton
    except NameError:
        thread_local_EBNF_transformer_singleton = EBNFTransform()
        transformer = thread_local_EBNF_transformer_singleton
    return transformer


########################################################################
#
# EBNF abstract syntax tree to Python parser compilation
#
########################################################################


PreprocessorFactoryFunc = Callable[[], PreprocessorFunc]
ParserFactoryFunc = Callable[[], Grammar]
TransformerFactoryFunc = Callable[[], TransformationFunc]
CompilerFactoryFunc = Callable[[], Compiler]

PREPROCESSOR_FACTORY = '''
def get_preprocessor() -> PreprocessorFunc:
    return {NAME}Preprocessor
'''


GRAMMAR_FACTORY = '''
def get_grammar() -> {NAME}Grammar:
    global thread_local_{NAME}_grammar_singleton
    try:
        grammar = thread_local_{NAME}_grammar_singleton
    except NameError:
        thread_local_{NAME}_grammar_singleton = {NAME}Grammar()
        grammar = thread_local_{NAME}_grammar_singleton
    return grammar
'''


TRANSFORMER_FACTORY = '''
def {NAME}Transform() -> TransformationDict:
    return partial(traverse, processing_table={NAME}_AST_transformation_table.copy())

def get_transformer() -> TransformationFunc:
    global thread_local_{NAME}_transformer_singleton
    try:
        transformer = thread_local_{NAME}_transformer_singleton
    except NameError:
        thread_local_{NAME}_transformer_singleton = {NAME}Transform()
        transformer = thread_local_{NAME}_transformer_singleton
    return transformer
'''


COMPILER_FACTORY = '''
def get_compiler(grammar_name="{NAME}", grammar_source="") -> {NAME}Compiler:
    global thread_local_{NAME}_compiler_singleton
    try:
        compiler = thread_local_{NAME}_compiler_singleton
        compiler.set_grammar_name(grammar_name, grammar_source)
    except NameError:
        thread_local_{NAME}_compiler_singleton = \\
            {NAME}Compiler(grammar_name, grammar_source)
        compiler = thread_local_{NAME}_compiler_singleton
    return compiler
'''
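
# Note: the factory templates above are filled in via str.format() by
# EBNFCompiler (see assemble_parser() and the gen_*_skeleton() methods below).
# For illustration, GRAMMAR_FACTORY.format(NAME="Arithmetic") would yield a
# get_grammar() function that lazily creates and caches a thread-local
# ArithmeticGrammar instance.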


class EBNFCompilerError(CompilerError):
    """Error raised by `EBNFCompiler` class. (Not compilation errors
    in the strict sense, see `CompilationError` in module ``dsl.py``)"""
    pass


class EBNFCompiler(Compiler):
    """
    Generates a Parser from an abstract syntax tree of a grammar specified
    in EBNF-Notation.

    Instances of this class must be called with the root-node of the
    abstract syntax tree from an EBNF-specification of a formal language.
    The returned value is the Python-source-code of a Grammar class for
    this language that can be used to parse texts in this language.
    See classes `parser.Compiler` and `parser.Grammar` for more information.

    Additionally, class EBNFCompiler provides helper methods to generate
    code-skeletons for a preprocessor, AST-transformation and full
    compilation of the formal language. These methods' names start with
    the prefix `gen_`.

    Attributes:
        current_symbols:  During compilation, a list containing the root
                node of the currently compiled definition as first element
                and then the nodes of the symbols that are referred to in
                the currently compiled definition.

        rules:  Dictionary that maps rule names to a list of Nodes that
                contain symbol-references in the definition of the rule.
                The first item in the list is the node of the rule-
                definition itself. Example:

                           `alternative = a | b`

                Now `[node.content for node in self.rules['alternative']]`
                yields `['alternative = a | b', 'a', 'b']`

        symbols:  A mapping of symbol names to their first usage (not
                their definition!) in the EBNF source.

        variables:  A set of symbol names that are used with the
                Pop or Retrieve operator. Because the values of these
                symbols need to be captured they are called variables.
                See `test_parser.TestPopRetrieve` for an example.

        recursive:  A set of symbols that are used recursively and
                therefore require a `Forward`-operator.

        definitions:  A dictionary of definitions. Other than `rules`
                this maps the symbols to their compiled definienda.

        deferred_tasks:  A list of callables that is filled during
                compilation, but that will be executed only after
                compilation has finished. Typically, it contains
                semantic checks that require information that
                is only available upon completion of compilation.

        root_symbol:  The name of the root symbol.

        directives:  A dictionary of all directives and their default
                values.

        re_flags:  A set of regular expression flags to be added to all
                regular expressions found in the current parsing process.
    """
    COMMENT_KEYWORD = "COMMENT__"
    WHITESPACE_KEYWORD = "WSP__"
    RAW_WS_KEYWORD = "WHITESPACE__"
    RESERVED_SYMBOLS = {WHITESPACE_KEYWORD, RAW_WS_KEYWORD, COMMENT_KEYWORD}
    AST_ERROR = "Badly structured syntax tree. " \
                "Potentially due to erroneous AST transformation."
    PREFIX_TABLE = {'§': 'Required',
                    '&': 'Lookahead', '!': 'NegativeLookahead',
                    '-&': 'Lookbehind', '-!': 'NegativeLookbehind',
                    '::': 'Pop', ':': 'Retrieve'}
    WHITESPACE = {'horizontal': r'[\t ]*',  # default: horizontal
                  'linefeed': r'[ \t]*\n?(?!\s*\n)[ \t]*',
                  'vertical': r'\s*'}
    REPEATABLE_DIRECTIVES = {'tokens'}


    def __init__(self, grammar_name="", grammar_source=""):
        super(EBNFCompiler, self).__init__(grammar_name, grammar_source)
        self._reset()


    def _reset(self):
        super(EBNFCompiler, self)._reset()
        self._result = ''           # type: str
        self.re_flags = set()       # type: Set[str]
        self.rules = OrderedDict()  # type: OrderedDict[str, List[Node]]
        self.current_symbols = []   # type: List[Node]
        self.symbols = {}           # type: Dict[str, Node]
        self.variables = set()      # type: Set[str]
        self.recursive = set()      # type: Set[str]
        self.definitions = {}       # type: Dict[str, str]
        self.deferred_tasks = []    # type: List[Callable]
        self.root_symbol = ""       # type: str
        self.directives = {'whitespace': self.WHITESPACE['horizontal'],
                           'comment': '',
                           'literalws': {'right'},
                           'tokens': set(),  # alt. 'preprocessor_tokens'
                           'filter': dict()}  # alt. 'filter'
        # self.directives['ignorecase']: False
        self.defined_directives = set()  # type: Set[str]

    @property
    def result(self) -> str:
        return self._result

    # methods for generating skeleton code for preprocessor, transformer, and compiler

    def gen_preprocessor_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for a preprocessor-function for
        the previously compiled formal language.
        """
        name = self.grammar_name + "Preprocessor"
        return "def %s(text):\n    return text, lambda i: i\n" % name \
               + PREPROCESSOR_FACTORY.format(NAME=self.grammar_name)


    def gen_transformer_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for the AST-transformation for the
        previously compiled formal language.
        """
        if not self.rules:
            raise EBNFCompilerError('Compiler must be run before calling '
                                    '"gen_transformer_skeleton()"!')
        tt_name = self.grammar_name + '_AST_transformation_table'
        transtable = [tt_name + ' = {',
                      '    # AST Transformations for the ' + self.grammar_name + '-grammar']
        transtable.append('    "+": remove_empty,')
        for name in self.rules:
            transformations = '[]'
            rule = self.definitions[name]
            if rule.startswith('Alternative'):
                transformations = '[replace_or_reduce]'
            elif rule.startswith('Synonym'):
                transformations = '[reduce_single_child]'
            transtable.append('    "' + name + '": %s,' % transformations)
        transtable.append('    ":Token, :RE": reduce_single_child,')
        transtable += ['    "*": replace_by_single_child', '}', '']
        transtable += [TRANSFORMER_FACTORY.format(NAME=self.grammar_name)]
        return '\n'.join(transtable)


    def gen_compiler_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for a Compiler-class for the
        previously compiled formal language.
        """
        if not self.rules:
            raise EBNFCompilerError('Compiler has not been run before calling '
                                    '"gen_compiler_skeleton()"!')
        compiler = ['class ' + self.grammar_name + 'Compiler(Compiler):',
                    '    """Compiler for the abstract-syntax-tree of a ' +
                    self.grammar_name + ' source file.',
                    '    """', '',
                    '    def __init__(self, grammar_name="' +
                    self.grammar_name + '", grammar_source=""):',
                    '        super(' + self.grammar_name +
                    'Compiler, self).__init__(grammar_name, grammar_source)',
                    r"        assert re.match('\w+\Z', grammar_name)", '']
        for name in self.rules:
            method_name = Compiler.method_name(name)
            if name == self.root_symbol:
                compiler += ['    def ' + method_name + '(self, node):',
                             '        return node', '']
            else:
                compiler += ['    # def ' + method_name + '(self, node):',
                             '    #     return node', '']
        compiler += [COMPILER_FACTORY.format(NAME=self.grammar_name)]
        return '\n'.join(compiler)

    def verify_transformation_table(self, transtable):
        assert self._dirty_flag
        table_entries = set(expand_table(transtable).keys()) - {'*', '+', '~'}
        symbols = self.rules.keys()
        messages = []
        for entry in table_entries:
            if entry not in symbols and not entry.startswith(":"):
                messages.append(Error(('Symbol "%s" is not defined in grammar %s but appears in '
                                       'the transformation table!') % (entry, self.grammar_name),
                                      Error.UNDEFINED_SYMBOL_IN_TRANSFORMATION_TABLE))
        return messages


    def assemble_parser(self, definitions: List[Tuple[str, str]], root_node: Node) -> str:
        """
        Creates the Python code for the parser after compilation of
        the EBNF-Grammar
        """

        # execute deferred tasks, for example semantic checks that cannot
        # be done before the symbol table is complete

        for task in self.deferred_tasks:
            task()

        # provide for capturing of symbols that are variables, i.e. the
        # value of which will be retrieved at some point during the parsing process

        if self.variables:
            for i in range(len(definitions)):
                if definitions[i][0] in self.variables:
                    definitions[i] = (definitions[i][0], 'Capture(%s)' % definitions[i][1])

        # add special fields for Grammar class

        definitions.append(('wspR__', self.WHITESPACE_KEYWORD
                            if 'right' in self.directives['literalws'] else "''"))
        definitions.append(('wspL__', self.WHITESPACE_KEYWORD
                            if 'left' in self.directives['literalws'] else "''"))
        definitions.append((self.WHITESPACE_KEYWORD,
                            ("mixin_comment(whitespace=" + self.RAW_WS_KEYWORD +
                             ", comment=" + self.COMMENT_KEYWORD + ")")))
        definitions.append((self.RAW_WS_KEYWORD, "r'{whitespace}'".format(**self.directives)))
        definitions.append((self.COMMENT_KEYWORD, "r'{comment}'".format(**self.directives)))

        # prepare parser class header and docstring and
        # add EBNF grammar to the doc string of the parser class

        article = 'an ' if self.grammar_name[0:1] in "AaEeIiOoUu" else 'a '  # what about 'hour', 'universe' etc.?
        declarations = ['class ' + self.grammar_name +
                        'Grammar(Grammar):',
                        'r"""Parser for ' + article + self.grammar_name +
                        ' source file' +
                        (', with this grammar:' if self.grammar_source else '.')]
        definitions.append(('parser_initialization__', '"upon instantiation"'))
        if self.grammar_source:
            definitions.append(('source_hash__',
                                '"%s"' % md5(self.grammar_source, __version__)))
            declarations.append('')
            declarations += [line for line in self.grammar_source.split('\n')]
            while declarations[-1].strip() == '':
                declarations = declarations[:-1]
        declarations.append('"""')

        # turn definitions into declarations in reverse order

        self.root_symbol = definitions[0][0] if definitions else ""
        definitions.reverse()
        declarations += [symbol + ' = Forward()'
                         for symbol in sorted(list(self.recursive))]
        for symbol, statement in definitions:
            if symbol in self.recursive:
                declarations += [symbol + '.set(' + statement + ')']
            else:
                declarations += [symbol + ' = ' + statement]

        # check for symbols used but never defined

        defined_symbols = set(self.rules.keys()) | self.RESERVED_SYMBOLS
        for symbol in self.symbols:
            if symbol not in defined_symbols:
                self.symbols[symbol].add_error("Missing definition for symbol '%s'" % symbol)
                # root_node.error_flag = True

        # check for unconnected rules

        defined_symbols.difference_update(self.RESERVED_SYMBOLS)

        def remove_connections(symbol):
            """Recursively removes all symbols which appear in the
            definiens of a particular symbol."""
            if symbol in defined_symbols:
                defined_symbols.remove(symbol)
                for related in self.rules[symbol][1:]:
                    remove_connections(str(related))

        remove_connections(self.root_symbol)
        for leftover in defined_symbols:
            self.rules[leftover][0].add_error(
                ('Rule "%s" is not connected to parser root "%s" !') %
                (leftover, self.root_symbol), Error.WARNING)

        # set root_symbol parser and assemble python grammar definition

        if self.root_symbol and 'root__' not in self.rules:
            declarations.append('root__ = ' + self.root_symbol)
        declarations.append('')
        self._result = '\n    '.join(declarations) \
                       + GRAMMAR_FACTORY.format(NAME=self.grammar_name)
        return self._result


    ## compilation methods

    def on_syntax(self, node: Node) -> str:
        definitions = []  # type: List[Tuple[str, str]]

        # drop the wrapping sequence node
        if len(node.children) == 1 and not node.children[0].parser.name:
            node = node.children[0]

        # compile definitions and directives and collect definitions
        for nd in node.children:
            if nd.parser.name == "definition":
                definitions.append(self.compile(nd))
            else:
                assert nd.parser.name == "directive", nd.as_sxpr()
                self.compile(nd)
            node.error_flag = max(node.error_flag, nd.error_flag)
        self.definitions.update(definitions)

        return self.assemble_parser(definitions, node)


    def on_definition(self, node: Node) -> Tuple[str, str]:
        rule = node.children[0].content
        if rule in self.rules:
            first = self.rules[rule][0]
            if not first._errors:
                first.add_error('First definition of rule "%s" '
                                'followed by illegal redefinitions.' % rule)
            node.add_error('A rule with name "%s" has already been defined earlier.' % rule)
        elif rule in EBNFCompiler.RESERVED_SYMBOLS:
            node.add_error('Symbol "%s" is a reserved symbol.' % rule)
        elif not sane_parser_name(rule):
            node.add_error('Illegal symbol "%s". Symbols must not start or '
                           ' end with a double underscore "__".' % rule)
        elif rule in self.directives['tokens']:
            node.add_error('Symbol "%s" has already been defined as '
                           'a preprocessor token.' % rule)
        elif keyword.iskeyword(rule):
            node.add_error('Python keyword "%s" may not be used as a symbol. '
                           % rule + '(This may change in the future.)')
        try:
            self.current_symbols = [node]
            self.rules[rule] = self.current_symbols
            defn = self.compile(node.children[1])
            if rule in self.variables:
                defn = 'Capture(%s)' % defn
                self.variables.remove(rule)
            elif defn.find("(") < 0:
                # assume it's a synonym, like 'page = REGEX_PAGE_NR'
                defn = 'Synonym(%s)' % defn
        except TypeError as error:
            from traceback import extract_tb
            trace = str(extract_tb(error.__traceback__)[-1])
            errmsg = "%s (TypeError: %s; %s)\n%s" \
                     % (EBNFCompiler.AST_ERROR, str(error), trace, node.as_sxpr())
            node.add_error(errmsg)
            rule, defn = rule + ':error', '"' + errmsg + '"'
        return rule, defn


    def _check_rx(self, node: Node, rx: str) -> str:
        """
        Checks whether the string `rx` represents a valid regular
        expression. Makes sure that multi-line regular expressions are
        prepended by the verbose-flag ('x'). Returns the regular expression string.
        """
        flags = self.re_flags | {'x'} if rx.find('\n') >= 0 else self.re_flags
        if flags:  rx = "(?%s)%s" % ("".join(flags), rx)
        try:
            re.compile(rx)
        except Exception as re_error:
            node.add_error("malformed regular expression %s: %s" %
                           (repr(rx), str(re_error)))
        return rx


    def on_directive(self, node: Node) -> str:
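        # Handles EBNF directives such as `@ whitespace = linefeed` or
        # `@ literalws = right` (illustrative examples: the former replaces
        # the implicit whitespace regex by the predefined 'linefeed' pattern
        # from EBNFCompiler.WHITESPACE, the latter stores ['right'] under
        # self.directives['literalws']).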
        key = node.children[0].content.lower()
        assert key not in self.directives['tokens']

        if key not in self.REPEATABLE_DIRECTIVES:
            if key in self.defined_directives:
                node.add_error('Directive "%s" has already been defined earlier. ' % key + \
                               'Later definition will be ignored!',
                               code=Error.REDEFINED_DIRECTIVE_WARNING)
                return ""
            self.defined_directives.add(key)

        if key in {'comment', 'whitespace'}:
            if node.children[1].parser.name == "list_":
                if len(node.children[1].result) != 1:
                    node.add_error('Directive "%s" must have one, but not %i values.' %
                                   (key, len(node.children[1].result)))
                value = self.compile(node.children[1]).pop()
                if key == 'whitespace' and value in EBNFCompiler.WHITESPACE:
                    value = EBNFCompiler.WHITESPACE[value]  # replace whitespace-name by regex
                else:
                    node.add_error('Value "%s" not allowed for directive "%s".' % (value, key))
            else:
                value = node.children[1].content.strip("~")  # cast(str, node.children[
                # 1].result).strip("~")
                if value != node.children[1].content:  # cast(str, node.children[1].result):
                    node.add_error("Whitespace marker '~' not allowed in definition of "
                                   "%s regular expression." % key)
                if value[0] + value[-1] in {'""', "''"}:
                    value = escape_re(value[1:-1])
                elif value[0] + value[-1] == '//':
                    value = self._check_rx(node, value[1:-1])
                if key == 'whitespace' and not re.match(value, ''):
                    node.add_error("Implicit whitespace should always match the empty string, "
                                   "/%s/ does not." % value)
            self.directives[key] = value

        elif key == 'ignorecase':
            if node.children[1].content.lower() not in {"off", "false", "no"}:
                self.re_flags.add('i')

        # elif key == 'testing':
        #     value = node.children[1].content
        #     self.directives['testing'] = value.lower() not in {"off", "false", "no"}

        elif key == 'literalws':
            value = {item.lower() for item in self.compile(node.children[1])}
            if ((value - {'left', 'right', 'both', 'none'})
                    or ('none' in value and len(value) > 1)):
                node.add_error('Directive "literalws" allows the values '
                               '`left`, `right`, `both` or `none`, '
                               'but not `%s`' % ", ".join(value))
            wsp = {'left', 'right'} if 'both' in value \
                else {} if 'none' in value else value
            self.directives[key] = list(wsp)

        elif key in {'tokens', 'preprocessor_tokens'}:
            tokens = self.compile(node.children[1])
            redeclared = self.directives['tokens'] & tokens
            if redeclared:
                node.add_error('Tokens %s have already been declared earlier. '
                               % str(redeclared) + 'Later declaration will be ignored',
                               code=Error.REDECLARED_TOKEN_WARNING)
            self.directives['tokens'] |= tokens - redeclared

        elif key.endswith('_filter'):
            filter_set = self.compile(node.children[1])
            if not isinstance(filter_set, set) or len(filter_set) != 1:
                node.add_error('Directive "%s" accepts exactly one symbol, not %s'
                               % (key, str(filter_set)))
            self.directives['filter'][key[:-7]] = filter_set.pop()

        else:
            node.add_error('Unknown directive %s ! (Known ones are %s .)' %
                           (key, ', '.join(list(self.directives.keys()))))

        return ""


    def non_terminal(self, node: Node, parser_class: str, custom_args: List[str]=[]) -> str:
        """
        Compiles any non-terminal, where `parser_class` indicates the Parser class
        name for the particular non-terminal.
        """
        arguments = [self.compile(r) for r in node.children] + custom_args
        node.error_flag = max(node.error_flag, max(t.error_flag for t in node.children))
        return parser_class + '(' + ', '.join(arguments) + ')'


    def on_expression(self, node) -> str:
        # TODO: Add check for errors like "a" | "ab" (which will always yield a, even for ab)
        return self.non_terminal(node, 'Alternative')


    def on_term(self, node) -> str:
        # Basically, the following code does only this:
        #       return self.non_terminal(node, 'Series')
        # What makes it (look) more complicated is the handling of the
        # mandatory §-operator
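        # Illustration: a term like `a §b c` compiles to
        # `Series(a, b, c, mandatory=1)`, i.e. all factors from the second
        # one onward are treated as mandatory.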
        mandatory_marker = []
        filtered_children = []
        i = 0
        for nd in node.children:
            if nd.parser.ptype == TOKEN_PTYPE and nd.content == "§":
                mandatory_marker.append(i)
                if i == 0:
                    nd.add_error('First item of a series should not be mandatory.',
                                 Error.WARNING)
                elif len(mandatory_marker) > 1:
                    nd.add_error('One mandatory marker (§) sufficient to declare the '
                                 'rest of the series as mandatory.', Error.WARNING)
            else:
                filtered_children.append(nd)
                i += 1
        saved_result = node.result
        node.result = tuple(filtered_children)
        custom_args = ['mandatory=%i' % mandatory_marker[0]] if mandatory_marker else []
        compiled = self.non_terminal(node, 'Series', custom_args)
        node.result = saved_result
        return compiled


    def on_factor(self, node: Node) -> str:
        assert node.children
        assert len(node.children) >= 2, node.as_sxpr()
        prefix = node.children[0].content
        custom_args = []  # type: List[str]

        if prefix in {'::', ':'}:
            assert len(node.children) == 2
            arg = node.children[-1]
            if arg.parser.name != 'symbol':
                node.add_error(('Retrieve Operator "%s" requires a symbol, '
                                'and not a %s.') % (prefix, str(arg.parser)))
                return str(arg.result)
            if str(arg) in self.directives['filter']:
                custom_args = ['rfilter=%s' % self.directives['filter'][str(arg)]]
            self.variables.add(str(arg))  # cast(str, arg.result)

        elif len(node.children) > 2:
            # shift = (Node(node.parser, node.result[1].result),)
            # node.result[1].result = shift + node.result[2:]
            node.children[1].result = (Node(node.children[1].parser, node.children[1].result),) \
                                    + node.children[2:]
            node.children[1].parser = node.parser
            node.result = (node.children[0], node.children[1])

        node.result = node.children[1:]
        try:
            parser_class = self.PREFIX_TABLE[prefix]
            result = self.non_terminal(node, parser_class, custom_args)
            if prefix[:1] == '-':
                def check(node):
                    nd = node
                    if len(nd.children) >= 1:
                        nd = nd.children[0]
                    while nd.parser.name == "symbol":
                        symlist = self.rules.get(nd.content, [])
                        if len(symlist) == 2:
                            nd = symlist[1]
                        else:
                            if len(symlist) == 1:
                                nd = symlist[0].children[1]
                            break
                    if (nd.parser.name != "regexp" or nd.content[:1] != '/'
                            or nd.content[-1:] != '/'):
                        node.add_error("Lookbehind-parser can only be used with plain RegExp-"
                                       "parsers, not with: " + nd.parser.name + nd.parser.ptype)

                if not result.startswith('RegExp('):
                    self.deferred_tasks.append(lambda: check(node))
            return result
        except KeyError:
            node.add_error('Unknown prefix "%s".' % prefix)
        return ""


    def on_option(self, node) -> str:
        return self.non_terminal(node, 'Option')


    def on_repetition(self, node) -> str:
        return self.non_terminal(node, 'ZeroOrMore')


    def on_oneormore(self, node) -> str:
        return self.non_terminal(node, 'OneOrMore')


    def on_group(self, node) -> str:
        raise EBNFCompilerError("Group nodes should have been eliminated by "
                                "AST transformation!")

    def on_unordered(self, node) -> str:
        # return self.non_terminal(node, 'Unordered')
        assert len(node.children) == 1
        nd = node.children[0]
        for child in nd.children:
            if child.parser.ptype == TOKEN_PTYPE and nd.content == "§":
                node.add_error("Unordered parser lists cannot contain mandatory (§) items.")
        args = ', '.join(self.compile(child) for child in nd.children)
        if nd.parser.name == "term":
            return "AllOf(" + args + ")"
        elif nd.parser.name == "expression":
            return "SomeOf(" + args + ")"
        else:
            node.add_error("Unordered sequence or alternative requires at least two elements.")
            return ""

    def on_symbol(self, node: Node) -> str:     # called only for symbols on the right hand side!
        symbol = node.content  # ; assert result == cast(str, node.result)
        if symbol in self.directives['tokens']: