# ebnf.py - EBNF -> Python-Parser compilation for DHParser
#
# Copyright 2016  by Eckhart Arnold (arnold@badw.de)
#                 Bavarian Academy of Sciences an Humanities (badw.de)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.  See the License for the specific language governing
# permissions and limitations under the License.


"""
Module ``ebnf`` provides a self-hosting parser for EBNF-Grammars as
well as an EBNF-compiler that compiles an EBNF-Grammar into a
DHParser-based Grammar class that can be executed to parse source text
conforming to this grammar into concrete syntax trees.
"""


import keyword
from collections import OrderedDict
from functools import partial

from DHParser.compile import CompilerError, Compiler
from DHParser.error import Error
from DHParser.parse import Grammar, mixin_comment, Forward, RegExp, Whitespace, \
    NegativeLookahead, Alternative, Series, Option, OneOrMore, ZeroOrMore, Token
from DHParser.preprocess import nil_preprocessor, PreprocessorFunc
from DHParser.syntaxtree import Node, RootNode, WHITESPACE_PTYPE, TOKEN_PTYPE
from DHParser.toolkit import load_if_file, escape_re, md5, sane_parser_name, re, expand_table, \
    GLOBALS, typing
from DHParser.transform import TransformationFunc, traverse, remove_brackets, \
    reduce_single_child, replace_by_single_child, remove_expendables, \
    remove_tokens, flatten, forbid, assert_content, remove_infix_operator
from DHParser.versionnumber import __version__
from typing import Callable, Dict, List, Set, Tuple, Any


__all__ = ('get_ebnf_preprocessor',
           'get_ebnf_grammar',
           'get_ebnf_transformer',
           'get_ebnf_compiler',
           'EBNFGrammar',
           'EBNFTransform',
           'EBNFCompilerError',
           'EBNFCompiler',
           'grammar_changed',
           'PreprocessorFactoryFunc',
           'ParserFactoryFunc',
           'TransformerFactoryFunc',
           'CompilerFactoryFunc')
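
# Typical use of this module (a sketch; `ebnf_source` and the grammar name
# 'Lexicon' are placeholders, the factory functions are the ones exported above):
#
#     ebnf_source = 'document = { word }\nword = /\\w+/~'
#     syntax_tree = get_ebnf_grammar()(ebnf_source)      # EBNF -> concrete syntax tree
#     get_ebnf_transformer()(syntax_tree)                # CST -> AST, in place
#     python_src = get_ebnf_compiler('Lexicon', ebnf_source)(syntax_tree)
#     # `python_src` now contains the source code of a Grammar class for 'Lexicon'.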


########################################################################
#
# EBNF scanning
#
########################################################################


def get_ebnf_preprocessor() -> PreprocessorFunc:
    return nil_preprocessor


########################################################################
#
# EBNF parsing
#
########################################################################


class EBNFGrammar(Grammar):
    r"""
    Parser for an EBNF source file, with this grammar::

        # EBNF-Grammar in EBNF

        @ comment    = /#.*(?:\n|$)/                    # comments start with '#' and eat all chars up to and including '\n'
        @ whitespace = /\s*/                            # whitespace includes linefeed
        @ literalws  = right                            # trailing whitespace of literals will be ignored tacitly

        syntax     = [~//] { definition | directive } §EOF
        definition = symbol §"=" expression
        directive  = "@" §symbol "=" ( regexp | literal | list_ )

        expression = term { "|" term }
        term       = { ["§"] factor }+                       # "§" means all following factors mandatory
        factor     = [flowmarker] [retrieveop] symbol !"="   # negative lookahead to be sure it's not a definition
                   | [flowmarker] literal
                   | [flowmarker] plaintext
                   | [flowmarker] regexp
                   | [flowmarker] whitespace
                   | [flowmarker] oneormore
                   | [flowmarker] group
                   | [flowmarker] unordered
                   | repetition
                   | option

        flowmarker = "!"  | "&"                         # '!' negative lookahead, '&' positive lookahead
                   | "-!" | "-&"                        # '-' negative lookbehind, '-&' positive lookbehind
        retrieveop = "::" | ":"                         # '::' pop, ':' retrieve

        group      = "(" §expression ")"
        unordered  = "<" §expression ">"                # elements of expression in arbitrary order
        oneormore  = "{" expression "}+"
        repetition = "{" §expression "}"
        option     = "[" §expression "]"

        symbol     = /(?!\d)\w+/~                       # e.g. expression, factor, parameter_list
        literal    = /"(?:[^"]|\\")*?"/~                # e.g. "(", '+', 'while'
                   | /'(?:[^']|\\')*?'/~                # whitespace following literals will be ignored tacitly.
        plaintext  = /`(?:[^"]|\\")*?`/~                # like literal but does not eat whitespace
        regexp     = /\/(?:\\\/|[^\/])*?\//~            # e.g. /\w+/, ~/#.*(?:\n|$)/~
                                                        # '~' is a whitespace-marker, if present leading or trailing
                                                        # whitespace of a regular expression will be ignored tacitly.
        whitespace = /~/~                               # implicit or default whitespace
        list_      = /\w+/~ { "," /\w+/~ }              # comma separated list of symbols, e.g. BEGIN_LIST, END_LIST,
                                                        # BEGIN_QUOTE, END_QUOTE; see CommonMark/markdown.py for an example
        EOF = !/./
    """
    expression = Forward()
    parser_initialization__ = "upon instantiation"
    COMMENT__ = r'#.*(?:\n|$)'
    WHITESPACE__ = r'\s*'
    WSP_RE__ = mixin_comment(whitespace=WHITESPACE__, comment=COMMENT__)
    wsp__ = Whitespace(WSP_RE__)
    EOF = NegativeLookahead(RegExp('.'))
    list_ = Series(RegExp('\\w+'), wsp__, ZeroOrMore(Series(Series(Token(","), wsp__), RegExp('\\w+'), wsp__)))
    whitespace = Series(RegExp('~'), wsp__)
    regexp = Series(RegExp('/(?:\\\\/|[^/])*?/'), wsp__)
    plaintext = Series(RegExp('`(?:[^"]|\\\\")*?`'), wsp__)
    literal = Alternative(Series(RegExp('"(?:[^"]|\\\\")*?"'), wsp__), Series(RegExp("'(?:[^']|\\\\')*?'"), wsp__))
    symbol = Series(RegExp('(?!\\d)\\w+'), wsp__)
    option = Series(Series(Token("["), wsp__), expression, Series(Token("]"), wsp__), mandatory=1)
    repetition = Series(Series(Token("{"), wsp__), expression, Series(Token("}"), wsp__), mandatory=1)
    oneormore = Series(Series(Token("{"), wsp__), expression, Series(Token("}+"), wsp__))
    unordered = Series(Series(Token("<"), wsp__), expression, Series(Token(">"), wsp__), mandatory=1)
    group = Series(Series(Token("("), wsp__), expression, Series(Token(")"), wsp__), mandatory=1)
    retrieveop = Alternative(Series(Token("::"), wsp__), Series(Token(":"), wsp__))
    flowmarker = Alternative(Series(Token("!"), wsp__), Series(Token("&"), wsp__),
                             Series(Token("-!"), wsp__), Series(Token("-&"), wsp__))
    factor = Alternative(Series(Option(flowmarker), Option(retrieveop), symbol,
                                NegativeLookahead(Series(Token("="), wsp__))),
                         Series(Option(flowmarker), literal), Series(Option(flowmarker), plaintext),
                         Series(Option(flowmarker), regexp), Series(Option(flowmarker), whitespace),
                         Series(Option(flowmarker), oneormore), Series(Option(flowmarker), group),
                         Series(Option(flowmarker), unordered), repetition, option)
    term = OneOrMore(Series(Option(Series(Token("§"), wsp__)), factor))
    expression.set(Series(term, ZeroOrMore(Series(Series(Token("|"), wsp__), term))))
    directive = Series(Series(Token("@"), wsp__), symbol, Series(Token("="), wsp__),
                       Alternative(regexp, literal, list_), mandatory=1)
    definition = Series(symbol, Series(Token("="), wsp__), expression, mandatory=1)
    syntax = Series(Option(Series(wsp__, RegExp(''))),
                    ZeroOrMore(Alternative(definition, directive)), EOF, mandatory=2)
    root__ = syntax
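
# Usage sketch: an EBNFGrammar instance (usually obtained via get_ebnf_grammar()
# below) is callable and parses EBNF source text into a concrete syntax tree:
#
#     syntax_tree = get_ebnf_grammar()('key = /\\w+/~ "=" /\\w+/~')
#     # syntax_tree.errors collects any parsing errors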


def grammar_changed(grammar_class, grammar_source: str) -> bool:
    """
    Returns ``True`` if ``grammar_class`` does not reflect the latest
    changes of ``grammar_source``

    Parameters:
        grammar_class:  the parser class representing the grammar
            or the file name of a compiler suite containing the grammar
        grammar_source:  File name or string representation of the
            EBNF code of the grammar

    Returns (bool):
        True, if the source text of the grammar is different from the
        source from which the grammar class was generated
    """
    grammar = load_if_file(grammar_source)
    chksum = md5(grammar, __version__)
    if isinstance(grammar_class, str):
        # grammar_class = load_compiler_suite(grammar_class)[1]
        with open(grammar_class, 'r', encoding='utf8') as f:
            pycode = f.read()
        m = re.search(r'class \w*\(Grammar\)', pycode)
        if m:
            m = re.search('    source_hash__ *= *"([a-z0-9]*)"',
                          pycode[m.span()[1]:])
            return not (m and m.groups() and m.groups()[-1] == chksum)
        else:
            return True
    else:
        return chksum != grammar_class.source_hash__
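
# Example (sketch; `ArithmeticGrammar` and 'Arithmetic.ebnf' are hypothetical
# placeholders): regenerate the parser only if the EBNF source has changed.
#
#     if grammar_changed(ArithmeticGrammar, 'Arithmetic.ebnf'):
#         ...  # recompile the grammar, e.g. with get_ebnf_compiler()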


def get_ebnf_grammar() -> EBNFGrammar:
    try:
        grammar = GLOBALS.ebnf_grammar_singleton
        return grammar
    except AttributeError:
        GLOBALS.ebnf_grammar_singleton = EBNFGrammar()
        return GLOBALS.ebnf_grammar_singleton


########################################################################
#
# EBNF concrete to abstract syntax tree transformation and validation
#
########################################################################


EBNF_AST_transformation_table = {
    # AST Transformations for EBNF-grammar
    "<":
        remove_expendables,
    "syntax":
        [],  # otherwise '"*": replace_by_single_child' would be applied
    "directive, definition":
        remove_tokens('@', '='),
    "expression":
        [replace_by_single_child, flatten, remove_tokens('|')],  # remove_infix_operator],
    "term":
        [replace_by_single_child, flatten],  # supports both idioms:
                                             # "{ factor }+" and "factor { factor }"
    "factor, flowmarker, retrieveop":
        replace_by_single_child,
    "group":
        [remove_brackets, replace_by_single_child],
    "unordered":
        remove_brackets,
    "oneormore, repetition, option":
        [reduce_single_child, remove_brackets,
         forbid('repetition', 'option', 'oneormore'), assert_content(r'(?!§)(?:.|\n)*')],
    "symbol, literal, regexp":
        reduce_single_child,
    (TOKEN_PTYPE, WHITESPACE_PTYPE):
        reduce_single_child,
    "list_":
        [flatten, remove_infix_operator],
    "*":
        replace_by_single_child
}


def EBNFTransform() -> TransformationFunc:
    return partial(traverse, processing_table=EBNF_AST_transformation_table.copy())


def get_ebnf_transformer() -> TransformationFunc:
    try:
        transformer = GLOBALS.EBNF_transformer_singleton
    except AttributeError:
        GLOBALS.EBNF_transformer_singleton = EBNFTransform()
        transformer = GLOBALS.EBNF_transformer_singleton
    return transformer
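
# Sketch of the pipeline up to this point: parse EBNF source into a concrete
# syntax tree, then transform it in place into an abstract syntax tree.
#
#     syntax_tree = get_ebnf_grammar()(ebnf_source)   # concrete syntax tree
#     get_ebnf_transformer()(syntax_tree)             # now an abstract syntax tree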


########################################################################
#
# EBNF abstract syntax tree to Python parser compilation
#
########################################################################


PreprocessorFactoryFunc = Callable[[], PreprocessorFunc]
ParserFactoryFunc = Callable[[], Grammar]
TransformerFactoryFunc = Callable[[], TransformationFunc]
CompilerFactoryFunc = Callable[[], Compiler]

PREPROCESSOR_FACTORY = '''
def get_preprocessor() -> PreprocessorFunc:
    return {NAME}Preprocessor
'''


GRAMMAR_FACTORY = '''
def get_grammar() -> {NAME}Grammar:
    try:
        grammar = GLOBALS.{NAME}_{ID}_grammar_singleton
    except AttributeError:
        GLOBALS.{NAME}_{ID}_grammar_singleton = {NAME}Grammar()
        grammar = GLOBALS.{NAME}_{ID}_grammar_singleton
    return grammar
'''


TRANSFORMER_FACTORY = '''
def {NAME}Transform() -> TransformationDict:
    return partial(traverse, processing_table={NAME}_AST_transformation_table.copy())

def get_transformer() -> TransformationFunc:
    try:
        transformer = GLOBALS.{NAME}_{ID}_transformer_singleton
    except AttributeError:
        GLOBALS.{NAME}_{ID}_transformer_singleton = {NAME}Transform()
        transformer = GLOBALS.{NAME}_{ID}_transformer_singleton
    return transformer
'''


COMPILER_FACTORY = '''
def get_compiler(grammar_name="{NAME}", grammar_source="") -> {NAME}Compiler:
    try:
        compiler = GLOBALS.{NAME}_{ID}_compiler_singleton
        compiler.set_grammar_name(grammar_name, grammar_source)
    except AttributeError:
        GLOBALS.{NAME}_{ID}_compiler_singleton = \\
            {NAME}Compiler(grammar_name, grammar_source)
        compiler = GLOBALS.{NAME}_{ID}_compiler_singleton
    return compiler
'''
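
# The *_FACTORY templates above are expanded with str.format() when skeleton
# code is generated, e.g. (illustrative values only):
#
#     GRAMMAR_FACTORY.format(NAME='Arithmetic', ID=1)
#
# which yields the source of a get_grammar() function that caches an
# ArithmeticGrammar instance in GLOBALS as Arithmetic_1_grammar_singleton.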


class EBNFCompilerError(CompilerError):
    """Error raised by `EBNFCompiler` class. (Not compilation errors
    in the strict sense, see `CompilationError` in module ``dsl.py``)"""
    pass


class EBNFCompiler(Compiler):
    """
    Generates a Parser from an abstract syntax tree of a grammar specified
    in EBNF-Notation.

    Instances of this class must be called with the root-node of the
    abstract syntax tree from an EBNF-specification of a formal language.
    The returned value is the Python-source-code of a Grammar class for
    this language that can be used to parse texts in this language.
    See classes `parser.Compiler` and `parser.Grammar` for more information.

    Additionally, class EBNFCompiler provides helper methods to generate
    code-skeletons for a preprocessor, AST-transformation and full
    compilation of the formal language. These methods' names start with
    the prefix `gen_`.

    Attributes:
        current_symbols:  During compilation, a list containing the root
                node of the currently compiled definition as first element
                and then the nodes of the symbols that are referred to in
                the currently compiled definition.

        rules:  Dictionary that maps rule names to a list of Nodes that
                contain symbol-references in the definition of the rule.
                The first item in the list is the node of the rule-
                definition itself. Example:

                           `alternative = a | b`

                Now `[node.content for node in self.rules['alternative']]`
                yields `['alternative = a | b', 'a', 'b']`

        symbols:  A mapping of symbol names to their first usage (not
                their definition!) in the EBNF source.

        variables:  A set of symbol names that are used with the
                Pop or Retrieve operator. Because the values of these
                symbols need to be captured they are called variables.
                See `test_parser.TestPopRetrieve` for an example.

        recursive:  A set of symbols that are used recursively and
                therefore require a `Forward`-operator.

        definitions:  A dictionary of definitions. Unlike `rules`,
                this maps the symbols to their compiled definienda.

        deferred_tasks:  A list of callables that is filled during
                compilation, but that will be executed only after
                compilation has finished. Typically, it contains
                semantic checks that require information that
                is only available upon completion of compilation.

        root_symbol: The name of the root symbol.

        directives:  A dictionary of all directives and their default
                values.

        re_flags:  A set of regular expression flags to be added to all
                regular expressions found in the current parsing process

        grammar_id: a unique id for every compiled grammar. (Required for
                disambiguation of thread-local variables storing
                compiled texts.)
    """
    COMMENT_KEYWORD = "COMMENT__"
    WHITESPACE_KEYWORD = "WSP_RE__"
    RAW_WS_KEYWORD = "WHITESPACE__"
    WHITESPACE_PARSER_KEYWORD = "wsp__"
    RESERVED_SYMBOLS = {WHITESPACE_KEYWORD, RAW_WS_KEYWORD, COMMENT_KEYWORD}
    AST_ERROR = "Badly structured syntax tree. " \
                "Potentially due to erroneous AST transformation."
    PREFIX_TABLE = {'§': 'Required',
                    '&': 'Lookahead', '!': 'NegativeLookahead',
                    '-&': 'Lookbehind', '-!': 'NegativeLookbehind',
                    '::': 'Pop', ':': 'Retrieve'}
    WHITESPACE = {'horizontal': r'[\t ]*',  # default: horizontal
                  'linefeed': r'[ \t]*\n?(?!\s*\n)[ \t]*',
                  'vertical': r'\s*'}
    REPEATABLE_DIRECTIVES = {'tokens'}
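
    # Typical use (sketch): instances are obtained via get_ebnf_compiler(),
    # which is defined later in this module, and are called with the root node
    # of an EBNF abstract syntax tree; the call returns Python source code:
    #
    #     compiler = get_ebnf_compiler("Arithmetic", ebnf_source)
    #     grammar_src = compiler(ebnf_ast)   # ebnf_ast: root of the EBNF-AST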


    def __init__(self, grammar_name="", grammar_source=""):
        self.grammar_id = 0
        super(EBNFCompiler, self).__init__(grammar_name, grammar_source)
        # self._reset()


    def _reset(self):
        super(EBNFCompiler, self)._reset()
        self._result = ''           # type: str
        self.re_flags = set()       # type: Set[str]
        self.rules = OrderedDict()  # type: OrderedDict[str, List[Node]]
        self.current_symbols = []   # type: List[Node]
        self.symbols = {}           # type: Dict[str, Node]
        self.variables = set()      # type: Set[str]
        self.recursive = set()      # type: Set[str]
        self.definitions = {}       # type: Dict[str, str]
        self.deferred_tasks = []    # type: List[Callable]
        self.root_symbol = ""       # type: str
        self.directives = {'whitespace': self.WHITESPACE['vertical'],
                           'comment': '',
                           'literalws': {'right'},
                           'tokens': set(),  # alt. 'preprocessor_tokens'
                           'filter': dict()}  # alt. 'filter'
        # self.directives['ignorecase']: False
        self.defined_directives = set()  # type: Set[str]
        self.grammar_id += 1


    @property
    def result(self) -> str:
        return self._result

    # methods for generating skeleton code for preprocessor, transformer, and compiler

    def gen_preprocessor_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for a preprocessor-function for
        the previously compiled formal language.
        """
        name = self.grammar_name + "Preprocessor"
        return "def %s(text):\n    return text, lambda i: i\n" % name \
               + PREPROCESSOR_FACTORY.format(NAME=self.grammar_name)


    def gen_transformer_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for the AST-transformation for the
        previously compiled formal language.
        """
        if not self.rules:
            raise EBNFCompilerError('Compiler must be run before calling '
                                    '"gen_transformer_Skeleton()"!')
        tt_name = self.grammar_name + '_AST_transformation_table'
        transtable = [tt_name + ' = {',
                      '    # AST Transformations for the ' + self.grammar_name + '-grammar']
        transtable.append('    "<": remove_empty,')
        for name in self.rules:
            transformations = '[]'
            rule = self.definitions[name]
            if rule.startswith('Alternative'):
                transformations = '[replace_or_reduce]'
            elif rule.startswith('Synonym'):
                transformations = '[reduce_single_child]'
            transtable.append('    "' + name + '": %s,' % transformations)
        transtable.append('    ":Token": reduce_single_child,')
        transtable += ['    "*": replace_by_single_child', '}', '']
        transtable += [TRANSFORMER_FACTORY.format(NAME=self.grammar_name, ID=self.grammar_id)]
        return '\n'.join(transtable)


    def gen_compiler_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for a Compiler-class for the
        previously compiled formal language.
        """
        if not self.rules:
            raise EBNFCompilerError('Compiler has not been run before calling '
                                    '"gen_Compiler_Skeleton()"!')
        compiler = ['class ' + self.grammar_name + 'Compiler(Compiler):',
                    '    """Compiler for the abstract-syntax-tree of a '
                    + self.grammar_name + ' source file.',
                    '    """', '',
                    '    def __init__(self, grammar_name="'
                    + self.grammar_name + '", grammar_source=""):',
                    '        super(' + self.grammar_name
                    + 'Compiler, self).__init__(grammar_name, grammar_source)',
                    r"        assert re.match('\w+\Z', grammar_name)", '',
                    '    def _reset(self):',
                    '        super()._reset()',
                    '        # initialize your variables here, not in the constructor!']
        for name in self.rules:
            method_name = Compiler.method_name(name)
            if name == self.root_symbol:
                compiler += ['    def ' + method_name + '(self, node):',
                             '        return self.fallback_compiler(node)', '']
            else:
                compiler += ['    # def ' + method_name + '(self, node):',
                             '    #     return node', '']
        compiler += [COMPILER_FACTORY.format(NAME=self.grammar_name, ID=self.grammar_id)]
        return '\n'.join(compiler)

    def verify_transformation_table(self, transtable):
        """
        Checks for symbols that occur in the transformation-table but have
        never been defined in the grammar. Usually, this kind of
        inconsistency results from an error like a typo in the transformation
        table.
        """
        assert self._dirty_flag
        table_entries = set(expand_table(transtable).keys()) - {'*', '<', '>', '~'}
        symbols = self.rules.keys()
        messages = []
        for entry in table_entries:
            if entry not in symbols and not entry.startswith(":"):
                messages.append(Error(('Symbol "%s" is not defined in grammar %s but appears in '
                                       'the transformation table!') % (entry, self.grammar_name),
                                      0, Error.UNDEFINED_SYMBOL_IN_TRANSFORMATION_TABLE))
        return messages

    def verify_compiler(self, compiler):
        """
        Checks for on_XXXX()-methods that occur in the compiler, although XXXX
        has never been defined in the grammar. Usually, this kind of
        inconsistency results from an error like a typo in the compiler-code.
        """
        pass  # TODO: add verification code here

    def assemble_parser(self, definitions: List[Tuple[str, str]], root_node: Node) -> str:
        """
        Creates the Python code for the parser after compilation of
        the EBNF-Grammar
        """

        # execute deferred tasks, for example semantic checks that cannot
        # be done before the symbol table is complete

        for task in self.deferred_tasks:
            task()

        # provide for capturing of symbols that are variables, i.e. the
        # value of which will be retrieved at some point during the parsing process

        if self.variables:
            for i in range(len(definitions)):
                if definitions[i][0] in self.variables:
                    definitions[i] = (definitions[i][0], 'Capture(%s)' % definitions[i][1])

        # add special fields for Grammar class

        definitions.append((self.WHITESPACE_PARSER_KEYWORD,
                            'Whitespace(%s)' % self.WHITESPACE_KEYWORD))
        definitions.append((self.WHITESPACE_KEYWORD,
                            ("mixin_comment(whitespace=" + self.RAW_WS_KEYWORD
                             + ", comment=" + self.COMMENT_KEYWORD + ")")))
        definitions.append((self.RAW_WS_KEYWORD, "r'{whitespace}'".format(**self.directives)))
        definitions.append((self.COMMENT_KEYWORD, "r'{comment}'".format(**self.directives)))

        # prepare parser class header and docstring and
        # add EBNF grammar to the doc string of the parser class

        article = 'an ' if self.grammar_name[0:1] in "AaEeIiOoUu" else 'a '  # what about 'hour', 'universe' etc.?
        declarations = ['class ' + self.grammar_name
                        + 'Grammar(Grammar):',
                        'r"""Parser for ' + article + self.grammar_name
                        + ' source file'
                        + (', with this grammar:' if self.grammar_source else '.')]
        definitions.append(('parser_initialization__', '"upon instantiation"'))
        if self.grammar_source:
            definitions.append(('source_hash__',
                                '"%s"' % md5(self.grammar_source, __version__)))
            declarations.append('')
            declarations += [line for line in self.grammar_source.split('\n')]
            while declarations[-1].strip() == '':
                declarations = declarations[:-1]
        declarations.append('"""')

        # turn definitions into declarations in reverse order

        self.root_symbol = definitions[0][0] if definitions else ""
        definitions.reverse()
        declarations += [symbol + ' = Forward()'
                         for symbol in sorted(list(self.recursive))]
        for symbol, statement in definitions:
            if symbol in self.recursive:
                declarations += [symbol + '.set(' + statement + ')']
            else:
                declarations += [symbol + ' = ' + statement]

        # check for symbols used but never defined

        defined_symbols = set(self.rules.keys()) | self.RESERVED_SYMBOLS
        for symbol in self.symbols:
            if symbol not in defined_symbols:
                self.tree.new_error(self.symbols[symbol],
                                    "Missing definition for symbol '%s'" % symbol)
                # root_node.error_flag = True

        # check for unconnected rules

        defined_symbols.difference_update(self.RESERVED_SYMBOLS)

        def remove_connections(symbol):
            """Recursively removes all symbols which appear in the
            definiens of a particular symbol."""
            if symbol in defined_symbols:
                defined_symbols.remove(symbol)
                for related in self.rules[symbol][1:]:
                    remove_connections(str(related))

        remove_connections(self.root_symbol)
        for leftover in defined_symbols:
            self.tree.new_error(self.rules[leftover][0],
                                ('Rule "%s" is not connected to parser root "%s" !') %
                                (leftover, self.root_symbol), Error.WARNING)

        # set root_symbol parser and assemble python grammar definition

        if self.root_symbol and 'root__' not in self.rules:
            declarations.append('root__ = ' + self.root_symbol)
        declarations.append('')
        self._result = '\n    '.join(declarations) \
                       + GRAMMAR_FACTORY.format(NAME=self.grammar_name, ID=self.grammar_id)
        return self._result


    ## compilation methods

    def on_syntax(self, node: Node) -> str:
        definitions = []  # type: List[Tuple[str, str]]

        # drop the wrapping sequence node
        if len(node.children) == 1 and not node.children[0].parser.name:
            node = node.children[0]

        # compile definitions and directives and collect definitions
        for nd in node.children:
            if nd.parser.name == "definition":
                definitions.append(self.compile(nd))
            else:
                assert nd.parser.name == "directive", nd.as_sxpr()
                self.compile(nd)
            # node.error_flag = max(node.error_flag, nd.error_flag)
        self.definitions.update(definitions)

        return self.assemble_parser(definitions, node)


    def on_definition(self, node: Node) -> Tuple[str, str]:
        rule = node.children[0].content
        if rule in self.rules:
            first = self.rules[rule][0]
            if not first.errors:
                self.tree.new_error(first, 'First definition of rule "%s" '
                                    'followed by illegal redefinitions.' % rule)
            self.tree.new_error(node, 'A rule "%s" has already been defined earlier.' % rule)
        elif rule in EBNFCompiler.RESERVED_SYMBOLS:
            self.tree.new_error(node, 'Symbol "%s" is a reserved symbol.' % rule)
        elif not sane_parser_name(rule):
            self.tree.new_error(node, 'Illegal symbol "%s". Symbols must not start or '
                                'end with a double underscore "__".' % rule)
        elif rule in self.directives['tokens']:
            self.tree.new_error(node, 'Symbol "%s" has already been defined as '
                                'a preprocessor token.' % rule)
        elif keyword.iskeyword(rule):
            self.tree.new_error(node, 'Python keyword "%s" may not be used as a symbol. '
                                % rule + '(This may change in the future.)')
        try:
            self.current_symbols = [node]
            self.rules[rule] = self.current_symbols
            defn = self.compile(node.children[1])
            if rule in self.variables:
                defn = 'Capture(%s)' % defn
                self.variables.remove(rule)
            elif defn.find("(") < 0:
                # assume it's a synonym, like 'page = REGEX_PAGE_NR'
                defn = 'Synonym(%s)' % defn
        except TypeError as error:
            from traceback import extract_tb
            trace = str(extract_tb(error.__traceback__)[-1])
            errmsg = "%s (TypeError: %s; %s)\n%s" \
                     % (EBNFCompiler.AST_ERROR, str(error), trace, node.as_sxpr())
            self.tree.new_error(node, errmsg)
            rule, defn = rule + ':error', '"' + errmsg + '"'
        return rule, defn
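
    # Example (sketch): a definition like `page = REGEX_PAGE_NR` (cf. the comment
    # in the code above) yields the pair ('page', 'Synonym(REGEX_PAGE_NR)'), while
    # symbols used with the retrieve or pop operators are wrapped in Capture(...).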


    def _check_rx(self, node: Node, rx: str) -> str:
        """
        Checks whether the string `rx` represents a valid regular
        expression. Makes sure that regular expressions that contain a line
        break are prefixed with the verbose-mode flag "(?x)". Returns the
        regular expression string.
        """
        flags = self.re_flags | {'x'} if rx.find('\n') >= 0 else self.re_flags
        if flags:
            rx = "(?%s)%s" % ("".join(flags), rx)
        try:
            re.compile(rx)
        except Exception as re_error:
            self.tree.new_error(node, "malformed regular expression %s: %s" %
                                (repr(rx), str(re_error)))
        return rx
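
    # Example (sketch): a regular expression containing a line break, such as
    # "\d{4}  # year\n-\d\d  # month", is returned with the verbose-mode prefix,
    # i.e. "(?x)\d{4}  # year\n-\d\d  # month", so that the re module ignores
    # insignificant whitespace and #-comments inside the pattern.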


    def on_directive(self, node: Node) -> str:
        key = node.children[0].content.lower()
        assert key not in self.directives['tokens']

        if key not in self.REPEATABLE_DIRECTIVES:
            if key in self.defined_directives:
                self.tree.new_error(node, 'Directive "%s" has already been defined earlier. '
                                    % key + 'Later definition will be ignored!',
                                    code=Error.REDEFINED_DIRECTIVE_WARNING)
                return ""
            self.defined_directives.add(key)

        if key in {'comment', 'whitespace'}:
            if node.children[1].parser.name == "list_":
                if len(node.children[1].result) != 1:
                    self.tree.new_error(node, 'Directive "%s" must have exactly one value, not %i.'
                                        % (key, len(node.children[1].result)))
                value = self.compile(node.children[1]).pop()
                if key == 'whitespace' and value in EBNFCompiler.WHITESPACE:
                    value = EBNFCompiler.WHITESPACE[value]  # replace whitespace-name by regex
                else:
                    self.tree.new_error(node, 'Value "%s" not allowed for directive "%s".'
                                        % (value, key))
            else:
                value = node.children[1].content.strip("~")  # cast(str, node.children[
                # 1].result).strip("~")
                if value != node.children[1].content:  # cast(str, node.children[1].result):
                    self.tree.new_error(node, "Whitespace marker '~' not allowed in definition "
                                        "of %s regular expression." % key)
                if value[0] + value[-1] in {'""', "''"}:
                    value = escape_re(value[1:-1])
                elif value[0] + value[-1] == '//':
                    value = self._check_rx(node, value[1:-1])
                if key == 'whitespace' and not re.match(value, ''):
                    self.tree.new_error(node, "Implicit whitespace should always "
                                        "match the empty string, /%s/ does not." % value)
            self.directives[key] = value

        elif key == 'ignorecase':
            if node.children[1].content.lower() not in {"off", "false", "no"}:
                self.re_flags.add('i')

        # elif key == 'testing':
        #     value = node.children[1].content
        #     self.directives['testing'] = value.lower() not in {"off", "false", "no"}

        elif key == 'literalws':
            value = {item.lower() for item in self.compile(node.children[1])}
            if ((value - {'left', 'right', 'both', 'none'})
                    or ('none' in value and len(value) > 1)):
                self.tree.new_error(node, 'Directive "literalws" allows only `left`, `right`, '
                                    '`both` or `none`, not `%s`' % ", ".join(value))
            wsp = {'left', 'right'} if 'both' in value \
                else {} if 'none' in value else value
            self.directives[key] = list(wsp)

        elif key in {'tokens', 'preprocessor_tokens'}:
            tokens = self.compile(node.children[1])
            redeclared = self.directives['tokens'] & tokens
            if redeclared:
                self.tree.new_error(node, 'Tokens %s have already been declared earlier. '
                                    % str(redeclared) + 'Later declaration will be ignored',
                                    code=Error.REDECLARED_TOKEN_WARNING)
            self.directives['tokens'] |= tokens - redeclared

        elif key.endswith('_filter'):
            filter_set = self.compile(node.children[1])
            if not isinstance(filter_set, set) or len(filter_set) != 1:
                self.tree.new_error(node, 'Directive "%s" accepts exactly one symbol, not %s'
                                    % (key, str(filter_set)))
            self.directives['filter'][key[:-7]] = filter_set.pop()

        else:
            self.tree.new_error(node, 'Unknown directive %s! (Known ones are %s.)' %
                                (key, ', '.join(list(self.directives.keys()))))
        return ""
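
    # The directives handled above correspond to EBNF lines such as (sketch):
    #
    #     @ comment    = /#.*(?:\n|$)/
    #     @ whitespace = linefeed
    #     @ literalws  = right
    #     @ ignorecase = True
    #     @ tokens     = BEGIN_QUOTE, END_QUOTE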


    def non_terminal(self, node: Node, parser_class: str, custom_args: List[str] = []) -> str:
        """
        Compiles any non-terminal, where `parser_class` indicates the Parser class
        name for the particular non-terminal.
        """
        arguments = [self.compile(r) for r in node.children] + custom_args
        return parser_class + '(' + ', '.join(arguments) + ')'


    def on_expression(self, node) -> str:
        # TODO: Add check for errors like "a" | "ab" (which will always yield a, even for ab)
        return self.non_terminal(node, 'Alternative')


    def on_term(self, node) -> str:
        # Basically, the following code does only this:
        #       return self.non_terminal(node, 'Series')
        # What makes it (look) more complicated is the handling of the
        # mandatory §-operator
        mandatory_marker = []
        filtered_children = []  # type: List[Node]
        for nd in node.children:
            if nd.parser.ptype == TOKEN_PTYPE and nd.content == "§":
                mandatory_marker.append(len(filtered_children))
                # if len(filtered_children) == 0:
                #     self.tree.new_error(nd.pos, 'First item of a series should not be mandatory.',
                #                         Error.WARNING)
                if len(mandatory_marker) > 1:
                    self.tree.new_error(nd, 'One mandatory marker (§) is sufficient to declare '
                                        'the rest of the series as mandatory.', Error.WARNING)
            else:
                filtered_children.append(nd)
        saved_result = node.result
        node.result = tuple(filtered_children)
        if len(filtered_children) == 1:
            compiled = self.non_terminal(node, 'Required')
        else:
            custom_args = ['mandatory=%i' % mandatory_marker[0]] if mandatory_marker else []
            compiled = self.non_terminal(node, 'Series', custom_args)
        node.result = saved_result
        return compiled
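
    # Example (sketch): a term like `"(" §expression ")"` is compiled to
    #     Series(Series(Token("("), wsp__), expression, Series(Token(")"), wsp__), mandatory=1)
    # i.e. everything from the first §-marker onward is treated as mandatory.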


    def on_factor(self, node: Node) -> str:
        assert node.children
        assert len(node.children) >= 2, node.as_sxpr()