# ebnf.py - EBNF -> Python-Parser compilation for DHParser
#
# Copyright 2016  by Eckhart Arnold (arnold@badw.de)
#                 Bavarian Academy of Sciences and Humanities (badw.de)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.  See the License for the specific language governing
# permissions and limitations under the License.


"""
Module ``ebnf`` provides a self-hosting parser for EBNF-Grammars as
well as an EBNF-compiler that compiles an EBNF-Grammar into a
DHParser-based Grammar class that can be executed to parse source text
conforming to this grammar into concrete syntax trees.
"""

from collections import OrderedDict
from functools import partial
import keyword
import os
from typing import Callable, Dict, List, Set, Tuple, Sequence, Union, Optional, Any, cast

from DHParser.compile import CompilerError, Compiler, compile_source, visitor_name
from DHParser.configuration import THREAD_LOCALS, get_config_value
from DHParser.error import Error
from DHParser.parse import Grammar, mixin_comment, mixin_noempty, Forward, RegExp, \
    DropRegExp, NegativeLookahead, Alternative, Series, Option, OneOrMore, ZeroOrMore, \
    Token, GrammarError
from DHParser.preprocess import nil_preprocessor, PreprocessorFunc
from DHParser.syntaxtree import Node, WHITESPACE_PTYPE, TOKEN_PTYPE
from DHParser.toolkit import load_if_file, escape_re, md5, sane_parser_name, re, expand_table, \
    unrepr, compile_python_object, DHPARSER_PARENTDIR, RX_NEVER_MATCH
from DHParser.transform import TransformationFunc, traverse, remove_brackets, \
    reduce_single_child, replace_by_single_child, remove_whitespace, remove_empty, \
    remove_tokens, flatten, forbid, assert_content
from DHParser.versionnumber import __version__


__all__ = ('get_ebnf_preprocessor',
           'get_ebnf_grammar',
           'get_ebnf_transformer',
           'get_ebnf_compiler',
           'EBNFGrammar',
           'EBNFTransform',
           'EBNFCompilerError',
           'EBNFCompiler',
           'grammar_changed',
           'compile_ebnf',
           'PreprocessorFactoryFunc',
           'ParserFactoryFunc',
           'TransformerFactoryFunc',
           'CompilerFactoryFunc')


########################################################################
#
# source code support
#
########################################################################


DHPARSER_IMPORTS = '''
import collections
from functools import partial
import os
import sys

if r'{dhparser_parentdir}' not in sys.path:
    sys.path.append(r'{dhparser_parentdir}')

try:
    import regex as re
except ImportError:
    import re
from DHParser import start_logging, suspend_logging, resume_logging, is_filename, load_if_file, \\
    Grammar, Compiler, nil_preprocessor, PreprocessorToken, Whitespace, Drop, \\
    Lookbehind, Lookahead, Alternative, Pop, Token, Synonym, AllOf, SomeOf, \\
    Unordered, Option, NegativeLookbehind, OneOrMore, RegExp, Retrieve, Series, Capture, \\
    ZeroOrMore, Forward, NegativeLookahead, Required, mixin_comment, compile_source, \\
    grammar_changed, last_value, counterpart, PreprocessorFunc, is_empty, remove_if, \\
    Node, TransformationFunc, TransformationDict, transformation_factory, traverse, \\
    remove_children_if, move_adjacent, normalize_whitespace, is_anonymous, matches_re, \\
    reduce_single_child, replace_by_single_child, replace_or_reduce, remove_whitespace, \\
    replace_by_children, remove_empty, remove_tokens, flatten, is_insignificant_whitespace, \\
    merge_adjacent, collapse, collapse_children_if, replace_content, WHITESPACE_PTYPE, TOKEN_PTYPE, \\
    remove_nodes, remove_content, remove_brackets, change_tag_name, remove_anonymous_tokens, \\
    keep_children, is_one_of, not_one_of, has_content, apply_if, peek, \\
    remove_anonymous_empty, keep_nodes, traverse_locally, strip, lstrip, rstrip, \\
    replace_content, replace_content_by, forbid, assert_content, remove_infix_operator, \\
    error_on, recompile_grammar, left_associative, lean_left, set_config_value, \\
    get_config_value, XML_SERIALIZATION, SXPRESSION_SERIALIZATION, COMPACT_SERIALIZATION, \\
    JSON_SERIALIZATION, access_thread_locals, access_presets, finalize_presets, ErrorCode, \\
    RX_NEVER_MATCH, set_tracer, resume_notices_on, trace_history
'''.format(dhparser_parentdir=DHPARSER_PARENTDIR)


########################################################################
#
# EBNF scanning
#
########################################################################


def get_ebnf_preprocessor() -> PreprocessorFunc:
    """
    Returns the preprocessor function for the EBNF compiler.
    As of now, no preprocessing is needed for EBNF-sources. Therefore,
    just a dummy function is returned.
    """
    return nil_preprocessor


########################################################################
#
# EBNF parsing
#
########################################################################


class EBNFGrammar(Grammar):
    r"""
    Parser for an EBNF source file, with this grammar:

    @ comment    = /#.*(?:\n|$)/                    # comments start with '#' and eat all chars up to and including '\n'
    @ whitespace = /\s*/                            # whitespace includes linefeed
    @ literalws  = right                            # trailing whitespace of literals will be ignored tacitly
    @ drop       = whitespace                       # do not include whitespace in concrete syntax tree

    #: top-level

    syntax     = [~//] { definition | directive } §EOF
    definition = symbol §"=" expression
    directive  = "@" §symbol "=" (regexp | literal | symbol) { "," (regexp | literal | symbol) }

    #: components

    expression = term { "|" term }
    term       = { ["§"] factor }+                       # "§" means all following factors mandatory
    factor     = [flowmarker] [retrieveop] symbol !"="   # negative lookahead to be sure it's not a definition
               | [flowmarker] literal
               | [flowmarker] plaintext
               | [flowmarker] regexp
               | [flowmarker] whitespace
               | [flowmarker] oneormore
               | [flowmarker] group
               | [flowmarker] unordered
               | repetition
               | option

    #: flow-operators

    flowmarker = "!"  | "&"                         # '!' negative lookahead, '&' positive lookahead
               | "-!" | "-&"                        # '-' negative lookbehind, '-&' positive lookbehind
    retrieveop = "::" | ":"                         # '::' pop, ':' retrieve

    #: groups

    group      = "(" §expression ")"
    unordered  = "<" §expression ">"                # elements of expression in arbitrary order
    oneormore  = "{" expression "}+"
    repetition = "{" §expression "}"
    option     = "[" §expression "]"

    #: leaf-elements

    symbol     = /(?!\d)\w+/~                       # e.g. expression, factor, parameter_list
    literal    = /"(?:(?<!\\)\\"|[^"])*?"/~         # e.g. "(", '+', 'while'
               | /'(?:(?<!\\)\\'|[^'])*?'/~         # whitespace following literals will be ignored tacitly.
    plaintext  = /`(?:(?<!\\)\\`|[^`])*?`/~         # like literal but does not eat whitespace
    regexp     = /\/(?:(?<!\\)\\(?:\/)|[^\/])*?\//~     # e.g. /\w+/, ~/#.*(?:\n|$)/~
    whitespace = /~/~                               # insignificant whitespace

    EOF = !/./
    """
    expression = Forward()
    source_hash__ = "82a7c668f86b83f86515078e6c9093ed"
    static_analysis_pending__ = []
    parser_initialization__ = ["upon instantiation"]
    COMMENT__ = r'#.*(?:\n|$)'
    WHITESPACE__ = r'\s*'
    WSP_RE__ = mixin_comment(whitespace=WHITESPACE__, comment=COMMENT__)
    wsp__ = DropRegExp(WSP_RE__)
    EOF = NegativeLookahead(RegExp('.'))
    whitespace = Series(RegExp('~'), wsp__)
    regexp = Series(RegExp('/(?:(?<!\\\\)\\\\(?:/)|[^/])*?/'), wsp__)
    plaintext = Series(RegExp('`(?:(?<!\\\\)\\\\`|[^`])*?`'), wsp__)
    literal = Alternative(Series(RegExp('"(?:(?<!\\\\)\\\\"|[^"])*?"'), wsp__),
                          Series(RegExp("'(?:(?<!\\\\)\\\\'|[^'])*?'"), wsp__))
    symbol = Series(RegExp('(?!\\d)\\w+'), wsp__)
    option = Series(Series(Token("["), wsp__), expression, Series(Token("]"), wsp__), mandatory=1)
    repetition = Series(Series(Token("{"), wsp__), expression, Series(Token("}"), wsp__), mandatory=1)
    oneormore = Series(Series(Token("{"), wsp__), expression, Series(Token("}+"), wsp__))
    unordered = Series(Series(Token("<"), wsp__), expression, Series(Token(">"), wsp__), mandatory=1)
    group = Series(Series(Token("("), wsp__), expression, Series(Token(")"), wsp__), mandatory=1)
    retrieveop = Alternative(Series(Token("::"), wsp__), Series(Token(":"), wsp__))
    flowmarker = Alternative(Series(Token("!"), wsp__), Series(Token("&"), wsp__),
                             Series(Token("-!"), wsp__), Series(Token("-&"), wsp__))
    factor = Alternative(Series(Option(flowmarker), Option(retrieveop), symbol,
                                NegativeLookahead(Series(Token("="), wsp__))), Series(Option(flowmarker), literal),
                         Series(Option(flowmarker), plaintext), Series(Option(flowmarker), regexp),
                         Series(Option(flowmarker), whitespace), Series(Option(flowmarker), oneormore),
                         Series(Option(flowmarker), group), Series(Option(flowmarker), unordered), repetition, option)
    term = OneOrMore(Series(Option(Series(Token("§"), wsp__)), factor))
    expression.set(Series(term, ZeroOrMore(Series(Series(Token("|"), wsp__), term))))
    directive = Series(Series(Token("@"), wsp__), symbol, Series(Token("="), wsp__),
                       Alternative(regexp, literal, symbol),
                       ZeroOrMore(Series(Series(Token(","), wsp__), Alternative(regexp, literal, symbol))), mandatory=1)
    definition = Series(symbol, Series(Token("="), wsp__), expression, mandatory=1)
    syntax = Series(Option(Series(wsp__, RegExp(''))), ZeroOrMore(Alternative(definition, directive)), EOF, mandatory=2)
    root__ = syntax
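# Illustrative usage sketch (kept as a comment so that nothing is executed on
# import; the EBNF snippet is made up): an EBNFGrammar instance is callable
# and returns the concrete syntax tree of the parsed EBNF source.
#
#     ebnf_source = 'number = /\\d+/~'
#     cst = EBNFGrammar()(ebnf_source)
#     print(cst.as_sxpr())  # inspect the concrete syntax tree as an S-expression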


def grammar_changed(grammar_class, grammar_source: str) -> bool:
    """
    Returns ``True`` if ``grammar_class`` does not reflect the latest
    changes of ``grammar_source``

    Parameters:
        grammar_class:  the parser class representing the grammar
            or the file name of a compiler suite containing the grammar
        grammar_source:  File name or string representation of the
            EBNF code of the grammar

    Returns (bool):
        True, if the source text of the grammar is different from the
        source from which the grammar class was generated
    """
    grammar = load_if_file(grammar_source)
    chksum = md5(grammar, __version__)
    if isinstance(grammar_class, str):
        # grammar_class = load_compiler_suite(grammar_class)[1]
        with open(grammar_class, 'r', encoding='utf8') as f:
            pycode = f.read()
        m = re.search(r'class \w*\(Grammar\)', pycode)
        if m:
            m = re.search('    source_hash__ *= *"([a-z0-9]*)"',
                          pycode[m.span()[1]:])
            return not (m and m.groups() and m.groups()[-1] == chksum)
        else:
            return True
    else:
        return chksum != grammar_class.source_hash__
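# Usage sketch (the file names below are purely illustrative): grammar_changed()
# can be used to decide whether a generated parser module must be regenerated
# from its EBNF source.
#
#     if grammar_changed('arithmeticParser.py', 'arithmetic.ebnf'):
#         ...  # recompile the grammar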


def get_ebnf_grammar() -> EBNFGrammar:
    try:
        grammar = THREAD_LOCALS.ebnf_grammar_singleton
        return grammar
    except AttributeError:
        THREAD_LOCALS.ebnf_grammar_singleton = EBNFGrammar()
        return THREAD_LOCALS.ebnf_grammar_singleton


########################################################################
#
# EBNF concrete to abstract syntax tree transformation and validation
#
########################################################################


EBNF_AST_transformation_table = {
    # AST Transformations for EBNF-grammar
    "<":
        [remove_empty],  # remove_whitespace
    "syntax":
        [],  # otherwise '"*": replace_by_single_child' would be applied
    "directive, definition":
        [flatten, remove_tokens('@', '=', ',')],
    "expression":
        [replace_by_single_child, flatten, remove_tokens('|')],  # remove_infix_operator],
    "term":
        [replace_by_single_child, flatten],  # supports both idioms:
                                             # "{ factor }+" and "factor { factor }"
    "factor, flowmarker, retrieveop":
        replace_by_single_child,
    "group":
        [remove_brackets, replace_by_single_child],
    "unordered":
        remove_brackets,
    "oneormore, repetition, option":
        [reduce_single_child, remove_brackets,
         forbid('repetition', 'option', 'oneormore'), assert_content(r'(?!§)(?:.|\n)*')],
    "symbol, literal, regexp":
        reduce_single_child,
    (TOKEN_PTYPE, WHITESPACE_PTYPE):
        reduce_single_child,
    "*":
        replace_by_single_child
}


def EBNFTransform() -> TransformationFunc:
    return partial(traverse, processing_table=EBNF_AST_transformation_table.copy())


def get_ebnf_transformer() -> TransformationFunc:
    try:
        transformer = THREAD_LOCALS.EBNF_transformer_singleton
    except AttributeError:
        THREAD_LOCALS.EBNF_transformer_singleton = EBNFTransform()
        transformer = THREAD_LOCALS.EBNF_transformer_singleton
    return transformer
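# Sketch of how the EBNF front-end defined above fits together (the EBNF
# snippet is made up for illustration): the grammar yields a concrete syntax
# tree, which the transformer reshapes in place into the abstract syntax tree
# that the EBNFCompiler below expects.
#
#     syntax_tree = get_ebnf_grammar()('word = /\\w+/~')
#     get_ebnf_transformer()(syntax_tree)
#     # syntax_tree is now the AST of the EBNF source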


########################################################################
#
# EBNF abstract syntax tree to Python parser compilation
#
########################################################################


PreprocessorFactoryFunc = Callable[[], PreprocessorFunc]
ParserFactoryFunc = Callable[[], Grammar]
TransformerFactoryFunc = Callable[[], TransformationFunc]
CompilerFactoryFunc = Callable[[], Compiler]


PREPROCESSOR_FACTORY = '''
def get_preprocessor() -> PreprocessorFunc:
    return {NAME}Preprocessor
'''


GRAMMAR_FACTORY = '''
def get_grammar() -> {NAME}Grammar:
    """Returns a thread/process-exclusive {NAME}Grammar-singleton."""
    THREAD_LOCALS = access_thread_locals()
    try:
        grammar = THREAD_LOCALS.{NAME}_{ID:08d}_grammar_singleton
    except AttributeError:
        THREAD_LOCALS.{NAME}_{ID:08d}_grammar_singleton = {NAME}Grammar()
        if hasattr(get_grammar, 'python_src__'):
            THREAD_LOCALS.{NAME}_{ID:08d}_grammar_singleton.python_src__ = get_grammar.python_src__
        grammar = THREAD_LOCALS.{NAME}_{ID:08d}_grammar_singleton
    if get_config_value('resume_notices'):
        resume_notices_on(grammar)
    elif get_config_value('history_tracking'):
        set_tracer(grammar, trace_history)
    return grammar
'''


TRANSFORMER_FACTORY = '''
def Create{NAME}Transformer() -> TransformationFunc:
    """Creates a transformation function that does not share state with other
    threads or processes."""
    return partial(traverse, processing_table={NAME}_AST_transformation_table.copy())

def get_transformer() -> TransformationFunc:
    """Returns a thread/process-exclusive transformation function."""
    THREAD_LOCALS = access_thread_locals()
    try:
        transformer = THREAD_LOCALS.{NAME}_{ID:08d}_transformer_singleton
    except AttributeError:
        THREAD_LOCALS.{NAME}_{ID:08d}_transformer_singleton = Create{NAME}Transformer()
        transformer = THREAD_LOCALS.{NAME}_{ID:08d}_transformer_singleton
    return transformer
'''


COMPILER_FACTORY = '''
def get_compiler() -> {NAME}Compiler:
    """Returns a thread/process-exclusive {NAME}Compiler-singleton."""
    THREAD_LOCALS = access_thread_locals()
    try:
        compiler = THREAD_LOCALS.{NAME}_{ID:08d}_compiler_singleton
    except AttributeError:
        THREAD_LOCALS.{NAME}_{ID:08d}_compiler_singleton = {NAME}Compiler()
        compiler = THREAD_LOCALS.{NAME}_{ID:08d}_compiler_singleton
    return compiler
'''
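# Sketch: EBNFCompiler fills in the {NAME} and {ID} placeholders of the factory
# templates above when it emits skeleton code, roughly like this (the grammar
# name 'Arithmetic' is illustrative):
#
#     factory_src = GRAMMAR_FACTORY.format(NAME='Arithmetic', ID=1)
#     # -> source of a get_grammar() function returning an ArithmeticGrammar singleton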


WHITESPACE_TYPES = {'horizontal': r'[\t ]*',  # default: horizontal
                    'linefeed': r'[ \t]*\n?(?!\s*\n)[ \t]*',
                    'vertical': r'\s*'}

DROP_TOKEN  = 'token'
DROP_WSPC   = 'whitespace'
DROP_REGEXP = 'regexp'
DROP_VALUES = {DROP_TOKEN, DROP_WSPC, DROP_REGEXP}
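# These values mirror what can be written in the corresponding EBNF directives,
# e.g. (illustrative):
#
#     @ whitespace = linefeed
#     @ drop       = whitespace, token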

# Representation of Python code or, rather, something that will be output as Python code
ReprType = Union[str, unrepr]


class EBNFDirectives:
    """
    A Record that keeps information about compiler directives
    during the compilation process.

    Attributes:
        whitespace:  the regular expression string for (insignificant)
                whitespace

        comment:  the regular expression string for comments

        literalws:  automatic whitespace eating next to literals. Can
                be either 'left', 'right', 'none', 'both'

        tokens:  set of the names of preprocessor tokens
        filter:  mapping of symbols to python filter functions that
                will be called on any retrieve / pop - operations on
                these symbols

        error:  mapping of symbols to tuples of match conditions and
                customized error messages. A match condition can be
                either a string or a regular expression. The first
                error message where the search condition matches will
                be displayed. An empty string '' as search condition
                always matches, so in case of multiple error messages,
                this condition should be placed at the end.

        skip:  mapping of symbols to a list of search expressions. A
                search expression can be either a string or a regular
                expression. The closest match is the point of reentry
                for the series-parser when a mandatory item failed to
                match the following text.

        resume:  mapping of symbols to a list of search expressions. A
                search expression can be either a string or a regular
                expression. The closest match is the point of reentry
                after a parsing error has occurred. Other than the
                skip field, this configures resuming after the failing
                parser has returned.

        drop:   A set that may contain the elements `DROP_TOKEN`,
                `DROP_WSPC`, `DROP_REGEXP` or any name of a symbol
                of an anonymous parser (e.g. '_linefeed'), the results
                of which will already be dropped during the parsing
                process.

        super_ws(property): Cache for the "super whitespace" which
                is a regular expression that merges whitespace and
                comments. This property should only be accessed after
                the `whitespace` and `comment` fields have been filled
                with the values parsed from the EBNF source.
    """
    __slots__ = ['whitespace', 'comment', 'literalws', 'tokens', 'filter', 'error', 'skip',
                 'resume', 'drop', '_super_ws']

    def __init__(self):
        self.whitespace = WHITESPACE_TYPES['vertical']  # type: str
        self.comment = ''     # type: str
        self.literalws = {'right'}  # type: Collection[str]
        self.tokens = set()   # type: Collection[str]
        self.filter = dict()  # type: Dict[str, str]
        self.error = dict()   # type: Dict[str, List[Tuple[ReprType, ReprType]]]
        self.skip = dict()    # type: Dict[str, List[Union[unrepr, str]]]
        self.resume = dict()  # type: Dict[str, List[Union[unrepr, str]]]
        self.drop = set()     # type: Set[str]
        self._super_ws = None  # type: Optional[str]

    def __getitem__(self, key):
        return getattr(self, key)

    def __setitem__(self, key, value):
        assert hasattr(self, key)
        setattr(self, key, value)

    @property
    def super_ws(self):
        if self._super_ws is None:
            self._super_ws = mixin_comment(self.whitespace, self.comment)
        return self._super_ws

    def keys(self):
        return self.__slots__


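# Example sketch: EBNFDirectives is a plain record with dict-style access,
# which the compiler uses while processing directive-nodes.
#
#     directives = EBNFDirectives()
#     directives['literalws'] = {'left', 'right'}
#     assert directives.literalws == {'left', 'right'}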
class EBNFCompilerError(CompilerError):
    """Error raised by `EBNFCompiler` class. (Not compilation errors
    in the strict sense, see `CompilationError` in module ``dsl.py``)"""
    pass


class EBNFCompiler(Compiler):
    """
    Generates a Parser from an abstract syntax tree of a grammar specified
    in EBNF-Notation.

    Instances of this class must be called with the root-node of the
    abstract syntax tree from an EBNF-specification of a formal language.
    The returned value is the Python-source-code of a Grammar class for
    this language that can be used to parse texts in this language.
    See classes `parser.Compiler` and `parser.Grammar` for more information.

    Additionally, class EBNFCompiler provides helper methods to generate
    code-skeletons for a preprocessor, AST-transformation and full
    compilation of the formal language. These methods' names start with
    the prefix `gen_`.

    Attributes:
        current_symbols:  During compilation, a list containing the root
                node of the currently compiled definition as first element
                and then the nodes of the symbols that are referred to in
                the currently compiled definition.

        rules:  Dictionary that maps rule names to a list of Nodes that
                contain symbol-references in the definition of the rule.
                The first item in the list is the node of the rule-
                definition itself. Example:

                           `alternative = a | b`

                Now `[node.content for node in self.rules['alternative']]`
                yields `['alternative = a | b', 'a', 'b']`

        symbols:  A mapping of symbol names to their first usage (not
                their definition!) in the EBNF source.

        variables:  A set of symbol names that are used with the
                Pop or Retrieve operator. Because the values of these
                symbols need to be captured they are called variables.
                See `test_parser.TestPopRetrieve` for an example.

        recursive:  A set of symbols that are used recursively and
                therefore require a `Forward`-operator.

        definitions:  A dictionary of definitions. Unlike `rules`,
                this maps the symbols to their compiled definienda.

        required_keywords: A set of keywords (like `comment__` or
                `whitespace__`) that need to be defined at the beginning
                of the grammar class because they are referred to later.

        deferred_tasks:  A list of callables that is filled during
                compilation, but that will be executed only after
                compilation has finished. Typically, it contains
                semantic checks that require information that
                is only available upon completion of compilation.

        root_symbol: The name of the root symbol.

        drop_flag: This flag is set temporarily when compiling the definition
                of a parser that shall drop its content. If this flag is
                set, all contained parsers will also drop their content as an
                optimization.

        directives:  A record of all directives and their default values.

        defined_directives:  A set of all directives that have already been
                defined. With the exception of those directives contained
                in EBNFCompiler.REPEATABLE_DIRECTIVES, directives must only
                be defined once.

        consumed_custom_errors:  A set of symbols for which a custom error
                has been defined and(!) consumed during compilation. This
                makes it possible to add a compiler error in those cases where
                (i) an error message has been defined but will never be used
                or (ii) an error message is accidentally used twice. For
                examples, see `test_ebnf.TestErrorCustomization`.

        consumed_skip_rules: The same as `consumed_custom_errors`, only for
                in-series-resume-rules (aka 'skip-rules') for Series-parsers.

        re_flags:  A set of regular expression flags to be added to all
                regular expressions found in the current parsing process.

        anonymous_regexp: A regular expression to identify symbols that stand
                for parsers that shall yield anonymous nodes. The pattern of
                the regular expression is configured in configuration.py but
                can also be set by a directive. The default value is a regular
                expression that catches names with a leading underscore.
                See also `parser.Grammar.anonymous__`.

        grammar_name:  The name of the grammar to be compiled.

        grammar_source:  The source code of the grammar to be compiled.

        grammar_id: a unique id for every compiled grammar. (Required for
                disambiguation of thread-local variables storing
                compiled texts.)
    """
    COMMENT_KEYWORD = "COMMENT__"
    COMMENT_PARSER_KEYWORD = "comment__"
    DROP_COMMENT_PARSER_KEYWORD = "dcomment__"
    COMMENT_RX_KEYWORD = "comment_rx__"
    WHITESPACE_KEYWORD = "WSP_RE__"
    RAW_WS_KEYWORD = "WHITESPACE__"
    RAW_WS_PARSER_KEYWORD = "whitespace__"
    DROP_RAW_WS_PARSER_KEYWORD = "dwhitespace__"
    WHITESPACE_PARSER_KEYWORD = "wsp__"
    DROP_WHITESPACE_PARSER_KEYWORD = "dwsp__"
    RESUME_RULES_KEYWORD = "resume_rules__"
    SKIP_RULES_SUFFIX = '_skip__'
    ERR_MSG_SUFFIX = '_err_msg__'
    COMMENT_OR_WHITESPACE = {COMMENT_PARSER_KEYWORD, DROP_COMMENT_PARSER_KEYWORD,
                             RAW_WS_PARSER_KEYWORD, DROP_RAW_WS_PARSER_KEYWORD,
                             WHITESPACE_PARSER_KEYWORD, DROP_WHITESPACE_PARSER_KEYWORD}
    RESERVED_SYMBOLS = {COMMENT_KEYWORD, COMMENT_RX_KEYWORD, COMMENT_PARSER_KEYWORD,
                        WHITESPACE_KEYWORD, RAW_WS_KEYWORD, RAW_WS_PARSER_KEYWORD,
                        WHITESPACE_PARSER_KEYWORD, DROP_WHITESPACE_PARSER_KEYWORD,
                        RESUME_RULES_KEYWORD}
    KEYWORD_SUBSTITUTION = {COMMENT_KEYWORD: COMMENT_PARSER_KEYWORD,
                            COMMENT_PARSER_KEYWORD: COMMENT_PARSER_KEYWORD,
                            RAW_WS_KEYWORD: RAW_WS_PARSER_KEYWORD,
                            RAW_WS_PARSER_KEYWORD: RAW_WS_PARSER_KEYWORD,
                            WHITESPACE_KEYWORD: WHITESPACE_PARSER_KEYWORD,
                            WHITESPACE_PARSER_KEYWORD: WHITESPACE_PARSER_KEYWORD,
                            DROP_WHITESPACE_PARSER_KEYWORD: DROP_WHITESPACE_PARSER_KEYWORD}
    AST_ERROR = "Badly structured syntax tree. " \
                "Potentially due to erroneous AST transformation."
    PREFIX_TABLE = {'§': 'Required',
                    '&': 'Lookahead', '!': 'NegativeLookahead',
                    '-&': 'Lookbehind', '-!': 'NegativeLookbehind',
                    '::': 'Pop', ':': 'Retrieve'}
    REPEATABLE_DIRECTIVES = {'tokens'}

    def __init__(self, grammar_name="DSL", grammar_source=""):
        self.grammar_id = 0  # type: int
        super(EBNFCompiler, self).__init__()  # calls the reset()-method
        self.set_grammar_name(grammar_name, grammar_source)

    def reset(self):
        super(EBNFCompiler, self).reset()
        self._result = ''               # type: str
        self.re_flags = set()           # type: Set[str]
        self.rules = OrderedDict()      # type: OrderedDict[str, List[Node]]
        self.current_symbols = []       # type: List[Node]
        self.symbols = {}               # type: Dict[str, Node]
        self.variables = set()          # type: Set[str]
        self.recursive = set()          # type: Set[str]
        self.definitions = {}           # type: Dict[str, str]
        self.required_keywords = set()  # type: Set[str]
        self.deferred_tasks = []        # type: List[Callable]
        self.root_symbol = ""           # type: str
        self.drop_flag = False          # type: bool
        self.directives = EBNFDirectives()   # type: EBNFDirectives
        self.defined_directives = set()      # type: Set[str]
        self.consumed_custom_errors = set()  # type: Set[str]
        self.consumed_skip_rules = set()     # type: Set[str]
        self.anonymous_regexp = re.compile(get_config_value('default_anonymous_regexp'))
        self.grammar_id += 1


    @property
    def result(self) -> str:
        return self._result

    def set_grammar_name(self, grammar_name: str = "", grammar_source: str = ""):
        """
        Changes the grammar name and source.

        The grammar name and the source text are metadata that do not affect the
        compilation process. They are only used to name and annotate the output.
        Returns `self`.
        """
        assert grammar_name == "" or re.match(r'\w+\Z', grammar_name)
        if not grammar_name and re.fullmatch(r'[\w/:\\]+', grammar_source):
            grammar_name = os.path.splitext(os.path.basename(grammar_source))[0]
        self.grammar_name = grammar_name or "NameUnknown"
        self.grammar_source = load_if_file(grammar_source)
        return self


    # methods for generating skeleton code for preprocessor, transformer, and compiler

    def gen_preprocessor_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for a preprocessor-function for
        the previously compiled formal language.
        """
        name = self.grammar_name + "Preprocessor"
        return "def %s(text):\n    return text, lambda i: i\n" % name \
               + PREPROCESSOR_FACTORY.format(NAME=self.grammar_name)


    def gen_transformer_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for the AST-transformation for the
        previously compiled formal language.
        """
        if not self.rules:
            raise EBNFCompilerError('Compiler must be run before calling '
                                    '"gen_transformer_skeleton()"!')
        tt_name = self.grammar_name + '_AST_transformation_table'
        transtable = [tt_name + ' = {',
                      '    # AST Transformations for the ' + self.grammar_name + '-grammar']
        transtable.append('    "<": flatten,')
        for name in self.rules:
            transformations = '[]'
            # rule = self.definitions[name]
            # if rule.startswith('Alternative'):
            #     transformations = '[replace_or_reduce]'
            # elif rule.startswith('Synonym'):
            #     transformations = '[reduce_single_child]'
            transtable.append('    "' + name + '": %s,' % transformations)
        # transtable.append('    ":Token": reduce_single_child,')
        transtable += ['    "*": replace_by_single_child', '}', '']
        transtable += [TRANSFORMER_FACTORY.format(NAME=self.grammar_name, ID=self.grammar_id)]
        return '\n'.join(transtable)

    def gen_compiler_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for a Compiler-class for the
        previously compiled formal language.
        """
        if not self.rules:
            raise EBNFCompilerError('Compiler has not been run before calling '
                                    '"gen_compiler_skeleton()"!')
        compiler = ['class ' + self.grammar_name + 'Compiler(Compiler):',
                    '    """Compiler for the abstract-syntax-tree of a '
                    + self.grammar_name + ' source file.',
                    '    """', '',
                    '    def __init__(self):',
                    '        super(' + self.grammar_name + 'Compiler, self).__init__()',
                    '',
                    '    def reset(self):',
                    '        super().reset()',
                    '        # initialize your variables here, not in the constructor!',
                    '']
        for name in self.rules:
            method_name = visitor_name(name)
            if name == self.root_symbol:
                compiler += ['    def ' + method_name + '(self, node):',
                             '        return self.fallback_compiler(node)', '']
            else:
                compiler += ['    # def ' + method_name + '(self, node):',
                             '    #     return node', '']
        compiler += [COMPILER_FACTORY.format(NAME=self.grammar_name, ID=self.grammar_id)]
        return '\n'.join(compiler)

    def verify_transformation_table(self, transtable):
        """
        Checks for symbols that occur in the transformation-table but have
        never been defined in the grammar. Usually, this kind of
        inconsistency results from an error like a typo in the transformation
        table.
        """
        assert self._dirty_flag
        table_entries = set(expand_table(transtable).keys()) - {'*', '<', '>', '~'}
        symbols = self.rules.keys()
        messages = []
        for entry in table_entries:
            if entry not in symbols and not entry.startswith(":"):
                messages.append(Error(('Symbol "%s" is not defined in grammar %s but appears in '
                                       'the transformation table!') % (entry, self.grammar_name),
                                      0, Error.UNDEFINED_SYMBOL_IN_TRANSTABLE_WARNING))
        return messages

    def verify_compiler(self, compiler):
        """
        Checks for on_XXXX()-methods that occur in the compiler, although XXXX
        has never been defined in the grammar. Usually, this kind of
        inconsistency results from an error like a typo in the compiler-code.
        """
        pass  # TODO: add verification code here

    def _check_rx(self, node: Node, rx: str) -> str:
        """
        Checks whether the string `rx` represents a valid regular
        expression. Makes sure that multi-line regular expressions are
        prepended by the multi-line-flag. Returns the regular expression string.
        """
        # TODO: Support atomic grouping: https://stackoverflow.com/questions/13577372/do-python-regular-expressions-have-an-equivalent-to-rubys-atomic-grouping
        flags = self.re_flags | {'x'} if rx.find('\n') >= 0 else self.re_flags
        if flags:
            rx = "(?%s)%s" % ("".join(flags), rx)
        try:
            re.compile(rx)
        except Exception as re_error:
            self.tree.new_error(node, "malformed regular expression %s: %s" %
                                (repr(rx), str(re_error)))
        return rx


    def _extract_regex(self, node: Node) -> str:
        """Extracts regular expression string from regexp-Node."""
        value = node.content.strip("~")
        if value[0] + value[-1] in {'""', "''"}:
            value = escape_re(value[1:-1])
        elif value[0] + value[-1] == '//' and value != '//':
            value = self._check_rx(node, value[1:-1])
        return value


    def _gen_search_rule(self, nd: Node) -> ReprType:
        """Generates a search rule, which can be either a string for simple
        string search or a regular expression, from the node's content. Returns
        an empty string in case the node is neither regexp nor literal.
        """
        if nd.tag_name == 'regexp':
            super_ws = self.directives.super_ws
            noempty_ws = mixin_noempty(super_ws)
            search_regex = self._extract_regex(nd)\
                .replace(r'\~!', noempty_ws).replace(r'\~', super_ws)
            return unrepr("re.compile(r'%s')" % search_regex)
        elif nd.tag_name == 'literal':
            s = nd.content[1:-1]  # remove quotation marks
            return unrepr("re.compile(r'(?=%s)')" % escape_re(s))
        return ''


    def _gen_search_list(self, nodes: Sequence[Node]) -> List[Union[unrepr, str]]:
        search_list = []  # type: List[Union[unrepr, str]]
        for child in nodes:
            rule = self._gen_search_rule(child)
            search_list.append(rule if rule else unrepr(child.content.strip()))
        return search_list

    def assemble_parser(self, definitions: List[Tuple[str, str]], root_node: Node) -> str:
        """
        Creates the Python code for the parser after compilation of
        the EBNF-Grammar
        """

        # execute deferred tasks, for example semantic checks that cannot
        # be done before the symbol table is complete

        for task in self.deferred_tasks:
            task()

        # provide for capturing of symbols that are variables, i.e. the
        # value of which will be retrieved at some point during the parsing process

        if self.variables:
            for i in range(len(definitions)):
                if definitions[i][0] in self.variables:
                    definitions[i] = (definitions[i][0], 'Capture(%s)' % definitions[i][1])

        # add special fields for Grammar class

        if DROP_WSPC in self.directives.drop or DROP_TOKEN in self.directives.drop:
            definitions.append((EBNFCompiler.DROP_WHITESPACE_PARSER_KEYWORD,
                                'Drop(Whitespace(%s))' % EBNFCompiler.WHITESPACE_KEYWORD))
        definitions.append((EBNFCompiler.WHITESPACE_PARSER_KEYWORD,
                            'Whitespace(%s)' % EBNFCompiler.WHITESPACE_KEYWORD))
        definitions.append((EBNFCompiler.WHITESPACE_KEYWORD,
                            ("mixin_comment(whitespace=" + EBNFCompiler.RAW_WS_KEYWORD
                             + ", comment=" + EBNFCompiler.COMMENT_KEYWORD + ")")))
        if EBNFCompiler.RAW_WS_PARSER_KEYWORD in self.required_keywords:
            definitions.append((EBNFCompiler.RAW_WS_PARSER_KEYWORD,
                                "Whitespace(%s)" % EBNFCompiler.RAW_WS_KEYWORD))
        definitions.append((EBNFCompiler.RAW_WS_KEYWORD,
                            "r'{}'".format(self.directives.whitespace)))
        comment_rx = ("re.compile(%s)" % EBNFCompiler.COMMENT_KEYWORD) \
            if self.directives.comment else "RX_NEVER_MATCH"
        if EBNFCompiler.COMMENT_PARSER_KEYWORD in self.required_keywords:
            definitions.append((EBNFCompiler.COMMENT_PARSER_KEYWORD,
                                "RegExp(%s)" % EBNFCompiler.COMMENT_RX_KEYWORD))
        definitions.append((EBNFCompiler.COMMENT_RX_KEYWORD, comment_rx))
        definitions.append((EBNFCompiler.COMMENT_KEYWORD,
                            "r'{}'".format(self.directives.comment)))

        # prepare and add resume-rules

        resume_rules = dict()  # type: Dict[str, List[ReprType]]
        for symbol, raw_rules in self.directives.resume.items():
            refined_rules = []
            for rule in raw_rules:
                if isinstance(rule, unrepr) and rule.s.isidentifier():
                    try:
                        nd = self.rules[rule.s][0].children[1]
                        refined = self._gen_search_rule(nd)
                    except IndexError:
                        refined = ""
                    if refined:
                        refined_rules.append(refined)
                    else:
                        self.tree.new_error(nd, 'Symbol "%s" cannot be used in resume rule, since'
                                                ' it represents neither literal nor regexp!')
                else:
                    refined_rules.append(rule)
            resume_rules[symbol] = refined_rules
        definitions.append((self.RESUME_RULES_KEYWORD, repr(resume_rules)))

        # prepare and add customized error-messages

        for symbol, err_msgs in self.directives.error.items():
            custom_errors = []  # type: List[Tuple[ReprType, ReprType]]
            for search, message in err_msgs:
                if isinstance(search, unrepr) and search.s.isidentifier():
                    try:
                        nd = self.rules[search.s][0].children[1]
                        search = self._gen_search_rule(nd)
                    except IndexError:
                        search = ''
                custom_errors.append((search, message))
            definitions.append((symbol + self.ERR_MSG_SUFFIX, repr(custom_errors)))

        for symbol in self.directives.error.keys():
            if symbol not in self.consumed_custom_errors:
                try:
                    def_node = self.rules[symbol][0]
                    self.tree.new_error(
                        def_node, 'Customized error message for symbol "{}" will never be used, '
                        'because the mandatory marker "§" appears nowhere in its definiendum!'
                        .format(symbol), Error.UNUSED_ERROR_HANDLING_WARNING)
                except KeyError:
                    def match_function(nd: Node) -> bool:
                        return nd.children and nd.children[0].content.startswith(symbol + '_')
                    dir_node = self.tree.pick(match_function)
                    directive = dir_node.children[0].content
                    self.tree.new_error(
                        dir_node, 'Directive "{}" relates to undefined symbol "{}"!'
                        .format(directive, directive.split('_')[0]))

        # prepare and add skip-rules

        for symbol, skip in self.directives.skip.items():
            skip_rules = []  # type: List[ReprType]
            for search in skip:
                if isinstance(search, unrepr) and search.s.isidentifier():
                    try:
                        nd = self.rules[search.s][0].children[1]
                        search = self._gen_search_rule(nd)
                    except IndexError:
                        search = ''
                skip_rules.append(search)
            definitions.append((symbol + self.SKIP_RULES_SUFFIX, repr(skip_rules)))

        for symbol in self.directives.skip.keys():
            if symbol not in self.consumed_skip_rules:
                try:
                    def_node = self.rules[symbol][0]
                    self.tree.new_error(
                        def_node, '"Skip-rules" for symbol "{}" will never be used, '
                        'because the mandatory marker "§" appears nowhere in its definiendum!'
                        .format(symbol), Error.UNUSED_ERROR_HANDLING_WARNING)
                except KeyError:
                    pass  # error has already been notified earlier!

        # prepare parser class header and docstring and
        # add EBNF grammar to the doc string of the parser class

        article = 'an ' if self.grammar_name[0:1] in "AaEeIiOoUu" else 'a '  # what about 'hour', 'universe' etc.?
        show_source = get_config_value('add_grammar_source_to_parser_docstring')
        declarations = ['class ' + self.grammar_name
                        + 'Grammar(Grammar):',
                        'r"""Parser for ' + article + self.grammar_name
                        + ' source file'
                        + ('. Grammar:' if self.grammar_source and show_source else '.')]
        definitions.append(('parser_initialization__', '["upon instantiation"]'))
        definitions.append(('static_analysis_pending__', '[True]'))
        definitions.append(('anonymous__',
                            're.compile(' + repr(self.anonymous_regexp.pattern) + ')'))
        if self.grammar_source:
            definitions.append(('source_hash__',
                                '"%s"' % md5(self.grammar_source, __version__)))
            declarations.append('')
            if show_source:
                declarations += [line for line in self.grammar_source.split('\n')]
            while declarations[-1].strip() == '':
                declarations = declarations[:-1]
        declarations.append('"""')

        # turn definitions into declarations in reverse order

        self.root_symbol = definitions[0][0] if definitions else ""
        definitions.reverse()
        declarations += [symbol + ' = Forward()'
                         for symbol in sorted(list(self.recursive))]
        for symbol, statement in definitions:
            if symbol in self.recursive:
                declarations += [symbol + '.set(' + statement + ')']
            else:
                declarations += [symbol + ' = ' + statement]

        # check for symbols used but never defined

        defined_symbols = set(self.rules.keys()) | self.RESERVED_SYMBOLS
        for symbol in self.symbols:
            if symbol not in defined_symbols:
                self.tree.new_error(self.symbols[symbol],
                                    "Missing definition for symbol '%s'" % symbol)
                # root_node.error_flag = True