"""ebnf.py - EBNF -> Python-Parser compilation for DHParser

Copyright 2016  by Eckhart Arnold (arnold@badw.de)
                Bavarian Academy of Sciences and Humanities (badw.de)

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied.  See the License for the specific language governing
permissions and limitations under the License.
"""

import keyword
from collections import OrderedDict

try:
    import regex as re
except ImportError:
    import re
from .typing import Callable, Dict, List, Set, Tuple

from DHParser.toolkit import load_if_file, escape_re, md5, sane_parser_name
from DHParser.parsers import Grammar, mixin_comment, nil_scanner, Forward, RE, NegativeLookahead, \
    Alternative, Sequence, Optional, Required, OneOrMore, ZeroOrMore, Token, Compiler, \
    ScannerFunc
from DHParser.syntaxtree import Node, traverse, remove_enclosing_delimiters, reduce_single_child, \
    replace_by_single_child, TOKEN_PTYPE, remove_expendables, remove_tokens, flatten, \
    forbid, assert_content, WHITESPACE_PTYPE, key_tag_name, TransformationFunc
from DHParser.versionnumber import __version__


__all__ = ['get_ebnf_scanner',
           'get_ebnf_grammar',
           'get_ebnf_transformer',
           'get_ebnf_compiler',
           'EBNFGrammar',
           'EBNFTransformer',
           'EBNFCompilerError',
           'EBNFCompiler',
           'grammar_changed',
           'ScannerFactoryFunc',
           'ParserFactoryFunc',
           'TransformerFactoryFunc',
           'CompilerFactoryFunc']


########################################################################
#
# EBNF scanning
#
########################################################################


def get_ebnf_scanner() -> ScannerFunc:
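    """Returns the scanner function for EBNF sources. EBNF sources do not
    need any preprocessing, so this is simply the ``nil_scanner``, which
    passes the source text through unchanged."""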
    return nil_scanner


########################################################################
#
# EBNF parsing
#
########################################################################

# TODO: Introduce a dummy/rename-parser for simple assignments (e.g. jahr = JAHRESZAHL) or substitution!
# TODO: Raise an error for unconnected parsers!
class EBNFGrammar(Grammar):
    r"""Parser for an EBNF source file, with this grammar:

    # EBNF-Grammar in EBNF

    @ comment    =  /#.*(?:\n|$)/                    # comments start with '#' and eat all chars up to and including '\n'
    @ whitespace =  /\s*/                            # whitespace includes linefeed
    @ literalws  =  right                            # trailing whitespace of literals will be ignored tacitly

    syntax     =  [~//] { definition | directive } §EOF
    definition =  symbol §"=" expression
    directive  =  "@" §symbol §"=" ( regexp | literal | list_ )

    expression =  term { "|" term }
    term       =  { factor }+
    factor     =  [flowmarker] [retrieveop] symbol !"="   # negative lookahead to be sure it's not a definition
                | [flowmarker] literal
                | [flowmarker] regexp
                | [flowmarker] group
                | [flowmarker] regexchain
                | [flowmarker] oneormore
                | repetition
                | option

    flowmarker =  "!"  | "&"  | "§" |                # '!' negative lookahead, '&' positive lookahead, '§' required
                  "-!" | "-&"                        # '-' negative lookbehind, '-&' positive lookbehind
    retrieveop =  "::" | ":"                         # '::' pop, ':' retrieve

    group      =  "(" expression §")"
    regexchain =  ">" expression §"<"                # compiles "expression" into a singular regular expression
    oneormore  =  "{" expression "}+"
    repetition =  "{" expression §"}"
    option     =  "[" expression §"]"

    symbol     =  /(?!\d)\w+/~                       # e.g. expression, factor, parameter_list
    literal    =  /"(?:[^"]|\\")*?"/~                # e.g. "(", '+', 'while'
                | /'(?:[^']|\\')*?'/~                # whitespace following literals will be ignored tacitly.
    regexp     =  /~?\/(?:[^\/]|(?<=\\)\/)*\/~?/~    # e.g. /\w+/, ~/#.*(?:\n|$)/~
                                                     # '~' is a whitespace-marker, if present leading or trailing
                                                     # whitespace of a regular expression will be ignored tacitly.
    list_      =  /\w+/~ { "," /\w+/~ }              # comma separated list of symbols, e.g. BEGIN_LIST, END_LIST,
                                                     # BEGIN_QUOTE, END_QUOTE; see CommonMark/markdown.py for an example
    EOF =  !/./
    """
    expression = Forward()
    source_hash__ = "a410e1727fb7575e98ff8451dbf8f3bd"
    parser_initialization__ = "upon instantiation"
    COMMENT__ = r'#.*(?:\n|$)'
    WSP__ = mixin_comment(whitespace=r'\s*', comment=r'#.*(?:\n|$)')
    wspL__ = ''
    wspR__ = WSP__
    EOF = NegativeLookahead(RE('.', wR=''))
    list_ = Sequence(RE('\\w+'), ZeroOrMore(Sequence(Token(","), RE('\\w+'))))
    regexp = RE('~?/(?:[^/]|(?<=\\\\)/)*/~?')
    literal = Alternative(RE('"(?:[^"]|\\\\")*?"'), RE("'(?:[^']|\\\\')*?'"))
    symbol = RE('(?!\\d)\\w+')
    option = Sequence(Token("["), expression, Required(Token("]")))
    repetition = Sequence(Token("{"), expression, Required(Token("}")))
    oneormore = Sequence(Token("{"), expression, Token("}+"))
    regexchain = Sequence(Token("<"), expression, Required(Token(">")))
    group = Sequence(Token("("), expression, Required(Token(")")))
    retrieveop = Alternative(Token("::"), Token(":"))
    flowmarker = Alternative(Token("!"), Token("&"), Token("§"), Token("-!"), Token("-&"))
    factor = Alternative(Sequence(Optional(flowmarker), Optional(retrieveop), symbol, NegativeLookahead(Token("="))),
                         Sequence(Optional(flowmarker), literal), Sequence(Optional(flowmarker), regexp),
                         Sequence(Optional(flowmarker), group), Sequence(Optional(flowmarker), regexchain),
                         Sequence(Optional(flowmarker), oneormore), repetition, option)
    term = OneOrMore(factor)
    expression.set(Sequence(term, ZeroOrMore(Sequence(Token("|"), term))))
    directive = Sequence(Token("@"), Required(symbol), Required(Token("=")), Alternative(regexp, literal, list_))
    definition = Sequence(symbol, Required(Token("=")), expression)
    syntax = Sequence(Optional(RE('', wR='', wL=WSP__)), ZeroOrMore(Alternative(definition, directive)), Required(EOF))
    root__ = syntax


def grammar_changed(grammar_class, grammar_source: str) -> bool:
    """Returns ``True`` if ``grammar_class`` does not reflect the latest
    changes of ``grammar_source``

    Parameters:
        grammar_class:  the parser class representing the grammar
            or the file name of a compiler suite containing the grammar
        grammar_source:  File name or string representation of the
            EBNF code of the grammar

    Returns (bool):
        True, if the source text of the grammar is different from the
        source from which the grammar class was generated
    """
    grammar = load_if_file(grammar_source)
    chksum = md5(grammar, __version__)
    if isinstance(grammar_class, str):
        # grammar_class = load_compiler_suite(grammar_class)[1]
        with open(grammar_class, 'r', encoding='utf8') as f:
            pycode = f.read()
        m = re.search(r'class \w*\(Grammar\)', pycode)
        if m:
            m = re.search('    source_hash__ *= *"([a-z0-9]*)"',
                          pycode[m.span()[1]:])
            return not (m and m.groups() and m.groups()[-1] == chksum)
        else:
            return True
    else:
        return chksum != grammar_class.source_hash__
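
# Usage sketch for grammar_changed() (hypothetical file name, not part of
# this module):
#
#     if grammar_changed(EBNFGrammar, "ebnf_grammar.ebnf"):
#         print("The grammar class needs to be regenerated from the EBNF source.")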


def get_ebnf_grammar() -> EBNFGrammar:
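    """Returns a lazily created singleton instance of ``EBNFGrammar``
    (stored in a module-level variable), so that the grammar object is
    instantiated only once."""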
    global thread_local_ebnf_grammar_singleton
    try:
        grammar = thread_local_ebnf_grammar_singleton
        return grammar
    except NameError:
        thread_local_ebnf_grammar_singleton = EBNFGrammar()
        return thread_local_ebnf_grammar_singleton
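
# Sketch of how the grammar singleton is used to parse an EBNF source text
# (illustrative only; `number_ebnf` is a made-up example):
#
#     number_ebnf = '''
#         @ whitespace = /\s*/
#         expression = term { ("+" | "-") term }
#         term       = factor { ("*" | "/") factor }
#         factor     = NUMBER | "(" expression ")"
#         NUMBER     = /[0-9]+/~
#     '''
#     syntax_tree = get_ebnf_grammar()(number_ebnf)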


########################################################################
#
# EBNF concrete to abstract syntax tree transformation and validation
#
########################################################################


# TODO: Add Capture and Retrieve Validation: A variable mustn't be captured twice before retrieval?!?

EBNF_transformation_table = {
    # AST Transformations for EBNF-grammar
    "+":
        remove_expendables,
    "syntax":
        [],
    "directive, definition":
        remove_tokens('@', '='),
    "expression":
        [replace_by_single_child, flatten, remove_tokens('|')],
    "term":
        [replace_by_single_child, flatten],  # supports both idioms:  "{ factor }+" and "factor { factor }"
    "factor, flowmarker, retrieveop":
        replace_by_single_child,
    "group":
        [remove_enclosing_delimiters, replace_by_single_child],
    "oneormore, repetition, option, regexchain":
        [reduce_single_child, remove_enclosing_delimiters],
    "symbol, literal, regexp":
        [reduce_single_child],
    (TOKEN_PTYPE, WHITESPACE_PTYPE):
        [reduce_single_child],
    "list_":
        [flatten, remove_tokens(',')],
    "*":
        [replace_by_single_child]
}


EBNF_validation_table = {
    # Semantic validation on the AST. EXPERIMENTAL!
    "repetition, option, oneormore":
        [forbid('repetition', 'option', 'oneormore'),
         assert_content(r'(?!§)')]
}


def EBNFTransformer(syntax_tree: Node):
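    """Transforms the concrete syntax tree of an EBNF source (in place) into
    an abstract syntax tree and runs the experimental semantic validation
    table over the result."""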
    for processing_table, key_func in [(EBNF_transformation_table, key_tag_name),
                                       (EBNF_validation_table, key_tag_name)]:
        traverse(syntax_tree, processing_table, key_func)


def get_ebnf_transformer() -> TransformationFunc:
    return EBNFTransformer


########################################################################
#
# EBNF abstract syntax tree to Python parser compilation
#
########################################################################


ScannerFactoryFunc = Callable[[], ScannerFunc]
ParserFactoryFunc = Callable[[], Grammar]
TransformerFactoryFunc = Callable[[], TransformationFunc]
CompilerFactoryFunc = Callable[[], Compiler]


SCANNER_FACTORY = '''
def get_scanner() -> ScannerFunc:
    return {NAME}Scanner
'''


GRAMMAR_FACTORY = '''
def get_grammar() -> {NAME}Grammar:
    global thread_local_{NAME}_grammar_singleton
    try:
        grammar = thread_local_{NAME}_grammar_singleton
        return grammar
    except NameError:
        thread_local_{NAME}_grammar_singleton = {NAME}Grammar()
        return thread_local_{NAME}_grammar_singleton
'''


TRANSFORMER_FACTORY = '''
def get_transformer() -> TransformationFunc:
    return {NAME}Transform
'''


COMPILER_FACTORY = '''
def get_compiler(grammar_name="{NAME}", grammar_source="") -> {NAME}Compiler:
    global thread_local_{NAME}_compiler_singleton
    try:
        compiler = thread_local_{NAME}_compiler_singleton
        compiler.set_grammar_name(grammar_name, grammar_source)
        return compiler
    except NameError:
        thread_local_{NAME}_compiler_singleton = \\
            {NAME}Compiler(grammar_name, grammar_source)
        return thread_local_{NAME}_compiler_singleton 
'''


class EBNFCompilerError(Exception):
    """Error raised by `EBNFCompiler` class. (Not compilation errors
    in the strict sense, see `CompilationError` in module ``dsl.py``)"""
    pass


class EBNFCompiler(Compiler):
    """Generates a Parser from an abstract syntax tree of a grammar specified
    in EBNF-Notation.
    """
    COMMENT_KEYWORD = "COMMENT__"
    WHITESPACE_KEYWORD = "WSP__"
    RESERVED_SYMBOLS = {WHITESPACE_KEYWORD, COMMENT_KEYWORD}
    AST_ERROR = "Badly structured syntax tree. " \
                "Potentially due to erroneous AST transformation."
    PREFIX_TABLE = {'§': 'Required',
                    '&': 'Lookahead', '!': 'NegativeLookahead',
                    '-&': 'Lookbehind', '-!': 'NegativeLookbehind',
                    '::': 'Pop', ':': 'Retrieve'}
    WHITESPACE = {'horizontal': r'[\t ]*',  # default: horizontal
                  'linefeed': r'[ \t]*\n?(?!\s*\n)[ \t]*',
                  'vertical': r'\s*'}

    def __init__(self, grammar_name="", grammar_source=""):
        super(EBNFCompiler, self).__init__(grammar_name, grammar_source)
        self._reset()

    def _reset(self):
        self._result = ''           # type: str
        self.rules = OrderedDict()  # type: OrderedDict[str, List[Node]]
        self.current_symbols = []   # type: List[Node]
        self.symbols = {}           # type: Dict[str, Node]
        self.variables = set()      # type: Set[str]
        # self.definition_names = []  # type: List[str]
        self.recursive = set()      # type: Set[str]
        self.root = ""              # type: str
        self.directives = {'whitespace': self.WHITESPACE['horizontal'],
                           'comment': '',
                           'literalws': ['right'],
                           'tokens': set(),     # alt. 'scanner_tokens'
                           'filter': dict(),    # alt. 'filter'
                           'testing': False }

    @property
    def result(self) -> str:
        return self._result

    def gen_scanner_skeleton(self) -> str:
        name = self.grammar_name + "Scanner"
        return "def %s(text):\n    return text\n" % name \
               + SCANNER_FACTORY.format(NAME=self.grammar_name)

    def gen_transformer_skeleton(self) -> str:
        if not self.rules:
            raise EBNFCompilerError('Compiler must be run before calling '
                                    '"gen_transformer_skeleton()"!')
        tt_name = self.grammar_name + '_AST_transformation_table'
        tf_name = self.grammar_name + 'Transform'
        transtable = [tt_name + ' = {',
                      '    # AST Transformations for the ' +
                      self.grammar_name + '-grammar']
        for name in self.rules:
            transtable.append('    "' + name + '": no_transformation,')
        transtable += ['    "*": no_transformation', '}', '', tf_name +
                       ' = partial(traverse, processing_table=%s)' % tt_name, '']
        transtable += [TRANSFORMER_FACTORY.format(NAME=self.grammar_name)]
        return '\n'.join(transtable)
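
    # For a grammar named "Arith" with the rules `expression` and `term`, the
    # skeleton generated above begins roughly as follows (sketch; the factory
    # boilerplate appended at the end is omitted):
    #
    #     Arith_AST_transformation_table = {
    #         # AST Transformations for the Arith-grammar
    #         "expression": no_transformation,
    #         "term": no_transformation,
    #         "*": no_transformation
    #     }
    #
    #     ArithTransform = partial(traverse, processing_table=Arith_AST_transformation_table)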

    def gen_compiler_skeleton(self) -> str:
        if not self.rules:
            raise EBNFCompilerError('Compiler has not been run before calling '
                                    '"gen_compiler_skeleton()"!')
        compiler = ['class ' + self.grammar_name + 'Compiler(Compiler):',
                    '    """Compiler for the abstract-syntax-tree of a ' +
                    self.grammar_name + ' source file.',
                    '    """', '',
                    '    def __init__(self, grammar_name="' +
                    self.grammar_name + '", grammar_source=""):',
                    '        super(' + self.grammar_name +
                    'Compiler, self).__init__(grammar_name, grammar_source)',
                    r"        assert re.match('\w+\Z', grammar_name)", '']
        for name in self.rules:
            method_name = Compiler.derive_method_name(name)
            if name == self.root:
                compiler += ['    def ' + method_name + '(self, node):',
                             '        return node', '']
            else:
                compiler += ['    def ' + method_name + '(self, node):',
                             '        pass', '']
        compiler += [COMPILER_FACTORY.format(NAME=self.grammar_name)]
        return '\n'.join(compiler)

    def assemble_parser(self, definitions: List[Tuple[str, str]], root_node: Node) -> str:
        # fix capture of variables that have been defined before usage [sic!]

        if self.variables:
            for i in range(len(definitions)):
                if definitions[i][0] in self.variables:
                    definitions[i] = (definitions[i][0], 'Capture(%s)' % definitions[i][1])

        definitions.append(('wspR__', self.WHITESPACE_KEYWORD
                            if 'right' in self.directives['literalws'] else "''"))
        definitions.append(('wspL__', self.WHITESPACE_KEYWORD
                            if 'left' in self.directives['literalws'] else "''"))
        definitions.append((self.WHITESPACE_KEYWORD,
                            ("mixin_comment(whitespace="
                             "r'{whitespace}', comment=r'{comment}')").
                            format(**self.directives)))
        definitions.append((self.COMMENT_KEYWORD, "r'{comment}'".format(**self.directives)))

        # prepare parser class header and docstring and
        # add EBNF grammar to the doc string of the parser class

        article = 'an ' if self.grammar_name[0:1] in "AaEeIiOoUu" else 'a '  # what about 'hour', 'universe' etc.?
        declarations = ['class ' + self.grammar_name +
                        'Grammar(Grammar):',
                        'r"""Parser for ' + article + self.grammar_name +
                        ' source file' +
                        (', with this grammar:' if self.grammar_source else '.')]
        definitions.append(('parser_initialization__', '"upon instantiation"'))
        if self.grammar_source:
            definitions.append(('source_hash__',
                                '"%s"' % md5(self.grammar_source, __version__)))
            declarations.append('')
            declarations += [line for line in self.grammar_source.split('\n')]
            while declarations[-1].strip() == '':
                declarations = declarations[:-1]
        declarations.append('"""')

        # turn definitions into declarations in reverse order

        self.root = definitions[0][0] if definitions else ""
        definitions.reverse()
        declarations += [symbol + ' = Forward()'
                         for symbol in sorted(list(self.recursive))]
        for symbol, statement in definitions:
            if symbol in self.recursive:
                declarations += [symbol + '.set(' + statement + ')']
            else:
                declarations += [symbol + ' = ' + statement]

        # check for symbols used but never defined

        defined_symbols = set(self.rules.keys()) | self.RESERVED_SYMBOLS
        for symbol in self.symbols:
            if symbol not in defined_symbols:
                self.symbols[symbol].add_error("Missing definition for symbol '%s'" % symbol)
                root_node.error_flag = True

        # check for unconnected rules

        if not self.directives['testing']:
            defined_symbols.difference_update(self.RESERVED_SYMBOLS)

            def remove_connections(symbol):
                if symbol in defined_symbols:
                    defined_symbols.remove(symbol)
                    for related in self.rules[symbol][1:]:
                        remove_connections(str(related))

            remove_connections(self.root)
            for leftover in defined_symbols:
                self.rules[leftover][0].add_error(('Rule "%s" is not connected to parser '
                    'root "%s"!') % (leftover, self.root) + ' (Use directive "@testing=True" '
                    'to suppress this error message.)')

        # set root parser and assemble python grammar definition

        if self.root and 'root__' not in self.rules:
            declarations.append('root__ = ' + self.root)
        declarations.append('')
        self._result = '\n    '.join(declarations) \
                       + GRAMMAR_FACTORY.format(NAME=self.grammar_name)
        return self._result

    def on_syntax(self, node: Node) -> str:
        self._reset()
        definitions = []

        # drop the wrapping sequence node
        if len(node.children) == 1 and not node.children[0].parser.name:
            node = node.children[0]

        # compile definitions and directives and collect definitions
        for nd in node.children:
            if nd.parser.name == "definition":
                definitions.append(self._compile(nd))
            else:
                assert nd.parser.name == "directive", nd.as_sexpr()
                self._compile(nd)
                node.error_flag = node.error_flag or nd.error_flag

        return self.assemble_parser(definitions, node)

    def on_definition(self, node: Node) -> Tuple[str, str]:
        rule = str(node.children[0])
        if rule in self.rules:
            node.add_error('A rule with name "%s" has already been defined.' % rule)
        elif rule in EBNFCompiler.RESERVED_SYMBOLS:
            node.add_error('Symbol "%s" is a reserved symbol.' % rule)
        elif not sane_parser_name(rule):
            node.add_error('Illegal symbol "%s". Symbols must not start or '
                           'end with a double underscore "__".' % rule)
        elif rule in self.directives['tokens']:
            node.add_error('Symbol "%s" has already been defined as '
                           'a scanner token.' % rule)
        elif keyword.iskeyword(rule):
            node.add_error('Python keyword "%s" may not be used as a symbol. '
                           % rule + '(This may change in the future.)')
        try:
            self.current_symbols = [node]
            self.rules[rule] = self.current_symbols
            defn = self._compile(node.children[1])
            if rule in self.variables:
                defn = 'Capture(%s)' % defn
                self.variables.remove(rule)
            elif defn.find("(") < 0:
                # assume it's a synonym, like 'page = REGEX_PAGE_NR'
                defn = 'Synonym(%s)' % defn
        except TypeError as error:
            errmsg = EBNFCompiler.AST_ERROR + " (" + str(error) + ")\n" + node.as_sexpr()
            node.add_error(errmsg)
            rule, defn = rule + ':error', '"' + errmsg + '"'
        return rule, defn

    @staticmethod
    def _check_rx(node: Node, rx: str) -> str:
        """Checks whether the string `rx` represents a valid regular
        expression. Makes sure that multiline regular expressions are
        prepended by the multiline-flag. Returns the regular expression string.
        """
        rx = rx if rx.find('\n') < 0 or rx[0:4] == '(?x)' else '(?x)' + rx
        try:
            re.compile(rx)
        except Exception as re_error:
            node.add_error("malformed regular expression %s: %s" %
                           (repr(rx), str(re_error)))
        return rx
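
    # Example for the multiline handling in _check_rx() (sketch): a pattern
    # that contains a line feed, say "\d{4}\n-\d{2}", is returned as
    # "(?x)\d{4}\n-\d{2}", i.e. with the verbose flag prepended, so that
    # insignificant whitespace and comments inside the pattern are ignored.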

    def on_directive(self, node: Node) -> str:
        key = str(node.children[0]).lower()
        assert key not in self.directives['tokens']

        if key in {'comment', 'whitespace'}:
            if node.children[1].parser.name == "list_":
                if len(node.children[1].result) != 1:
                    node.add_error('Directive "%s" must have exactly one value, not %i.' %
                                   (key, len(node.children[1].result)))
                value = self._compile(node.children[1]).pop()
                if key == 'whitespace' and value in EBNFCompiler.WHITESPACE:
                    value = EBNFCompiler.WHITESPACE[value]  # replace whitespace-name by regex
                else:
                    node.add_error('Value "%s" not allowed for directive "%s".' % (value, key))
            else:
                value = str(node.children[1]).strip("~")  # cast(str, node.children[1].result).strip("~")
                if value != str(node.children[1]):  # cast(str, node.children[1].result):
                    node.add_error("Whitespace marker '~' not allowed in definition of "
                                   "%s regular expression." % key)
                if value[0] + value[-1] in {'""', "''"}:
                    value = escape_re(value[1:-1])
                elif value[0] + value[-1] == '//':
                    value = self._check_rx(node, value[1:-1])
                if key == 'whitespace' and not re.match(value, ''):
                    node.add_error("Implicit whitespace should always match the empty string, "
                                   "/%s/ does not." % value)
            self.directives[key] = value

        elif key == 'testing':
            value = str(node.children[1])
            self.directives['testing'] = value.lower() not in {"off", "false", "no"}

        elif key == 'literalws':
            value = {item.lower() for item in self._compile(node.children[1])}
            if (len(value - {'left', 'right', 'both', 'none'}) > 0
                    or ('none' in value and len(value) > 1)):
                node.add_error('Directive "literalws" allows the values '
                               '`left`, `right`, `both` or `none`, '
                               'but not `%s`' % ", ".join(value))
            ws = {'left', 'right'} if 'both' in value \
                else set() if 'none' in value else value
            self.directives[key] = list(ws)

        elif key in {'tokens', 'scanner_tokens'}:
            self.directives['tokens'] |= self._compile(node.children[1])

        elif key.endswith('_filter'):
            filter_set = self._compile(node.children[1])
            if not isinstance(filter_set, set) or len(filter_set) != 1:
                node.add_error('Directive "%s" accepts exactly one symbol, not %s'
                               % (key, str(filter_set)))
            self.directives['filter'][key[:-7]] = filter_set.pop()

        else:
            node.add_error('Unknown directive %s! (Known ones are %s.)' %
                           (key,
                            ', '.join(list(self.directives.keys()))))
        return ""

    def non_terminal(self, node: Node, parser_class: str, custom_args: List[str]=[]) -> str:
        """Compiles any non-terminal, where `parser_class` indicates the Parser class
        name for the particular non-terminal.
        """
        arguments = [self._compile(r) for r in node.children] + custom_args
        return parser_class + '(' + ', '.join(arguments) + ')'
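
    # For example (sketch): a `term` node whose children compile to the
    # snippets "factor" and "RE('[0-9]+')" is rendered by
    # non_terminal(node, 'Sequence') as the string
    # "Sequence(factor, RE('[0-9]+'))".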

    def on_expression(self, node) -> str:
        return self.non_terminal(node, 'Alternative')

    def on_term(self, node) -> str:
        return self.non_terminal(node, 'Sequence')

    def on_factor(self, node: Node) -> str:
        assert node.children
        assert len(node.children) >= 2, node.as_sexpr()
        prefix = str(node.children[0])  # cast(str, node.children[0].result)
        custom_args = []  # type: List[str]

        if prefix in {'::', ':'}:
            assert len(node.children) == 2
            arg = node.children[-1]
            if arg.parser.name != 'symbol':
                node.add_error(('Retrieve Operator "%s" requires a symbol, '
                                'and not a %s.') % (prefix, str(arg.parser)))
                return str(arg.result)
            if str(arg) in self.directives['filter']:
                custom_args = ['filter=%s' % self.directives['filter'][str(arg)]]
            self.variables.add(str(arg))  # cast(str, arg.result)

        elif len(node.children) > 2:
            # shift = (Node(node.parser, node.result[1].result),)
            # node.result[1].result = shift + node.result[2:]
            node.children[1].result = (Node(node.children[1].parser, node.children[1].result),) \
                                    + node.children[2:]
            node.children[1].parser = node.parser
            node.result = (node.children[0], node.children[1])

        node.result = node.children[1:]
        try:
            parser_class = self.PREFIX_TABLE[prefix]
            return self.non_terminal(node, parser_class, custom_args)
        except KeyError:
            node.add_error('Unknown prefix "%s".' % prefix)
        return ""

    def on_option(self, node) -> str:
        return self.non_terminal(node, 'Optional')

    def on_repetition(self, node) -> str:
        return self.non_terminal(node, 'ZeroOrMore')

    def on_oneormore(self, node) -> str:
        return self.non_terminal(node, 'OneOrMore')

    def on_regexchain(self, node) -> str:
        raise EBNFCompilerError("Not yet implemented!")

    def on_group(self, node) -> str:
        raise EBNFCompilerError("Group nodes should have been eliminated by "
                                "AST transformation!")

    def on_symbol(self, node: Node) -> str:     # called only for symbols on the right hand side!
        symbol = str(node)  # ; assert result == cast(str, node.result)
        if symbol in self.directives['tokens']:
            return 'ScannerToken("' + symbol + '")'
        else:
            self.current_symbols.append(node)
            if symbol not in self.symbols:
                self.symbols[symbol] = node
            if symbol in self.rules:
                self.recursive.add(symbol)
            return symbol

    def on_literal(self, node) -> str:
        return 'Token(' + str(node).replace('\\', r'\\') + ')'  # return 'Token(' + ', '.join([node.result]) + ')' ?

    def on_regexp(self, node: Node) -> str:
        rx = str(node)
        name = []   # type: List[str]
        if rx[:2] == '~/':
            if 'left' not in self.directives['literalws']:
                name = ['wL=' + self.WHITESPACE_KEYWORD] + name
            rx = rx[1:]
        elif 'left' in self.directives['literalws']:
            name = ["wL=''"] + name
        if rx[-2:] == '/~':
            if 'right' not in self.directives['literalws']:
                name = ['wR=' + self.WHITESPACE_KEYWORD] + name
            rx = rx[:-1]
        elif 'right' in self.directives['literalws']:
            name = ["wR=''"] + name
        try:
            arg = repr(self._check_rx(node, rx[1:-1].replace(r'\/', '/')))
        except AttributeError as error:
            errmsg = EBNFCompiler.AST_ERROR + " (" + str(error) + ")\n" + \
                     node.as_sexpr()
            node.add_error(errmsg)
            return '"' + errmsg + '"'
        return 'RE(' + ', '.join([arg] + name) + ')'
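
    # Tilde handling in on_regexp(), assuming the default `@ literalws = right`
    # (sketch of the generated parser snippets):
    #
    #     /\w+/    ->  RE('\\w+', wR='')      # no adjacent whitespace at all
    #     /\w+/~   ->  RE('\\w+')             # implicit whitespace on the right
    #     ~/\w+/~  ->  RE('\\w+', wL=WSP__)   # ... and on the left as well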

    def on_list_(self, node) -> Set[str]:
        assert node.children
        return set(item.result.strip() for item in node.children)


def get_ebnf_compiler(grammar_name="", grammar_source="") -> EBNFCompiler:
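    """Returns a lazily created singleton instance of ``EBNFCompiler`` and
    (re-)initializes it with ``grammar_name`` and ``grammar_source`` on
    every call."""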
    global thread_local_ebnf_compiler_singleton
    try:
        compiler = thread_local_ebnf_compiler_singleton
        compiler.set_grammar_name(grammar_name, grammar_source)
        return compiler
    except NameError:
        thread_local_ebnf_compiler_singleton = EBNFCompiler(grammar_name, grammar_source)
        return thread_local_ebnf_compiler_singleton
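
# End-to-end sketch of how the factories in this module fit together when
# compiling an EBNF grammar into Python parser code (illustrative only; error
# handling omitted, `arith_ebnf` is a made-up grammar source):
#
#     arith_ebnf = 'document = /[0-9 +*()-]+/\n'
#     syntax_tree = get_ebnf_grammar()(get_ebnf_scanner()(arith_ebnf))
#     get_ebnf_transformer()(syntax_tree)
#     if not syntax_tree.error_flag:
#         python_src = get_ebnf_compiler("Arith", arith_ebnf)(syntax_tree)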