"""ebnf.py - EBNF -> Python-Parser compilation for DHParser

Copyright 2016  by Eckhart Arnold (arnold@badw.de)
                Bavarian Academy of Sciences and Humanities (badw.de)

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied.  See the License for the specific language governing
permissions and limitations under the License.
"""

import keyword
from collections import OrderedDict

try:
    import regex as re
except ImportError:
    import re

from .typing import Callable, Dict, List, Set, Tuple

from DHParser.toolkit import load_if_file, escape_re, md5, sane_parser_name
from DHParser.parsers import Grammar, mixin_comment, nil_scanner, Forward, RE, NegativeLookahead, \
    Alternative, Sequence, Optional, Required, OneOrMore, ZeroOrMore, Token, Compiler, \
    ScannerFunc
from DHParser.syntaxtree import Node, traverse, remove_enclosing_delimiters, reduce_single_child, \
    replace_by_single_child, TOKEN_PTYPE, remove_expendables, remove_tokens, flatten, \
    forbid, assert_content, WHITESPACE_PTYPE, key_tag_name, TransformationFunc
from DHParser.versionnumber import __version__
42
# Public API of this module: the four factory functions, the concrete
# EBNF grammar/transformer/compiler classes, and the factory type aliases.
__all__ = ['get_ebnf_scanner',
           'get_ebnf_grammar',
           'get_ebnf_transformer',
           'get_ebnf_compiler',
           'EBNFGrammar',
           'EBNFTransformer',
           'EBNFCompilerError',
           'EBNFCompiler',
           'grammar_changed',
           'ScannerFactoryFunc',
           'ParserFactoryFunc',
           'TransformerFactoryFunc',
           'CompilerFactoryFunc']


########################################################################
#
# EBNF scanning
#
########################################################################
def get_ebnf_scanner() -> ScannerFunc:
    """Returns the scanner function for EBNF sources.

    EBNF sources require no preprocessing, so this is always the
    ``nil_scanner``, which passes the source text through unchanged.
    """
    return nil_scanner


########################################################################
#
# EBNF parsing
#
########################################################################


# TODO: Introduce dummy/rename-parser, for simple assignments (e.g. jahr = JAHRESZAHL) or substitution!
# TODO: Raise Error for unconnected parsers!
class EBNFGrammar(Grammar):
    r"""Parser for an EBNF source file, with this grammar:

    # EBNF-Grammar in EBNF

    @ comment    =  /#.*(?:\n|$)/                    # comments start with '#' and eat all chars up to and including '\n'
    @ whitespace =  /\s*/                            # whitespace includes linefeed
    @ literalws  =  right                            # trailing whitespace of literals will be ignored tacitly

    syntax     =  [~//] { definition | directive } §EOF
    definition =  symbol §"=" expression
    directive  =  "@" §symbol §"=" ( regexp | literal | list_ )

    expression =  term { "|" term }
    term       =  { factor }+
    factor     =  [flowmarker] [retrieveop] symbol !"="   # negative lookahead to be sure it's not a definition
                | [flowmarker] literal
                | [flowmarker] regexp
                | [flowmarker] group
                | [flowmarker] regexchain
                | [flowmarker] oneormore
                | repetition
                | option

    flowmarker =  "!"  | "&"  | "§" |                # '!' negative lookahead, '&' positive lookahead, '§' required
                  "-!" | "-&"                        # '-' negative lookbehind, '-&' positive lookbehind
    retrieveop =  "::" | ":"                         # '::' pop, ':' retrieve

    group      =  "(" expression §")"
    regexchain =  ">" expression §"<"                # compiles "expression" into a singular regular expression
    oneormore  =  "{" expression "}+"
    repetition =  "{" expression §"}"
    option     =  "[" expression §"]"

    symbol     =  /(?!\d)\w+/~                       # e.g. expression, factor, parameter_list
    literal    =  /"(?:[^"]|\\")*?"/~                # e.g. "(", '+', 'while'
                | /'(?:[^']|\\')*?'/~                # whitespace following literals will be ignored tacitly.
    regexp     =  /~?\/(?:[^\/]|(?<=\\)\/)*\/~?/~    # e.g. /\w+/, ~/#.*(?:\n|$)/~
                                                     # '~' is a whitespace-marker, if present leading or trailing
                                                     # whitespace of a regular expression will be ignored tacitly.
    list_      =  /\w+/~ { "," /\w+/~ }              # comma separated list of symbols, e.g. BEGIN_LIST, END_LIST,
                                                     # BEGIN_QUOTE, END_QUOTE ; see CommonMark/markdown.py for an example
    EOF =  !/./
    """
    # NOTE: This class is generated code; the parser definitions below must
    # stay in sync with the grammar in the docstring and with source_hash__.
    expression = Forward()
    source_hash__ = "a410e1727fb7575e98ff8451dbf8f3bd"
    parser_initialization__ = "upon instantiation"
    COMMENT__ = r'#.*(?:\n|$)'
    WSP__ = mixin_comment(whitespace=r'\s*', comment=r'#.*(?:\n|$)')
    wspL__ = ''
    wspR__ = WSP__
    EOF = NegativeLookahead(RE('.', wR=''))
    list_ = Sequence(RE('\\w+'), ZeroOrMore(Sequence(Token(","), RE('\\w+'))))
    regexp = RE('~?/(?:[^/]|(?<=\\\\)/)*/~?')
    literal = Alternative(RE('"(?:[^"]|\\\\")*?"'), RE("'(?:[^']|\\\\')*?'"))
    symbol = RE('(?!\\d)\\w+')
    option = Sequence(Token("["), expression, Required(Token("]")))
    repetition = Sequence(Token("{"), expression, Required(Token("}")))
    oneormore = Sequence(Token("{"), expression, Token("}+"))
    # NOTE(review): the docstring rule reads `regexchain = ">" expression §"<"`,
    # but the generated parser uses "<" first and ">" last — confirm which of
    # the two is intended before regenerating.
    regexchain = Sequence(Token("<"), expression, Required(Token(">")))
    group = Sequence(Token("("), expression, Required(Token(")")))
    retrieveop = Alternative(Token("::"), Token(":"))
    flowmarker = Alternative(Token("!"), Token("&"), Token("§"), Token("-!"), Token("-&"))
    factor = Alternative(Sequence(Optional(flowmarker), Optional(retrieveop), symbol, NegativeLookahead(Token("="))),
                         Sequence(Optional(flowmarker), literal), Sequence(Optional(flowmarker), regexp),
                         Sequence(Optional(flowmarker), group), Sequence(Optional(flowmarker), regexchain),
                         Sequence(Optional(flowmarker), oneormore), repetition, option)
    term = OneOrMore(factor)
    expression.set(Sequence(term, ZeroOrMore(Sequence(Token("|"), term))))
    directive = Sequence(Token("@"), Required(symbol), Required(Token("=")), Alternative(regexp, literal, list_))
    definition = Sequence(symbol, Required(Token("=")), expression)
    syntax = Sequence(Optional(RE('', wR='', wL=WSP__)), ZeroOrMore(Alternative(definition, directive)), Required(EOF))
    root__ = syntax


147
def grammar_changed(grammar_class, grammar_source: str) -> bool:
    """Returns ``True`` if ``grammar_class`` does not reflect the latest
    changes of ``grammar_source``

    Parameters:
        grammar_class:  the parser class representing the grammar
            or the file name of a compiler suite containing the grammar
        grammar_source:  File name or string representation of the
            EBNF code of the grammar

    Returns (bool):
        True, if the source text of the grammar is different from the
        source from which the grammar class was generated
    """
    grammar = load_if_file(grammar_source)
    chksum = md5(grammar, __version__)
    if isinstance(grammar_class, str):
        # grammar_class is a file name: scan the compiler suite's source
        # code for the embedded source_hash__ of its Grammar class.
        # grammar_class = load_compiler_suite(grammar_class)[1]
        with open(grammar_class, 'r', encoding='utf8') as f:
            pycode = f.read()
        # Raw strings: '\w' / '\(' are invalid escape sequences in plain
        # string literals (SyntaxWarning as of Python 3.12).
        m = re.search(r'class \w*\(Grammar\)', pycode)
        if m:
            # only look for the hash *after* the class header
            m = re.search(r'    source_hash__ *= *"([a-z0-9]*)"',
                          pycode[m.span()[1]:])
            return not (m and m.groups() and m.groups()[-1] == chksum)
        else:
            # no Grammar class found at all -> treat as changed
            return True
    else:
        return chksum != grammar_class.source_hash__


178
def get_ebnf_grammar() -> EBNFGrammar:
    """Returns a thread-local singleton instance of ``EBNFGrammar``,
    creating it lazily on first use."""
    global thread_local_ebnf_grammar_singleton
    try:
        return thread_local_ebnf_grammar_singleton
    except NameError:
        # first call in this thread: instantiate and memoize
        thread_local_ebnf_grammar_singleton = EBNFGrammar()
        return thread_local_ebnf_grammar_singleton


########################################################################
#
# EBNF concrete to abstract syntax tree transformation and validation
#
########################################################################


# TODO: Add Capture and Retrieve Validation: A variable mustn't be captured twice before retrieval?!?
# Processing table mapping node tag names (comma-separated for shared
# handlers) to the transformation(s) applied by `traverse`.
EBNF_transformation_table = {
    # AST Transformations for EBNF-grammar
    "syntax":
        remove_expendables,
    "directive, definition":
        remove_tokens('@', '='),
    "expression":
        [replace_by_single_child, flatten, remove_tokens('|')],
    "term":
        [replace_by_single_child, flatten],  # supports both idioms:  "{ factor }+" and "factor { factor }"
    "factor, flowmarker, retrieveop":
        replace_by_single_child,
    "group":
        [remove_enclosing_delimiters, replace_by_single_child],
    "oneormore, repetition, option, regexchain":
        [reduce_single_child, remove_enclosing_delimiters],
    "symbol, literal, regexp":
        [remove_expendables, reduce_single_child],
    (TOKEN_PTYPE, WHITESPACE_PTYPE):
        [remove_expendables, reduce_single_child],
    "list_":
        [flatten, remove_tokens(',')],
    "*":
        [remove_expendables, replace_by_single_child]
}

223

224
# Semantic checks run over the AST after transformation: nested
# repetition-like constructs and '§' inside them are rejected.
EBNF_validation_table = {
    # Semantic validation on the AST. EXPERIMENTAL!
    "repetition, option, oneormore":
        [forbid('repetition', 'option', 'oneormore'),
         assert_content(r'(?!§)')]
}

231

232
def EBNFTransformer(syntax_tree: Node):
    """Transforms the concrete syntax tree of an EBNF source into an
    abstract syntax tree (in place) and then validates it."""
    # both passes key on the node's tag name
    for table in (EBNF_transformation_table, EBNF_validation_table):
        traverse(syntax_tree, table, key_tag_name)


238
def get_ebnf_transformer() -> TransformationFunc:
    """Returns the AST-transformation function for EBNF syntax trees."""
    return EBNFTransformer
########################################################################
#
# EBNF abstract syntax tree to Python parser compilation
#
########################################################################

ScannerFactoryFunc = Callable[[], ScannerFunc]
ParserFactoryFunc = Callable[[], Grammar]
251
TransformerFactoryFunc = Callable[[], TransformationFunc]
252
253
254
CompilerFactoryFunc = Callable[[], Compiler]


255
SCANNER_FACTORY = '''
256
def get_scanner() -> ScannerFunc:
257
258
259
260
261
    return {NAME}Scanner
'''


GRAMMAR_FACTORY = '''
262
def get_grammar() -> {NAME}Grammar:
263
264
265
266
267
268
269
270
271
272
273
    global thread_local_{NAME}_grammar_singleton
    try:
        grammar = thread_local_{NAME}_grammar_singleton
        return grammar
    except NameError:
        thread_local_{NAME}_grammar_singleton = {NAME}Grammar()
        return thread_local_{NAME}_grammar_singleton
'''


TRANSFORMER_FACTORY = '''
274
def get_transformer() -> TransformationFunc:
275
276
277
278
279
    return {NAME}Transform
'''


COMPILER_FACTORY = '''
280
def get_compiler(grammar_name="{NAME}", grammar_source="") -> {NAME}Compiler:
281
282
283
284
285
286
287
288
289
290
291
    global thread_local_{NAME}_compiler_singleton
    try:
        compiler = thread_local_{NAME}_compiler_singleton
        compiler.set_grammar_name(grammar_name, grammar_source)
        return compiler
    except NameError:
        thread_local_{NAME}_compiler_singleton = \\
            {NAME}Compiler(grammar_name, grammar_source)
        return thread_local_{NAME}_compiler_singleton 
'''

Eckhart Arnold's avatar
Eckhart Arnold committed
292

293
294
class EBNFCompilerError(Exception):
    """Error raised by `EBNFCompiler` class. (Not compilation errors
    in the strict sense, see `CompilationError` in module ``dsl.py``)"""
    pass


299
class EBNFCompiler(Compiler):
    """Generates a Parser from an abstract syntax tree of a grammar specified
    in EBNF-Notation.
    """
    # names of the implicitly defined whitespace/comment parsers
    COMMENT_KEYWORD = "COMMENT__"
    WHITESPACE_KEYWORD = "WSP__"
    RESERVED_SYMBOLS = {WHITESPACE_KEYWORD, COMMENT_KEYWORD}
    AST_ERROR = "Badly structured syntax tree. " \
                "Potentially due to erroneuos AST transformation."
    # maps EBNF prefix operators to the Parser class names they compile to
    PREFIX_TABLE = {'§': 'Required',
                    '&': 'Lookahead', '!': 'NegativeLookahead',
                    '-&': 'Lookbehind', '-!': 'NegativeLookbehind',
                    '::': 'Pop', ':': 'Retrieve'}
    # named whitespace regexes selectable via the @whitespace directive
    WHITESPACE = {'horizontal': r'[\t ]*',  # default: horizontal
                  'linefeed': r'[ \t]*\n?(?!\s*\n)[ \t]*',
                  'vertical': r'\s*'}

    def __init__(self, grammar_name="", grammar_source=""):
        """Initializes the compiler and resets all per-compilation state."""
        super(EBNFCompiler, self).__init__(grammar_name, grammar_source)
        self._reset()

    def _reset(self):
        """Clears all state accumulated during a compilation run, so the
        same compiler instance can be reused for another grammar."""
        self._result = ''           # type: str
        self.rules = OrderedDict()  # type: OrderedDict[str, List[Node]]
        self.current_symbols = []   # type: List[Node]
        self.symbols = {}           # type: Dict[str, Node]
        self.variables = set()      # type: Set[str]
        # self.definition_names = []  # type: List[str]
        self.recursive = set()      # type: Set[str]
        self.root = ""              # type: str
        # directive values, pre-filled with their defaults
        self.directives = {'whitespace': self.WHITESPACE['horizontal'],
                           'comment': '',
                           'literalws': ['right'],
                           'tokens': set(),     # alt. 'scanner_tokens'
                           'filter': dict(),    # alt. 'filter'
                           'testing': False }

    @property
    def result(self) -> str:
        """The Python source code produced by the last compilation run."""
        return self._result

    def gen_scanner_skeleton(self) -> str:
        """Returns Python source for a no-op scanner plus its factory."""
        name = self.grammar_name + "Scanner"
        return "def %s(text):\n    return text\n" % name \
               + SCANNER_FACTORY.format(NAME=self.grammar_name)

    def gen_transformer_skeleton(self) -> str:
        """Returns skeleton Python source for an AST-transformation table
        with one `no_transformation` entry per compiled rule.

        Raises:
            EBNFCompilerError: if no grammar has been compiled yet.
        """
        if not self.rules:
            raise EBNFCompilerError('Compiler must be run before calling '
                                    '"gen_transformer_Skeleton()"!')
        tt_name = self.grammar_name + '_AST_transformation_table'
        tf_name = self.grammar_name + 'Transform'
        transtable = [tt_name + ' = {',
                      '    # AST Transformations for the ' +
                      self.grammar_name + '-grammar']
        for name in self.rules:
            transtable.append('    "' + name + '": no_transformation,')
        transtable += ['    "*": no_transformation', '}', '', tf_name +
                       ' = partial(traverse, processing_table=%s)' % tt_name, '']
        transtable += [TRANSFORMER_FACTORY.format(NAME=self.grammar_name)]
        return '\n'.join(transtable)

    def gen_compiler_skeleton(self) -> str:
        """Returns skeleton Python source for a Compiler subclass with one
        `on_...` stub method per compiled rule (the root rule's stub
        returns the node, the others `pass`).

        Raises:
            EBNFCompilerError: if no grammar has been compiled yet.
        """
        if not self.rules:
            raise EBNFCompilerError('Compiler has not been run before calling '
                                    '"gen_Compiler_Skeleton()"!')
        compiler = ['class ' + self.grammar_name + 'Compiler(Compiler):',
                    '    """Compiler for the abstract-syntax-tree of a ' +
                    self.grammar_name + ' source file.',
                    '    """', '',
                    '    def __init__(self, grammar_name="' +
                    self.grammar_name + '", grammar_source=""):',
                    '        super(' + self.grammar_name +
                    'Compiler, self).__init__(grammar_name, grammar_source)',
                    "        assert re.match('\w+\Z', grammar_name)", '']
        for name in self.rules:
            method_name = Compiler.derive_method_name(name)
            if name == self.root:
                compiler += ['    def ' + method_name + '(self, node):',
                             '        return node', '']
            else:
                compiler += ['    def ' + method_name + '(self, node):',
                             '        pass', '']
        compiler += [COMPILER_FACTORY.format(NAME=self.grammar_name)]
        return '\n'.join(compiler)

    def assemble_parser(self, definitions: List[Tuple[str, str]], root_node: Node) -> str:
        """Assembles the complete Python source of the generated Grammar
        class from the compiled (symbol, parser-expression) pairs and
        reports missing or unconnected symbols as errors on the tree.
        """
        # fix capture of variables that have been defined before usage [sic!]
        if self.variables:
            for i in range(len(definitions)):
                if definitions[i][0] in self.variables:
                    definitions[i] = (definitions[i][0], 'Capture(%s)' % definitions[i][1])

        # implicit whitespace/comment definitions derived from directives
        definitions.append(('wspR__', self.WHITESPACE_KEYWORD
                            if 'right' in self.directives['literalws'] else "''"))
        definitions.append(('wspL__', self.WHITESPACE_KEYWORD
                            if 'left' in self.directives['literalws'] else "''"))
        definitions.append((self.WHITESPACE_KEYWORD,
                            ("mixin_comment(whitespace="
                             "r'{whitespace}', comment=r'{comment}')").
                            format(**self.directives)))
        definitions.append((self.COMMENT_KEYWORD, "r'{comment}'".format(**self.directives)))

        # prepare parser class header and docstring and
        # add EBNF grammar to the doc string of the parser class
        article = 'an ' if self.grammar_name[0:1] in "AaEeIiOoUu" else 'a '  # what about 'hour', 'universe' etc.?
        declarations = ['class ' + self.grammar_name +
                        'Grammar(Grammar):',
                        'r"""Parser for ' + article + self.grammar_name +
                        ' source file' +
                        (', with this grammar:' if self.grammar_source else '.')]
        definitions.append(('parser_initialization__', '"upon instantiation"'))
        if self.grammar_source:
            # embed the source hash so grammar_changed() can detect staleness
            definitions.append(('source_hash__',
                                '"%s"' % md5(self.grammar_source, __version__)))
            declarations.append('')
            declarations += [line for line in self.grammar_source.split('\n')]
            while declarations[-1].strip() == '':
                declarations = declarations[:-1]
        declarations.append('"""')

        # turn definitions into declarations in reverse order
        self.root = definitions[0][0] if definitions else ""
        definitions.reverse()
        declarations += [symbol + ' = Forward()'
                         for symbol in sorted(list(self.recursive))]
        for symbol, statement in definitions:
            if symbol in self.recursive:
                declarations += [symbol + '.set(' + statement + ')']
            else:
                declarations += [symbol + ' = ' + statement]

        # check for symbols used but never defined
        defined_symbols = set(self.rules.keys()) | self.RESERVED_SYMBOLS
        for symbol in self.symbols:
            if symbol not in defined_symbols:
                self.symbols[symbol].add_error("Missing definition for symbol '%s'" % symbol)
                root_node.error_flag = True

        # check for unconnected rules (unless @testing directive is set)
        if not self.directives['testing']:
            defined_symbols.difference_update(self.RESERVED_SYMBOLS)

            def remove_connections(symbol):
                # depth-first removal of all symbols reachable from `symbol`
                if symbol in defined_symbols:
                    defined_symbols.remove(symbol)
                    for related in self.rules[symbol][1:]:
                        remove_connections(str(related))

            remove_connections(self.root)
            for leftover in defined_symbols:
                self.rules[leftover][0].add_error(('Rule "%s" is not connected to parser '
                    'root "%s" !') % (leftover, self.root) + ' (Use directive "@testing=True" '
                    'to supress this error message.)')

        # set root parser and assemble python grammar definition
        if self.root and 'root__' not in self.rules:
            declarations.append('root__ = ' + self.root)
        declarations.append('')
        self._result = '\n    '.join(declarations) \
                       + GRAMMAR_FACTORY.format(NAME=self.grammar_name)
        return self._result

    def on_syntax(self, node: Node) -> str:
        """Compiles the root node of an EBNF syntax tree: compiles all
        definitions and directives, then assembles the parser source."""
        self._reset()
        definitions = []

        # drop the wrapping sequence node
        if len(node.children) == 1 and not node.children[0].parser.name:
            node = node.children[0]

        # compile definitions and directives and collect definitions
        for nd in node.children:
            if nd.parser.name == "definition":
                definitions.append(self._compile(nd))
            else:
                assert nd.parser.name == "directive", nd.as_sexpr()
                self._compile(nd)
                node.error_flag = node.error_flag or nd.error_flag

        return self.assemble_parser(definitions, node)

    def on_definition(self, node: Node) -> Tuple[str, str]:
        """Compiles a single rule definition into a
        (symbol, parser-expression-source) pair, reporting redefinitions,
        reserved/illegal names and Python-keyword clashes as errors."""
        rule = str(node.children[0])
        if rule in self.rules:
            node.add_error('A rule with name "%s" has already been defined.' % rule)
        elif rule in EBNFCompiler.RESERVED_SYMBOLS:
            node.add_error('Symbol "%s" is a reserved symbol.' % rule)
        elif not sane_parser_name(rule):
            node.add_error('Illegal symbol "%s". Symbols must not start or '
                           ' end with a doube underscore "__".' % rule)
        elif rule in self.directives['tokens']:
            node.add_error('Symbol "%s" has already been defined as '
                           'a scanner token.' % rule)
        elif keyword.iskeyword(rule):
            node.add_error('Python keyword "%s" may not be used as a symbol. '
                           % rule + '(This may change in the future.)')
        try:
            self.current_symbols = [node]
            self.rules[rule] = self.current_symbols
            defn = self._compile(node.children[1])
            if rule in self.variables:
                # rule was referenced via a retrieve-operator -> wrap in Capture
                defn = 'Capture(%s)' % defn
                self.variables.remove(rule)
            elif defn.find("(") < 0:
                # assume it's a synonym, like 'page = REGEX_PAGE_NR'
                defn = 'Synonym(%s)' % defn
        except TypeError as error:
            errmsg = EBNFCompiler.AST_ERROR + " (" + str(error) + ")\n" + node.as_sexpr()
            node.add_error(errmsg)
            rule, defn = rule + ':error', '"' + errmsg + '"'
        return rule, defn

    @staticmethod
    def _check_rx(node: Node, rx: str) -> str:
        """Checks whether the string `rx` represents a valid regular
        expression. Makes sure that multiline regular expressions are
        prepended by the multiline-flag. Returns the regular expression string.
        """
        rx = rx if rx.find('\n') < 0 or rx[0:4] == '(?x)' else '(?x)' + rx
        try:
            re.compile(rx)
        except Exception as re_error:
            node.add_error("malformed regular expression %s: %s" %
                           (repr(rx), str(re_error)))
        return rx

    def on_directive(self, node: Node) -> str:
        """Processes an `@key = value` directive, updating
        ``self.directives``; always returns the empty string, since
        directives produce no parser definition of their own."""
        key = str(node.children[0]).lower()
        assert key not in self.directives['tokens']

        if key in {'comment', 'whitespace'}:
            if node.children[1].parser.name == "list_":
                if len(node.children[1].result) != 1:
                    node.add_error('Directive "%s" must have one, but not %i values.' %
                                   (key, len(node.children[1].result)))
                value = self._compile(node.children[1]).pop()
                if key == 'whitespace' and value in EBNFCompiler.WHITESPACE:
                    value = EBNFCompiler.WHITESPACE[value]  # replace whitespace-name by regex
                else:
                    node.add_error('Value "%s" not allowed for directive "%s".' % (value, key))
            else:
                value = str(node.children[1]).strip("~")  # cast(str, node.children[1].result).strip("~")
                if value != str(node.children[1]):  # cast(str, node.children[1].result):
                    node.add_error("Whitespace marker '~' not allowed in definition of "
                                   "%s regular expression." % key)
                if value[0] + value[-1] in {'""', "''"}:
                    # quoted literal -> escape it for use as a regex
                    value = escape_re(value[1:-1])
                elif value[0] + value[-1] == '//':
                    value = self._check_rx(node, value[1:-1])
                if key == 'whitespace' and not re.match(value, ''):
                    node.add_error("Implicit whitespace should always match the empty string, "
                                   "/%s/ does not." % value)
            self.directives[key] = value

        elif key == 'testing':
            value = str(node.children[1])
            self.directives['testing'] = value.lower() not in {"off", "false", "no"}

        elif key == 'literalws':
            value = {item.lower() for item in self._compile(node.children[1])}
            if (len(value - {'left', 'right', 'both', 'none'}) > 0
                    or ('none' in value and len(value) > 1)):
                node.add_error('Directive "literalws" allows the values '
                               '`left`, `right`, `both` or `none`, '
                               'but not `%s`' % ", ".join(value))
            ws = {'left', 'right'} if 'both' in value \
                else {} if 'none' in value else value
            self.directives[key] = list(ws)

        elif key in {'tokens', 'scanner_tokens'}:
            self.directives['tokens'] |= self._compile(node.children[1])

        elif key.endswith('_filter'):
            filter_set = self._compile(node.children[1])
            if not isinstance(filter_set, set) or len(filter_set) != 1:
                node.add_error('Directive "%s" accepts exactly on symbol, not %s'
                               % (key, str(filter_set)))
            self.directives['filter'][key[:-7]] = filter_set.pop()

        else:
            node.add_error('Unknown directive %s ! (Known ones are %s .)' %
                           (key,
                            ', '.join(list(self.directives.keys()))))
        return ""

    def non_terminal(self, node: Node, parser_class: str, custom_args: List[str]=[]) -> str:
        """Compiles any non-terminal, where `parser_class` indicates the Parser class
        name for the particular non-terminal.
        """
        # NOTE(review): mutable default argument is harmless here because
        # custom_args is only read (list concatenation), never mutated.
        arguments = [self._compile(r) for r in node.children] + custom_args
        return parser_class + '(' + ', '.join(arguments) + ')'

    def on_expression(self, node) -> str:
        """Compiles an alternative of terms into an `Alternative` parser."""
        return self.non_terminal(node, 'Alternative')

    def on_term(self, node) -> str:
        """Compiles a sequence of factors into a `Sequence` parser."""
        return self.non_terminal(node, 'Sequence')

    def on_factor(self, node: Node) -> str:
        """Compiles a prefixed factor (lookahead/lookbehind, required,
        pop/retrieve) into the corresponding parser from PREFIX_TABLE."""
        assert node.children
        assert len(node.children) >= 2, node.as_sexpr()
        prefix = str(node.children[0])  # cast(str, node.children[0].result)
        custom_args = []  # type: List[str]

        if prefix in {'::', ':'}:
            # pop/retrieve operators only make sense on a symbol
            assert len(node.children) == 2
            arg = node.children[-1]
            if arg.parser.name != 'symbol':
                node.add_error(('Retrieve Operator "%s" requires a symbol, '
                                'and not a %s.') % (prefix, str(arg.parser)))
                return str(arg.result)
            if str(arg) in self.directives['filter']:
                custom_args = ['filter=%s' % self.directives['filter'][str(arg)]]
            self.variables.add(str(arg))  # cast(str, arg.result)

        elif len(node.children) > 2:
            # several operands after the prefix: wrap them into a single
            # child node so the prefix parser gets exactly one argument
            # shift = (Node(node.parser, node.result[1].result),)
            # node.result[1].result = shift + node.result[2:]
            node.children[1].result = (Node(node.children[1].parser, node.children[1].result),) \
                                    + node.children[2:]
            node.children[1].parser = node.parser
            node.result = (node.children[0], node.children[1])

        node.result = node.children[1:]
        try:
            parser_class = self.PREFIX_TABLE[prefix]
            return self.non_terminal(node, parser_class, custom_args)
        except KeyError:
            node.add_error('Unknown prefix "%s".' % prefix)
        return ""

    def on_option(self, node) -> str:
        """Compiles `[ ... ]` into an `Optional` parser."""
        return self.non_terminal(node, 'Optional')

    def on_repetition(self, node) -> str:
        """Compiles `{ ... }` into a `ZeroOrMore` parser."""
        return self.non_terminal(node, 'ZeroOrMore')

    def on_oneormore(self, node) -> str:
        """Compiles `{ ... }+` into a `OneOrMore` parser."""
        return self.non_terminal(node, 'OneOrMore')

    def on_regexchain(self, node) -> str:
        """Regex chains are not supported yet."""
        raise EBNFCompilerError("Not yet implemented!")

    def on_group(self, node) -> str:
        """Groups never reach the compiler: AST transformation replaces
        them by their content."""
        raise EBNFCompilerError("Group nodes should have been eliminated by "
                                "AST transformation!")

    def on_symbol(self, node: Node) -> str:     # called only for symbols on the right hand side!
        """Compiles a right-hand-side symbol reference: either a scanner
        token or a plain symbol, recording usage for later checks."""
        symbol = str(node)  # ; assert result == cast(str, node.result)
        if symbol in self.directives['tokens']:
            return 'ScannerToken("' + symbol + '")'
        else:
            self.current_symbols.append(node)
            if symbol not in self.symbols:
                self.symbols[symbol] = node
            if symbol in self.rules:
                # symbol referenced after (or while) being defined -> recursive
                self.recursive.add(symbol)
            return symbol

    def on_literal(self, node) -> str:
        """Compiles a string literal into a `Token` parser."""
        return 'Token(' + str(node).replace('\\', r'\\') + ')'  # return 'Token(' + ', '.join([node.result]) + ')' ?

    def on_regexp(self, node: Node) -> str:
        """Compiles a regular-expression literal into an `RE` parser,
        honoring leading/trailing '~' whitespace markers and the
        @literalws directive."""
        rx = str(node)
        name = []   # type: List[str]
        if rx[:2] == '~/':
            if not 'left' in self.directives['literalws']:
                name = ['wL=' + self.WHITESPACE_KEYWORD] + name
            rx = rx[1:]
        elif 'left' in self.directives['literalws']:
            name = ["wL=''"] + name
        if rx[-2:] == '/~':
            if 'right' not in self.directives['literalws']:
                name = ['wR=' + self.WHITESPACE_KEYWORD] + name
            rx = rx[:-1]
        elif 'right' in self.directives['literalws']:
            name = ["wR=''"] + name
        try:
            arg = repr(self._check_rx(node, rx[1:-1].replace(r'\/', '/')))
        except AttributeError as error:
            errmsg = EBNFCompiler.AST_ERROR + " (" + str(error) + ")\n" + \
                     node.as_sexpr()
            node.add_error(errmsg)
            return '"' + errmsg + '"'
        return 'RE(' + ', '.join([arg] + name) + ')'

    def on_list_(self, node) -> Set[str]:
        """Compiles a comma separated list into a set of stripped strings."""
        assert node.children
        return set(item.result.strip() for item in node.children)


697
def get_ebnf_compiler(grammar_name="", grammar_source="") -> EBNFCompiler:
    """Returns a thread-local singleton ``EBNFCompiler``, retargeted to
    the given grammar name and source on every reuse."""
    global thread_local_ebnf_compiler_singleton
    try:
        compiler = thread_local_ebnf_compiler_singleton
    except NameError:
        # first call in this thread: create and memoize the compiler
        compiler = EBNFCompiler(grammar_name, grammar_source)
        thread_local_ebnf_compiler_singleton = compiler
        return compiler
    # reused instance: point it at the requested grammar
    compiler.set_grammar_name(grammar_name, grammar_source)
    return compiler