test_ebnf.py 27 KB
Newer Older
1
2
#!/usr/bin/python3

3
"""test_ebnf.py - tests of the ebnf module of DHParser 
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
                             

Author: Eckhart Arnold <arnold@badw.de>

Copyright 2017 Bavarian Academy of Sciences and Humanities

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""

Eckhart Arnold's avatar
Eckhart Arnold committed
23
import sys
Eckhart Arnold's avatar
Eckhart Arnold committed
24
from multiprocessing import Pool
Eckhart Arnold's avatar
Eckhart Arnold committed
25

Eckhart Arnold's avatar
Eckhart Arnold committed
26
27
sys.path.extend(['../', './'])

28
from DHParser.toolkit import compile_python_object, re
29
from DHParser.preprocess import nil_preprocessor
30
from DHParser import compile_source
31
from DHParser.error import has_errors, Error
32
from DHParser.syntaxtree import WHITESPACE_PTYPE
eckhart's avatar
eckhart committed
33
34
from DHParser.ebnf import get_ebnf_grammar, get_ebnf_transformer, EBNFTransform, \
    get_ebnf_compiler, compile_ebnf
35
from DHParser.dsl import CompilationError, compileDSL, DHPARSER_IMPORTS, grammar_provider
36
from DHParser.testing import grammar_unit
37
38


39
40
41
42
43
44
45
46
47
48
49
class TestDirectives:
    """Checks the effect of the ``@ whitespace`` directive on how line
    breaks inside an expression are treated."""

    mini_language = """
        expression =  term  { ("+" | "-") term }
        term       =  factor  { ("*" | "/") factor }
        factor     =  constant | "("  expression  ")"
        constant   =  digit { digit } [ //~ ]
        digit      = /0/ | /1/ | /2/ | /3/ | /4/ | /5/ | /6/ | /7/ | /8/ | /9/ 
        """

    def test_whitespace_linefeed(self):
        # "linefeed": a single line break is tolerated, an empty line is not
        lang = "@ whitespace = linefeed\n" + self.mini_language
        parser = grammar_provider(lang)()
        assert parser
        syntax_tree = parser("3 + 4 * 12")
        # parser.log_parsing_history("WSP")
        assert not syntax_tree.collect_errors()
        syntax_tree = parser("3 + 4 \n * 12")
        # parser.log_parsing_history("WSPLF")
        assert not syntax_tree.collect_errors()
        syntax_tree = parser("3 + 4 \n \n * 12")
        assert syntax_tree.collect_errors()
        syntax_tree = parser("3 + 4 \n\n * 12")
        assert syntax_tree.collect_errors()

    def test_whitespace_vertical(self):
        # "vertical": any number of line breaks and empty lines is tolerated
        lang = "@ whitespace = vertical\n" + self.mini_language
        parser = grammar_provider(lang)()
        assert parser
        for src in ("3 + 4 * 12",
                    "3 + 4 \n * 12",
                    "3 + 4 \n \n * 12",
                    "3 + 4 \n\n * 12"):
            syntax_tree = parser(src)
            assert not syntax_tree.collect_errors()

    def test_whitespace_horizontal(self):
        # "horizontal": no line break at all may occur inside an expression
        lang = "@ whitespace = horizontal\n" + self.mini_language
        parser = grammar_provider(lang)()
        assert parser
        syntax_tree = parser("3 + 4 * 12")
        assert not syntax_tree.collect_errors()
        syntax_tree = parser("3 + 4 \n * 12")
        assert syntax_tree.collect_errors()

86

Eckhart Arnold's avatar
Eckhart Arnold committed
87
88
89
90
91
92
93
94
95
96
97
98
class TestReservedSymbols:
    """The reserved symbols COMMENT__ and WSP_RE__ may be referred to
    from within a grammar without being defined there."""

    def test_comment_usage(self):
        lang = r"""
        @comment = /#.*(?:\n|$)/
        document = text [ COMMENT__ ]
        text = /[^#]+/
        """
        parser = grammar_provider(lang)()

    def test_whitespace(self):
        lang = r"""
        @whitespace = /\s*/
        document = WSP_RE__ { word WSP_RE__ }
        word = /\w+/ 
        """
        parser = grammar_provider(lang)()

    def test_mixin(self):
        # whitespace and comment directives combined: comments are
        # swallowed by the implicit whitespace
        lang = r"""
        @comment = /#.*(?:\n|$)/
        @whitespace = /\s*/
        document = WSP_RE__ { word WSP_RE__ }
        word = /\w+/ 
        """
        parser = grammar_provider(lang)()
        result = parser("test # kommentar")
        assert not result.error_flag, str(result.as_sxpr())


116
class TestEBNFParser:
    """Unit tests for single rules of the EBNF meta-grammar."""

    # test table for grammar_unit(): inputs that the 'list_' rule
    # must match respectively fail to match
    cases = {
        "list_": {
            "match": {
                1: "hund",
                2: "hund, katze,maus",
                3: "hund , katze"
            },
            "fail": {
                4: "123",
                5: '"literal"',
                6: "/regexp/"
            }
        }
    }

    def setup(self):
        self.EBNF = get_ebnf_grammar()

    def test_RE(self):
        gr = get_ebnf_grammar()
        m = gr.regexp.parsers[0].regexp.match(r'/[\\\\]/ xxx /')
        rs = m.group()
        # BUG FIX: the assert message used to be `rs.group()`, but `rs`
        # is already a str (the result of m.group()), so a failing assert
        # would have raised AttributeError instead of showing the message.
        assert rs.find('x') < 0, rs
        rx = re.compile(rs[1:-1])
        assert rx.match(r'\\')

    def test_literal(self):
        snippet = '"text" '
        result = self.EBNF(snippet, 'literal')
        assert not result.error_flag
        assert str(result) == snippet
        assert result.select(lambda node: node.parser.ptype == WHITESPACE_PTYPE)

        result = self.EBNF('"text" ', 'literal')
        assert not result.error_flag
        result = self.EBNF(' "text"', 'literal')
        assert result.error_flag  # literals catch following, but not leading whitespace

    def test_plaintext(self):
        result = self.EBNF('`plain`', 'plaintext')
        assert not result.error_flag

    def test_list(self):
        grammar_unit(self.cases, get_ebnf_grammar, get_ebnf_transformer)


163

164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
class TestParserNameOverwriteBug:
    """Regression tests around parser-name assignment when compiling
    grammars that contain the mandatory marker '§'."""

    def test_term_bug(self):
        grammar = get_ebnf_grammar()
        st = grammar('impossible = [§"an optional requirement"]')
        # print(st.as_sxpr())
        get_ebnf_transformer()(st)
        # print(st.as_sxpr())
        lang = """series = "A" "B" §"C" "D"
        """
        parser = get_ebnf_grammar()
        # BUG FIX: the freshly created `parser` was never used; the stale
        # `grammar` object from the first parse was called instead.
        st = parser(lang)
        # print(st.as_sxpr())
        get_ebnf_transformer()(st)
        # print(st.as_sxpr())
        result = get_ebnf_compiler()(st)
        messages = st.collect_errors()
        assert not has_errors(messages), str(messages)

    def test_single_mandatory_bug(self):
        lang = """series = § /B/"""
        result, messages, ast = compile_ebnf(lang)
        # print(result)
        # a '§' before the first (and only) element must not generate
        # a 'Required'-parser in the compiled grammar
        assert result.find('Required') < 0
        parser = grammar_provider(lang)()
        st = parser('B')
        assert not st.error_flag


192

193
194
class TestSemanticValidation:
    """Static (semantic) checks of grammars: certain nestings of
    repetition, option and the mandatory marker are flagged as errors."""

    def check(self, minilang, bool_filter=lambda x: x):
        # Parse the grammar source, transform it, then apply `bool_filter`
        # to the collected errors; by default errors are expected.
        grammar = get_ebnf_grammar()
        st = grammar(minilang)
        assert not st.collect_errors()
        EBNFTransform()(st)
        assert bool_filter(st.collect_errors())

    def test_illegal_nesting(self):
        self.check('impossible = { [ "an optional requirement" ] }')

    def test_illegal_nesting_option_required(self):
        self.check('impossible = [ §"an optional requirement" ]')

    def test_illegal_nesting_oneormore_option(self):
        self.check('impossible = { [ "no use"] }+')

    def test_legal_nesting(self):
        # a repeated group that also contains a non-optional part is fine
        self.check('possible = { [ "+" ] "1" }', lambda x: not x)


class TestCompilerErrors:
    """Error reporting of the EBNF compiler itself."""

    def test_error_propagation(self):
        ebnf = "@ literalws = wrongvalue  # testing error propagation\n"
        result, messages, st = compile_source(ebnf, None, get_ebnf_grammar(),
            get_ebnf_transformer(), get_ebnf_compiler('ErrorPropagationTest'))
        assert messages

    def test_undefined_symbols(self):
        """Use of undefined symbols should be reported.
        """
        ebnf = """syntax = { intermediary }
                  intermediary = "This symbol is " [ badly_spelled ] "!"
                  bedly_spilled = "wrong" """
        result, messages, st = compile_source(ebnf, None, get_ebnf_grammar(),
            get_ebnf_transformer(), get_ebnf_compiler('UndefinedSymbols'))
        assert messages

    def test_no_error(self):
        """But reserved symbols should not be reported as undefined.
        """
        ebnf = """nothing =  WSP_RE__ | COMMENT__\n"""
        result, messages, st = compile_source(ebnf, None, get_ebnf_grammar(),
            get_ebnf_transformer(), get_ebnf_compiler('UndefinedSymbols'))
        assert not bool(messages), messages

239

240
class TestSelfHosting:
    """The EBNF meta-grammar, written in EBNF, must be able to compile
    itself — also when run from several worker processes in parallel."""

    grammar = r"""
        # EBNF-Grammar in EBNF

        @ comment    =  /#.*(?:\n|$)/                    # comments start with '#' and eat all chars up to and including '\n'
        @ whitespace =  /\s*/                            # whitespace includes linefeed
        @ literalws  =  right                            # trailing whitespace of literals will be ignored tacitly

        syntax     =  [~//] { definition | directive } §EOF
        definition =  symbol §"=" expression
        directive  =  "@" §symbol "=" ( regexp | literal | list_ )

        expression =  term { "|" term }
        term       =  { factor }+
        factor     =  [flowmarker] [retrieveop] symbol !"="   # negative lookahead to be sure it's not a definition
                    | [flowmarker] literal
                    | [flowmarker] regexp
                    | [flowmarker] group
                    | [flowmarker] regexchain
                    | [flowmarker] oneormore
                    | repetition
                    | option

        flowmarker =  "!"  | "&"  | "§" |                # '!' negative lookahead, '&' positive lookahead, '§' required
                      "-!" | "-&"                        # '-' negative lookbehind, '-&' positive lookbehind
        retrieveop =  "::" | ":"                         # '::' pop, ':' retrieve

        group      =  "(" expression §")"
        regexchain =  ">" expression §"<"                # compiles "expression" into a singular regular expression
        oneormore  =  "{" expression "}+"
        repetition =  "{" expression §"}"
        option     =  "[" expression §"]"

        symbol     =  /(?!\d)\w+/~                       # e.g. expression, factor, parameter_list
        literal    =  /"(?:[^"]|\\")*?"/~                # e.g. "(", '+', 'while'
                    | /'(?:[^']|\\')*?'/~                # whitespace following literals will be ignored tacitly.
        regexp     =  /~?\/(?:[^\/]|(?<=\\)\/)*\/~?/~    # e.g. /\w+/, ~/#.*(?:\n|$)/~
                                                         # '~' is a whitespace-marker, if present leading or trailing
                                                         # whitespace of a regular expression will be ignored tacitly.
        list_      =  /\w+/~ { "," /\w+/~ }              # comma separated list of symbols, e.g. BEGIN_LIST, END_LIST,
                                                         # BEGIN_QUOTE, END_QUOTE ; see CommonMark/markdown.py for an exmaple
        EOF =  !/./
        """

    def test_self(self):
        compiler_name = "EBNF"
        compiler = get_ebnf_compiler(compiler_name, self.grammar)
        parser = get_ebnf_grammar()
        result, errors, syntax_tree = compile_source(self.grammar, None, parser,
                                            get_ebnf_transformer(), compiler)
        assert not errors, str(errors)
        # compile the grammar again using the result of the previous
        # compilation as parser
        compileDSL(self.grammar, nil_preprocessor, result, get_ebnf_transformer(), compiler)

    def multiprocessing_task(self):
        # worker body for test_multiprocessing: one full self-compilation
        compiler = get_ebnf_compiler("EBNF", self.grammar)
        parser = get_ebnf_grammar()
        result, errors, syntax_tree = compile_source(self.grammar, None, parser,
                                            get_ebnf_transformer(), compiler)
        return errors

    def test_multiprocessing(self):
        with Pool() as pool:
            futures = [pool.apply_async(self.multiprocessing_task, ())
                       for _ in range(4)]
            errors = [f.get(timeout=10) for f in futures]
        for i, e in enumerate(errors):
            assert not e, ("%i: " % i) + str(e)
309
310


311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
class TestBoundaryCases:
    """Degenerate grammars: empty, single-rule, and grammars with rules
    that are not reachable from the root."""

    def setup(self):
        self.gr = get_ebnf_grammar()
        self.tr = get_ebnf_transformer()
        self.cp = get_ebnf_compiler()

    def test_empty_grammar(self):
        t = self.gr("")
        self.tr(t)
        r = self.cp(t)
        assert r

    def test_single_statement_grammar(self):
        t = self.gr("i = /i/")
        self.tr(t)
        r = self.cp(t)
        assert r

    def test_two_statement_grammar(self):
        t = self.gr("i = k {k}\nk = /k/")
        self.tr(t)
        r = self.cp(t)
        assert r

    def test_unconnected_symbols(self):
        ebnf = """root = /.*/
                  unconnected = /.*/
        """
        result, messages, AST = compile_source(ebnf, nil_preprocessor,
                                               get_ebnf_grammar(),
                                               get_ebnf_transformer(),
                                               get_ebnf_compiler())
        if messages:
            assert not has_errors(messages), "Unconnected rules should result in a warning, " \
                "not an error: " + str(messages)
            grammar_src = result
            # FIX: use a raw string for the regex; '\w' in a plain string
            # literal is an invalid escape sequence (DeprecationWarning).
            grammar = compile_python_object(DHPARSER_IMPORTS + grammar_src,
                                            r'get_(?:\w+_)?grammar$')()
        else:
            assert False, "EBNF compiler should warn about unconnected rules."

        assert grammar['root'], "Grammar objects should be subscriptable by parser names!"
        try:
            unconnected = grammar['unconnected']
        except KeyError:
            assert False, "Grammar objects should be able to cope with unconnected parsers!"
        try:
            nonexistent = grammar['nonexistent']
            assert False, "Grammar object should raise a KeyError if subscripted by " \
                          "a non-existent parser name!"
        except KeyError:
            pass
363
364
365
366
367
368
369


class TestSynonymDetection:
    """A rule that merely renames another rule ('a = b') must keep its
    own parser name and still parse the same content."""

    def test_synonym_detection(self):
        ebnf = """a = b
                  b = /b/
        """
        grammar = grammar_provider(ebnf)()
        for name in ('a', 'b'):
            assert grammar[name].pname == name, grammar[name].pname
        # both nodes 'a' and 'b' appear in the tree for input 'b'
        assert grammar('b').as_sxpr().count('b') == 2
374

375

376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
class TestFlowControlOperators:
    """Lookbehind operators and error reporting for ill-formed grammars."""

    def setup(self):
        self.t1 = """
        All work and no play 
        makes Jack a dull boy
        END
        """
        self.t2 = "All word and not play makes Jack a dull boy END\n"

    def test_lookbehind_indirect(self):
        # '-&SUCC_LB' is a positive lookbehind that is resolved through
        # an indirection (SUCC_LB = indirection = /\s*?\n/)
        lang = r"""
            document = ws sequence doc_end ws         
            sequence = { !end word ws }+
            doc_end  = -&SUCC_LB end        
            ws       = /\s*/
            end      = /END/
            word     = /\w+/
            SUCC_LB  = indirection
            indirection = /\s*?\n/
        """
        parser = grammar_provider(lang)()
        cst = parser(self.t1)
        assert not cst.error_flag, cst.as_sxpr()
        cst = parser(self.t2)
        # this should fail, because 'END' is not preceded by a line feed
        assert cst.error_flag, cst.as_sxpr()

    def test_required_error_reporting(self):
        """Tests whether failures to comply with the required operator '§'
        are correctly reported as such.
        """
        # FIX: raw string — '\w' in a plain string literal is an invalid
        # escape sequence (DeprecationWarning); the value is unchanged.
        lang1 = r"nonsense == /\w+/~  # wrong_equal_sign"
        lang2 = "nonsense = [^{}%]+  # someone forgot the '/'-delimiters for regular expressions"
        try:
            parser_class = grammar_provider(lang1)
            assert False, "Compilation error expected."
        except CompilationError as error:
            pass
        try:
            parser_class = grammar_provider(lang2)
            assert False, "Compilation error expected."
        except CompilationError as error:
            pass

420

421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
class TestWhitespace:
    """Implicit whitespace after '"'-literals vs. none after
    backtick-literals, and the explicit whitespace marker '~'."""

    def test_whitespace(self):
        tail = r"""
            WORD     =  /\w+/~
            EOF      =  !/./
        """
        # '"DOC"' swallows trailing whitespace implicitly
        lang1 = r'document = "DOC" { WORD } EOF' + tail
        parser = grammar_provider(lang1)()
        assert not parser("DOC Wörter Wörter Wörter").error_flag
        assert not parser("DOCWörter Wörter Wörter").error_flag

        # '`DOC`' does not: a following blank is a parsing error
        lang2 = r'document = `DOC` { WORD } EOF' + tail
        parser = grammar_provider(lang2)()
        assert parser("DOC Wörter Wörter Wörter").error_flag
        assert not parser("DOCWörter Wörter Wörter").error_flag

        # '`DOC` ~' restores optional whitespace explicitly
        lang3 = r'document = `DOC` ~ { WORD } EOF' + tail
        parser = grammar_provider(lang3)()
        assert not parser("DOC Wörter Wörter Wörter").error_flag
        assert not parser("DOCWörter Wörter Wörter").error_flag


449
450
451
452
class TestAllSome:
    """The 'all of' operator < ... > matches its elements in any order;
    with '|' between the elements, any subset matches ('some of')."""

    def test_all(self):
        grammar = grammar_provider('prefix = <"A" "B">')()
        assert grammar('B A').content == 'B A'

    def test_some(self):
        grammar = grammar_provider('prefix = <"A" | "B">')()
        assert grammar('B A').content == 'B A'
        assert grammar('B').content == 'B'
460

461

462
class TestErrorCustomization:
    """
    Customized Errors replace existing errors with alternative
    error codes and messages that are more helpful to the user.
    """
    def test_customized_mandatory_continuation(self):
        lang = """
            document = series | /.*/
            @series_error = "a user defined error message"
            series = "X" | head §"C" "D"
            head = "A" "B"
            """
        parser = grammar_provider(lang)()
        st = parser("X");  assert not st.error_flag
        st = parser("ABCD");  assert not st.error_flag
        st = parser("A_CD");  assert not st.error_flag
        st = parser("AB_D");  assert st.error_flag
        assert st.collect_errors()[0].code == Error.MANDATORY_CONTINUATION
        assert st.collect_errors()[0].message == "a user defined error message"
        # transitivity of mandatory-operator
        st = parser("ABC_");  assert st.error_flag
        assert st.collect_errors()[0].code == Error.MANDATORY_CONTINUATION
        assert st.collect_errors()[0].message == "a user defined error message"

    def test_customized_error_case_sensitive(self):
        lang = """
            document = Series | /.*/
            @Series_error = "a user defined error message"
            Series = "X" | head §"C" "D"
            head = "A" "B"
            """
        parser = grammar_provider(lang)()
        st = parser("ABC_");  assert st.error_flag
        assert st.collect_errors()[0].code == Error.MANDATORY_CONTINUATION
        assert st.collect_errors()[0].message == "a user defined error message"

    def test_multiple_error_messages(self):
        # FIX: raw string — the grammar contains '\w', which is an invalid
        # escape sequence in a plain string literal (DeprecationWarning);
        # the string value itself is unchanged.
        lang = r"""
            document = series | /.*/
            @series_error = '_', "the underscore is wrong in this place"
            @series_error = '*', "the asterix is wrong in this place"
            @series_error = /(?<=C)\w/, 'C cannot be followed by {0}'
            @series_error = /\w/, "wrong letter {0} in place of {1}"
            @series_error = "fallback error message"
            series = "X" | head §"C" "D"
            head = "A" "B"
            """
        parser = grammar_provider(lang)()
        st = parser("AB*D");  assert st.error_flag
        assert st.collect_errors()[0].code == Error.MANDATORY_CONTINUATION
        assert st.collect_errors()[0].message == "the asterix is wrong in this place"
        # transitivity of mandatory-operator
        st = parser("ABC_");  assert st.error_flag
        assert st.collect_errors()[0].code == Error.MANDATORY_CONTINUATION
        assert st.collect_errors()[0].message == "the underscore is wrong in this place"
        st = parser("ABiD");  assert st.error_flag
        assert st.collect_errors()[0].code == Error.MANDATORY_CONTINUATION
        assert st.collect_errors()[0].message.startswith('wrong letter')
        st = parser("AB+D");  assert st.error_flag
        assert st.collect_errors()[0].code == Error.MANDATORY_CONTINUATION
        assert st.collect_errors()[0].message == "fallback error message"
        st = parser("ABCi");  assert st.error_flag
        assert st.collect_errors()[0].code == Error.MANDATORY_CONTINUATION
        assert st.collect_errors()[0].message.startswith('C cannot be followed by')
526
527
528


class TestErrorCustomizationErrors:
    """Faulty uses of error-customization directives must themselves
    be reported as compilation errors or warnings."""

    def test_ambiguous_error_customization(self):
        lang = """
            document = series 
            @series_error = "ambiguous error message: does it apply to first or second '§'?"
            series = "A" § "B" "C" | "X" § "Y" "Z" 
            """
        try:
            parser = grammar_provider(lang)()
            assert False, "CompilationError because of ambiguous error message exptected!"
        except CompilationError as compilation_error:
            # NOTE(review): assumes `errors` is an iterator — confirm
            err = next(compilation_error.errors)
            assert err.code == Error.AMBIGUOUS_ERROR_HANDLING, str(compilation_error)

    def test_unsed_error_customization(self):
        lang = """
            document = series | other
            @other_error = "a user defined error message"
            series = "A" § "B" "C"
            other = "X" | "Y" | "Z"
            """
        result, messages, ast = compile_ebnf(lang)
        assert messages[0].code == Error.UNUSED_ERROR_HANDLING_WARNING

    def test_multiple_resume_definitions(self):
        # a second @..._resume directive for the same symbol is flagged
        lang = """
            document = series
            @series_resume = /B/, /C/, /D/, /E/, /F/, /G/
            @series_resume = /X/, /Y/
            series = "A" §"B" "C" "D" "E" "F" "G"
            """
        result, messages, ast = compile_ebnf(lang)
        assert messages[0].code == Error.REDEFINED_DIRECTIVE

    def test_multiple_skip_definitions(self):
        # likewise for @..._skip directives
        lang = """
            document = series
            @series_skip = /B/, /C/, /D/, /E/, /F/, /G/
            @series_skip = /X/, /Y/
            series = "A" §"B" "C" "D" "E" "F" "G"
            """
        result, messages, ast = compile_ebnf(lang)
        assert messages[0].code == Error.REDEFINED_DIRECTIVE

572
573
574
575

class TestCustomizedResumeParsing:
    """@..._resume directives: after a mandatory-continuation error the
    parser skips ahead to a resume point and carries on."""

    def setup(self):
        # FIX: raw string — '\w' in a plain triple-quoted string is an
        # invalid escape sequence (DeprecationWarning); value unchanged.
        lang = r"""
            @ alpha_resume = 'BETA', GAMMA_STR
            @ beta_resume = GAMMA_RE
            @ bac_resume = /GA\w+/
            document = alpha [beta] gamma "."
            alpha = "ALPHA" abc
                abc = §"a" "b" "c"
              beta = "BETA" (bac | bca)
                bac = "b" "a" §"c"
                bca = "b" "c" §"a"
              gamma = "GAMMA" §(cab | cba)
                cab = "c" "a" §"b"
                cba = "c" "b" §"a"
            GAMMA_RE = /GA\w+/
            GAMMA_STR = "GAMMA"
            """
        self.gr = grammar_provider(lang)()

    def test_several_resume_rules_innermost_rule_matching(self):
        gr = self.gr
        content = 'ALPHA abc BETA bad GAMMA cab .'
        cst = gr(content)
        # print(cst.as_sxpr())
        assert cst.error_flag
        assert cst.content == content
        assert cst.pick('alpha').content.startswith('ALPHA')
        # because of resuming, there should be only on error message
        assert len(cst.collect_errors()) == 1

        content = 'ALPHA acb BETA bad GAMMA cab .'
        cst = gr(content)
        # print(cst.as_sxpr())
        assert cst.error_flag
        assert cst.content == content
        assert cst.pick('alpha').content.startswith('ALPHA')
        # because of resuming, there should be only on error message
        assert len(cst.collect_errors()) == 2

        content = 'ALPHA acb GAMMA cab .'
        cst = gr(content)
        # print(cst.as_sxpr())
        assert cst.error_flag
        assert cst.content == content
        assert cst.pick('alpha').content.startswith('ALPHA')
        # because of resuming, there should be only on error message
        assert len(cst.collect_errors()) == 1

622

623
624
625
class TestInSeriesResume:
    """@..._skip directive: recover from errors *inside* a series by
    skipping forward to the next expected element."""

    def setup(self):
        lang = """
            document = series
            @series_skip = /B/, /C/, /D/, /E/, /F/, /G/
            series = "A" §"B" "C" "D" "E" "F" "G"
            """
        self.gr = grammar_provider(lang)()

    def test_garbage_in_series(self):
        st = self.gr('ABCDEFG')
        assert not st.error_flag
        st = self.gr('AB XYZ CDEFG')
        errors = st.collect_errors()
        assert len(errors) == 1 and errors[0].code == Error.MANDATORY_CONTINUATION
        st = self.gr('AB XYZ CDE XYZ FG')
        errors = st.collect_errors()
        assert len(errors) == 2 and all(err.code == Error.MANDATORY_CONTINUATION for err in errors)
        st = self.gr('AB XYZ CDE XNZ FG')  # fails to resume parsing
        errors = st.collect_errors()
        assert len(errors) >= 1 and errors[0].code == Error.MANDATORY_CONTINUATION

    def test_series_gap(self):
        st = self.gr('ABDEFG')
        errors = st.collect_errors()
        assert len(errors) == 1 and errors[0].code == Error.MANDATORY_CONTINUATION
        st = self.gr('ABXEFG')  # two missing, one wrong element added
        errors = st.collect_errors()
        assert len(errors) == 2 and all(err.code == Error.MANDATORY_CONTINUATION for err in errors)
        st = self.gr('AB_DE_G')
        errors = st.collect_errors()
        assert len(errors) == 2 and all(err.code == Error.MANDATORY_CONTINUATION for err in errors)

    def test_series_permutation(self):
        st = self.gr('ABEDFG')
        errors = st.collect_errors()
        assert len(errors) >= 1  # cannot really recover from permutation errors

661

662
class TestAllOfResume:
    """Error recovery (@..._skip / @..._resume) inside and after the
    'all of' operator < ... >."""

    def setup(self):
        lang = """
            document = allof
            @ allof_error = '{} erwartet, {} gefunden :-('
            @ allof_skip = /A/, /B/, /C/, /D/, /E/, /F/, /G/
            allof = < "A" "B" § "C" "D" "E" "F" "G" >
        """
        self.gr = grammar_provider(lang)()

    def test_garbage_added(self):
        st = self.gr('GFCBAED')
        assert not st.error_flag
        st = self.gr('GFCB XYZ AED')
        errors = st.collect_errors()
        assert errors[0].code == Error.MANDATORY_CONTINUATION
        # the customized message template must have been applied
        assert str(errors[0]).find(':-(') >= 0

    def test_allof_resume_later(self):
        lang = """
            document = flow "."
            @ flow_resume = '.'
            flow = allof | series
            @ allof_error = '{} erwartet, {} gefunden :-('
            allof = < "A" "B" § "C" "D" "E" "F" "G" >
            series = "E" "X" "Y" "Z"
        """
        gr = grammar_provider(lang)()
        st = gr('GFCBAED.')
        assert not st.error_flag
        st = gr('GFCBAED.')
        assert not st.error_flag
        st = gr('EXYZ.')
        assert not st.error_flag
        st = gr('EDXYZ.')
        assert st.error_flag
        assert len(st.collect_errors()) == 1
        st = gr('FCB_GAED.')
        assert len(st.collect_errors()) == 1

    def test_complex_resume_task(self):
        lang = """
            document = flow { flow } "."
            @ flow_resume = '.'
            flow = allof | series
            @ allof_error = '{} erwartet, {} gefunden :-('
            @ allof_resume = 'E', 'A'
            allof = < "A" "B" § "C" "D" "E" "F" "G" >
            @ series_resume = 'E', 'A'
            series = "E" "X" §"Y" "Z"
        """
        gr = grammar_provider(lang)()
        st = gr('GFCBAED.')
        assert not st.error_flag
        st = gr('GFCBAED.')
        assert not st.error_flag
        st = gr('EXYZ.')
        assert not st.error_flag
        st = gr('EDXYZ.')
        assert st.error_flag
        assert len(st.collect_errors()) == 1
        st = gr('FCB_GAED.')
        assert len(st.collect_errors()) == 2
        st = gr('EXY EXYZ.')
        assert len(st.collect_errors()) == 1

730
731


732
if __name__ == "__main__":
    # run all test classes in this module through DHParser's test runner
    from DHParser.testing import runner
    runner("", globals())