#!/usr/bin/python3

"""test_parsers.py - tests of the parsers-module of DHParser 

Author: Eckhart Arnold <arnold@badw.de>

Copyright 2017 Bavarian Academy of Sciences and Humanities

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""

import sys
from functools import partial

sys.path.extend(['../', './'])

from DHParser.toolkit import is_logging, logging, compile_python_object
from DHParser.syntaxtree import traverse, remove_expendables, \
    replace_by_single_child, reduce_single_child, flatten, TOKEN_PTYPE
from DHParser.parsers import compile_source, Retrieve
from DHParser.ebnf import get_ebnf_grammar, get_ebnf_transformer, get_ebnf_compiler
from DHParser.dsl import parser_factory, DHPARSER_IMPORTS


ARITHMETIC_EBNF = """
    @ whitespace = linefeed
    formula = [ //~ ] expr
    expr = expr ("+"|"-") term | term
    term = term ("*"|"/") factor | factor
    factor = /[0-9]+/~
    # example:  "5 + 3 * 4"
    """

ARITHMETIC2_EBNF = """
    @ whitespace = linefeed
    formula = [ //~ ] expr
    expr = ex
    ex   = expr ("+"|"-") term | term
    term = term ("*"|"/") factor | factor
    factor = /[0-9]+/~
    # example:  "5 + 3 * 4"
    """


# ARITHMETIC_EBNF_transformation_table = {
#     # AST Transformations for the DSL-grammar
#     "formula": [remove_expendables],
#     "term, expr": [replace_by_single_child, flatten],
#     "factor": [remove_expendables, reduce_single_child],
#     (TOKEN_PTYPE): [remove_expendables, reduce_single_child],
#     "*": [remove_expendables, replace_by_single_child]
# }
#
#
# ARITHMETIC2_EBNF_transformation_table = {
#     # AST Transformations for the DSL-grammar
#     "formula": [remove_expendables],
#     "term, ex": [replace_by_single_child, flatten],
#     "factor": [remove_expendables, reduce_single_child],
#     (TOKEN_PTYPE): [remove_expendables, reduce_single_child],
#     "*": [remove_expendables, replace_by_single_child]
# }
#
#
# ARITHMETIC_EBNFTransform = partial(traverse, processing_table=ARITHMETIC_EBNF_transformation_table)
# ARITHMETIC2_EBNFTransform = partial(traverse, processing_table=ARITHMETIC2_EBNF_transformation_table)


class TestInfiLoopsAndRecursion:
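    """Tests that grammars with direct and indirect left recursion are parsed
    correctly and that a grammar which could loop forever is reported as an
    error instead of hanging."""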
    def test_direct_left_recursion(self):
        minilang = ARITHMETIC_EBNF
        snippet = "9 + 8 + 7 + 6 + 5 + 3 * 4"
        parser = parser_factory(minilang)()
        assert parser
        syntax_tree = parser(snippet)
        assert not syntax_tree.error_flag, str(syntax_tree.collect_errors())
        assert snippet == str(syntax_tree)
        if is_logging():
            syntax_tree.log("test_LeftRecursion_direct.cst")
            # self.minilang_parser1.log_parsing_history("test_LeftRecursion_direct")

    def test_indirect_left_recursion(self):
        minilang = ARITHMETIC2_EBNF
        snippet = "9 + 8 + 7 + 6 + 5 + 3 * 4"
        parser = parser_factory(minilang)()
        assert parser
        syntax_tree = parser(snippet)
        assert not syntax_tree.collect_errors()
        assert snippet == str(syntax_tree)
        if is_logging():
            syntax_tree.log("test_LeftRecursion_indirect.cst")

    def test_infinite_loops(self):
        minilang = """not_forever = { // } \n"""
        snippet = " "
        parser = parser_factory(minilang)()
        syntax_tree = parser(snippet)
        assert syntax_tree.error_flag
        # print(syntax_tree.collect_errors())


class TestRegex:
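    """Tests that regular expressions in EBNF definitions may span several
    lines in verbose style with embedded comments, and that string tokens
    (see test_token below) are parsed correctly."""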
    def test_multilineRegex(self):
        mlregex = r"""
        regex =  /\w+    # one or more alphabetical characters including the underscore
                  [+]    # followed by a plus sign
                  \w*    # possibly followed by more alpha characters/
        """
        result, messages, syntax_tree = compile_source(mlregex, None, get_ebnf_grammar(),
                        get_ebnf_transformer(), get_ebnf_compiler('MultilineRegexTest'))
        assert result
        assert not messages
        parser = compile_python_object(DHPARSER_IMPORTS + result, r'\w+Grammar$')()
        node, rest = parser.regex('abc+def')
        assert rest == ''
        assert node.parser.name == "regex"
        assert str(node) == 'abc+def'

    def test_token(self):
        tokenlang = r"""
            @whitespace = linefeed
            lang        = "" begin_token {/\w+/ ""} end_token
            begin_token = "\begin{document}"
            end_token   = "\end{document}"
            """
        testdoc = r"""
            \begin{document}
            test
            \end{document}
            """
        result, messages, syntax_tree = compile_source(tokenlang, None, get_ebnf_grammar(),
                                    get_ebnf_transformer(), get_ebnf_compiler("TokenTest"))
        assert result
        assert not messages
        parser = compile_python_object(DHPARSER_IMPORTS + result, r'\w+Grammar$')()
        result = parser(testdoc)
        # parser.log_parsing_history("test.log")
        assert not result.error_flag


class TestGrammar:
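    """Tests of the compiled grammar object: position values in the parsing
    history are initialized, and parsing can be started from an explicitly
    selected rule, e.g. grammar("wort", "WORT")."""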
    def setup(self):
        grammar = r"""@whitespace = horizontal
        haupt        = textzeile LEERZEILE
        textzeile    = { WORT }+
        WORT         = /[^ \t]+/~
        LEERZEILE    = /\n[ \t]*(?=\n)/~
        """
        self.pyparser, messages, syntax_tree = compile_source(grammar, None, get_ebnf_grammar(),
                                                              get_ebnf_transformer(), get_ebnf_compiler("PosTest"))
        assert self.pyparser
        assert not messages

    def test_pos_values_initialized(self):
        # checks whether pos values in the parsing result and in the
        # history record have been initialized
        with logging("LOGS"):
            grammar = compile_python_object(DHPARSER_IMPORTS + self.pyparser, r'\w+Grammar$')()
            grammar("no_file_name*")
        for record in grammar.history__:
            assert not record.node or record.node.pos >= 0

    def test_select_parsing(self):
        grammar = compile_python_object(DHPARSER_IMPORTS + self.pyparser, r'\w+Grammar$')()
        grammar("wort", "WORT")
        grammar("eine Zeile", "textzeile")
        grammar("kein Haupt", "haupt")
        grammar("so ist es richtig", "haupt")


class TestPopRetrieve:
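    """Tests of the Capture/Retrieve stack.  In the grammars below, a symbol
    such as `delimiter`, `braces` or `name` is captured on a stack when it
    matches; `::symbol` pops the captured text again and requires the closing
    mark to match it, while `:symbol` retrieves it without popping (used in
    the lookahead `!:delimiter`).  With `@braces_filter = counterpart`, the
    popped text is matched against the complementary closing braces, as the
    *_complement tests below check."""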
    mini_language = """
        document       = { text | codeblock }
        codeblock      = delimiter { text | (!:delimiter delimiter_sign) } ::delimiter
        delimiter      = delimiter_sign  # never use delimiter between capture and retrieve!!!
        delimiter_sign = /`+/
        text           = /[^`]+/ 
        """
    mini_lang2 = """
        @braces_filter=counterpart
        document       = { text | codeblock }
        codeblock      = braces { text | opening_braces | (!:braces closing_braces) } ::braces
        braces         = opening_braces
        opening_braces = /\{+/
        closing_braces = /\}+/
        text           = /[^{}]+/
        """
    mini_lang3 = """
        document       = { text | env }
        env            = (specialtag | opentag) text [closespecial | closetag]
        opentag        = "<" name ">"
        specialtag     = "<" /ABC/ !name ">"
        closetag       = close_slash | close_star 
        close_slash    = "<" ::name "/>"
        close_star     = "<" ::name "*>"
        closespecial   = "<" /ABC/~ ">"
        name           = /\w+/~
        text           = /[^<>]+/
        """

    def setup(self):
        self.minilang_parser = parser_factory(self.mini_language)()
        self.minilang_parser2 = parser_factory(self.mini_lang2)()
        self.minilang_parser3 = parser_factory(self.mini_lang3)()

    @staticmethod
    def opening_delimiter(node, name):
        return node.tag_name == name and not isinstance(node.parser, Retrieve)

    @staticmethod
    def closing_delimiter(node):
        return isinstance(node.parser, Retrieve)

    def test_compile_mini_language(self):
        assert self.minilang_parser
        assert self.minilang_parser2
        assert self.minilang_parser3

    def test_stackhandling(self):
        ambiguous_opening = "<ABCnormal> normal tag <ABCnormal*>"
        syntax_tree = self.minilang_parser3(ambiguous_opening)
        assert not syntax_tree.error_flag, str(syntax_tree.collect_errors())

        ambiguous_opening = "<ABCnormal> normal tag <ABCnormal/>"
        syntax_tree = self.minilang_parser3(ambiguous_opening)
        assert not syntax_tree.error_flag, str(syntax_tree.collect_errors())

        forgot_closing_tag = "<em> where is the closing tag?"
        syntax_tree = self.minilang_parser3(forgot_closing_tag)
        assert syntax_tree.error_flag, str(syntax_tree.collect_errors())

        proper = "<em> has closing tag <em/>"
        syntax_tree = self.minilang_parser3(proper)
        assert not syntax_tree.error_flag, str(syntax_tree.collect_errors())

        proper = "<em> has closing tag <em*>"
        syntax_tree = self.minilang_parser3(proper)
        assert not syntax_tree.error_flag, str(syntax_tree.collect_errors())

    def test_cache_neutrality(self):
        """Test that packrat-caching does not interfere with
        Capture-Retrieve-Stack."""
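        # Scenario sketch: `package` is tried at the same text position twice,
        # first inside `unmarked_package` and then inside `marked_package`.
        # Even if the second attempt is answered from the packrat cache, the
        # captured `variable` must still be available for the concluding
        # `::variable` retrieve.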
        lang = """
            text = opening closing
            opening = (unmarked_package | marked_package)
            closing = ::variable
            unmarked_package = package "."
            marked_package = package "*" "."
            package = "(" variable ")"
            variable = /\w+/~
            """
        case = "(secret)*. secret"
        gr = parser_factory(lang)()
        st = gr(case)
        assert not st.error_flag, str(st.collect_errors())

    def test_single_line(self):
        teststr = "Anfang ```code block `` <- keine Ende-Zeichen ! ``` Ende"
        syntax_tree = self.minilang_parser(teststr)
        assert not syntax_tree.collect_errors()
        delim = str(next(syntax_tree.find(partial(self.opening_delimiter, name="delimiter"))))
        pop = str(next(syntax_tree.find(self.closing_delimiter)))
        assert delim == pop
        if is_logging():
            syntax_tree.log("test_PopRetrieve_single_line.cst")

    def test_multi_line(self):
        teststr = """
            Anfang ```code block `` <- keine Ende-Zeichen ! ``` Ende

            Absatz ohne ``` codeblock, aber
            das stellt sich erst am Ende heraus...

            Mehrzeiliger ```code block
            """
        syntax_tree = self.minilang_parser(teststr)
        assert not syntax_tree.collect_errors()
        delim = str(next(syntax_tree.find(partial(self.opening_delimiter, name="delimiter"))))
        pop = str(next(syntax_tree.find(self.closing_delimiter)))
        assert delim == pop
        if is_logging():
            syntax_tree.log("test_PopRetrieve_multi_line.cst")

    def test_single_line_complement(self):
        teststr = "Anfang {{{code block }} <- keine Ende-Zeichen ! }}} Ende"
        syntax_tree = self.minilang_parser2(teststr)
        assert not syntax_tree.collect_errors()
        delim = str(next(syntax_tree.find(partial(self.opening_delimiter, name="braces"))))
        pop = str(next(syntax_tree.find(self.closing_delimiter)))
        assert len(delim) == len(pop) and delim != pop
        if is_logging():
            syntax_tree.log("test_PopRetrieve_single_line.cst")

    def test_multi_line_complement(self):
        teststr = """
            Anfang {{{code block {{ <- keine Ende-Zeichen ! }}} Ende

            Absatz ohne {{{ codeblock, aber
            das stellt sich erst am Ende heraus...

            Mehrzeiliger }}}code block
            """
        syntax_tree = self.minilang_parser2(teststr)
        assert not syntax_tree.collect_errors()
        delim = str(next(syntax_tree.find(partial(self.opening_delimiter, name="braces"))))
        pop = str(next(syntax_tree.find(self.closing_delimiter)))
        assert len(delim) == len(pop) and delim != pop
        if is_logging():
            syntax_tree.log("test_PopRetrieve_multi_line.cst")


class TestWhitespaceHandling:
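    """Tests that insignificant whitespace is absorbed by string tokens such
    as "A" and "B", but not by plain regular expressions such as /A/ and
    /B/."""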
    minilang = """@testing = True
        doc = A B
        A = "A"
        B = "B"
        Rdoc = ar br
        ar = /A/
        br = /B/
        """

    def setup(self):
        self.gr = parser_factory(self.minilang)()

    def test_token_whitespace(self):
        st = self.gr("AB", 'doc')
        assert not st.error_flag
        st = self.gr("A B", 'doc')
        assert not st.error_flag

    def test_regexp_whitespace(self):
        st = self.gr("AB", 'Rdoc')
        assert not st.error_flag
        st = self.gr("A B", 'Rdoc')
        assert st.error_flag


if __name__ == "__main__":
    from DHParser.testing import runner
    runner("", globals())