Commit a289b064 authored by Eckhart Arnold

- more tests and some corrections

parent 703308bd
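Note (illustration only, not part of the committed diff): one of the corrections extends mock_syntax_tree so that an s-expression tag may carry a parser class name after a colon, which MockParser now stores as class_name. A minimal sketch of the resulting behaviour, drawn from the new test_mock_syntax_tree_with_classes further below; the import path is the one used in that test module:

    from DHParser.syntaxtree import mock_syntax_tree

    # A tag may now read "name:class_name"; an empty name falls back to the class name.
    tree = mock_syntax_tree('(a:class1 (b:class2 x) (:class3 y) (c z))')
    assert tree.tag_name == 'a'                  # explicit name takes precedence
    assert tree.result[1].tag_name == 'class3'   # no name given, so the class name is used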
@@ -442,8 +442,7 @@ class EBNFCompiler(CompilerBase):
         definitions = []

         # drop the wrapping sequence node
-        if isinstance(node.parser, Sequence) and \
-                isinstance(node.result[0].parser, ZeroOrMore):
+        if len(node.children) == 1 and not node.result[0].parser.name:
             node = node.result[0]

         # compile definitions and directives and collect definitions
@@ -351,7 +351,8 @@ class GrammarBase:
         parser = self[start_parser]
         stitches = []
         rest = document
-        result = Node(None, '')
+        if not rest:
+            result, ignore = parser(rest)
         while rest and len(stitches) < MAX_DROPOUTS:
             result, rest = parser(rest)
             if rest:
@@ -586,11 +587,11 @@ class RE(Parser):
             return Node(self, result), t
         return None, text

-    def __str__(self):
-        if self.name == TOKEN_KEYWORD:
-            return 'Token "%s"' % self.main.regexp.pattern.replace('\\', '')
-        return self.name or ('RE ' + ('~' if self.wL else '')
-                             + '/%s/' % self.main.regexp.pattern + ('~' if self.wR else ''))
+    # def __str__(self):
+    #     if self.name == TOKEN_KEYWORD:
+    #         return 'Token "%s"' % self.main.regexp.pattern.replace('\\', '')
+    #     return self.name or ('RE ' + ('~' if self.wL else '')
+    #                          + '/%s/' % self.main.regexp.pattern + ('~' if self.wR else ''))

     def _grammar_assigned_notifier(self):
         if self.grammar:
@@ -917,7 +918,7 @@ class Forward(Parser):
         Parser.__init__(self)
         self.parser = None
         self.name = ''
-        self.cycle_reached = False
+        # self.cycle_reached = False

     def __deepcopy__(self, memo):
         duplicate = self.__class__()
@@ -929,16 +930,16 @@ class Forward(Parser):
     def __call__(self, text):
         return self.parser(text)

-    def __str__(self):
-        if self.cycle_reached:
-            if self.parser and self.parser.name:
-                return str(self.parser.name)
-            return "..."
-        else:
-            self.cycle_reached = True
-            s = str(self.parser)
-            self.cycle_reached = False
-            return s
+    # def __str__(self):
+    #     if self.cycle_reached:
+    #         if self.parser and self.parser.name:
+    #             return str(self.parser.name)
+    #         return "..."
+    #     else:
+    #         self.cycle_reached = True
+    #         s = str(self.parser)
+    #         self.cycle_reached = False
+    #         return s

     def set(self, parser):
         assert isinstance(parser, Parser)
@@ -1006,7 +1007,7 @@ class CompilerBase:
         for the parsers of the sub nodes by itself. Rather, this should
         be done within the compilation methods.
         """
-        elem = node.parser.name or node.parser.__class__.__name__
+        elem = str(node.parser)
         if not sane_parser_name(elem):
             node.add_error("Reserved name '%s' not allowed as parser "
                            "name! " % elem + "(Any name starting with "
@@ -67,11 +67,12 @@ class MockParser:
     syntax tree (re-)construction. In all other cases where a parser
     object substitute is needed, chose the singleton ZOMBIE_PARSER.
     """
-    def __init__(self, name=''):
+    def __init__(self, name='', class_name=''):
         self.name = name
+        self.class_name = class_name or self.__class__.__name__

     def __str__(self):
-        return self.name or self.__class__.__name__
+        return self.name or self.class_name


 class ZombieParser(MockParser):
@@ -172,7 +173,7 @@ class Node:
         return self.tag_name == other.tag_name and self.result == other.result

     def __hash__(self):
-        return hash((str(self.parser), ))
+        return hash(str(self.parser))

     def __deepcopy__(self, memodict={}):
         result = copy.deepcopy(self.result)
@@ -182,7 +183,7 @@ class Node:
     @property
     def tag_name(self):
-        return self.parser.name or self.parser.__class__.__name__
+        return str(self.parser)
         # ONLY FOR DEBUGGING: return self.parser.name + ':' + self.parser.__class__.__name__

     @property
@@ -440,8 +441,8 @@ def mock_syntax_tree(sexpr):
     if sexpr[0] != '(': raise ValueError('"(" expected, not ' + sexpr[:10])
     # assert sexpr[0] == '(', sexpr
     sexpr = sexpr[1:].strip()
-    m = re.match('\w+', sexpr)
-    name = sexpr[:m.end()]
+    m = re.match('[\w:]+', sexpr)
+    name, class_name = (sexpr[:m.end()].split(':') + [''])[:2]
     sexpr = sexpr[m.end():].strip()
     if sexpr[0] == '(':
         result = tuple(mock_syntax_tree(block) for block in next_block(sexpr))
@@ -460,7 +461,7 @@ def mock_syntax_tree(sexpr):
             lines.append(sexpr[:m.end()])
             sexpr = sexpr[m.end():]
         result = "\n".join(lines)
-    return Node(MockParser(name), result)
+    return Node(MockParser(name, class_name), result)


 ########################################################################
@@ -577,16 +578,12 @@ def is_whitespace(node):
     return node.parser.name == WHITESPACE_KEYWORD


-# def is_scanner_token(node):
-#     return isinstance(node.parser, ScannerToken)
-
-
 def is_empty(node):
     return not node.result


 def is_expendable(node):
-    return is_empty(node) or is_whitespace(node)  # or is_scanner_token(node)
+    return is_empty(node) or is_whitespace(node)


 def is_token(node, token_set=frozenset()):
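Aside (a sketch, not part of the diff itself): with the class_name parameter added above, str() of a MockParser falls back to the class name when no parser name is given, which is what Node.tag_name and Node.__hash__ now rely on via str(self.parser). Assuming MockParser can be imported from the same module as mock_syntax_tree, and using an arbitrary illustrative class name:

    from DHParser.syntaxtree import MockParser

    p = MockParser('item')               # named: str() returns the given name
    q = MockParser(class_name='RegExp')  # unnamed: str() falls back to the class name
    assert str(p) == 'item'
    assert str(q) == 'RegExp'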
@@ -303,6 +303,31 @@ class TestSelfHosting:
         assert not e, ("%i: " % i) + str(e)


+class TestBoundaryCases:
+    def setup(self):
+        self.gr = get_ebnf_grammar()
+        self.tr = get_ebnf_transformer()
+        self.cp = get_ebnf_compiler()
+
+    def test_empty_grammar(self):
+        t = self.gr("")
+        self.tr(t)
+        r = self.cp(t)
+        assert r
+
+    def test_single_statement_grammar(self):
+        t = self.gr("i = /i/")
+        self.tr(t)
+        r = self.cp(t)
+        assert r
+
+    def test_two_statement_grammar(self):
+        t = self.gr("i = k {k}\nk = /k/")
+        self.tr(t)
+        r = self.cp(t)
+        assert r
+
+
 if __name__ == "__main__":
     from run import runner
-    runner("", globals())
+    runner("TestBoundaryCases", globals())
@@ -26,6 +26,7 @@ sys.path.extend(['../', './'])
 from DHParser.toolkit import compact_sexpr
 from DHParser.syntaxtree import traverse, mock_syntax_tree, reduce_single_child, \
     replace_by_single_child, flatten, remove_expendables, TOKEN_KEYWORD
+from DHParser.ebnf import get_ebnf_grammar, get_ebnf_transformer, get_ebnf_compiler
 from DHParser.dsl import parser_factory
@@ -65,6 +66,13 @@ class TestSExpr:
         tree = mock_syntax_tree(sexpr_stripped)
         assert compact_sexpr(tree.as_sexpr()) == '(a (b "c k l") (d "e") (f (g "h")))'

+    def test_mock_syntax_tree_with_classes(self):
+        sexpr = '(a:class1 (b:class2 x) (:class3 y) (c z))'
+        tree = mock_syntax_tree(sexpr)
+        assert tree.tag_name == 'a'
+        assert tree.result[0].tag_name == 'b'
+        assert tree.result[1].tag_name == 'class3'
+        assert tree.result[2].tag_name == 'c'


 class TestNode:
     """
@@ -113,6 +121,28 @@ class TestNode:
         cpy.result[0].result = "epsilon"
         assert cpy != self.unique_tree

+    def test_copy2(self):
+        # test if Node.__deepcopy__ goes sufficiently deep for ast-
+        # transformation and compiling to perform correctly after copy
+        ebnf = 'term = term ("*"|"/") factor | factor\nfactor = /[0-9]+/~'
+        parser = get_ebnf_grammar()
+        transform = get_ebnf_transformer()
+        compiler = get_ebnf_compiler()
+        tree = parser(ebnf)
+        tree_copy = copy.deepcopy(tree)
+        transform(tree_copy)
+        res1 = compiler(tree_copy)
+        t2 = copy.deepcopy(tree_copy)
+        res2 = compiler(t2)
+        assert res1 == res2
+        tree_copy = copy.deepcopy(tree)
+        transform(tree_copy)
+        res3 = compiler(tree_copy)
+        assert res3 == res2
+        transform(tree)
+        res4 = compiler(tree)
+        assert res4 == res3
+

 class TestErrorHandling:
     def test_error_flag_propagation(self):