Currently job artifacts in CI/CD pipelines on LRZ GitLab never expire. Starting from Wed 26.1.2022 the default expiration time will be 30 days (GitLab default). Currently existing artifacts in already completed jobs will not be affected by the change. The latest artifacts for all jobs in the latest successful pipelines will be kept. More information: https://gitlab.lrz.de/help/user/admin_area/settings/continuous_integration.html#default-artifacts-expiration

Commit 43bc5127 authored by eckhart's avatar eckhart
Browse files

small optimizations

parent 2c57bc34
...@@ -158,7 +158,7 @@ def transformation_factory(t1=None, t2=None, t3=None, t4=None, t5=None): ...@@ -158,7 +158,7 @@ def transformation_factory(t1=None, t2=None, t3=None, t4=None, t5=None):
f = singledispatch(f) f = singledispatch(f)
try: try:
if len(params) == 1 and issubclass(p1type, Container) \ if len(params) == 1 and issubclass(p1type, Container) \
and not issubclass(p1type, Text) and not issubclass(p1type, ByteString): and not (issubclass(p1type, Text) or issubclass(p1type, ByteString)):
def gen_special(*args): def gen_special(*args):
c = set(args) if issubclass(p1type, AbstractSet) else \ c = set(args) if issubclass(p1type, AbstractSet) else \
list(args) if issubclass(p1type, Sequence) else args list(args) if issubclass(p1type, Sequence) else args
...@@ -241,8 +241,8 @@ def traverse(root_node: Node, ...@@ -241,8 +241,8 @@ def traverse(root_node: Node,
# Is this optimazation really needed? # Is this optimazation really needed?
if '__cache__' in processing_table: if '__cache__' in processing_table:
# assume that processing table has already been expanded # assume that processing table has already been expanded
table = processing_table table = processing_table # type: ProcessingTableType
cache = processing_table['__cache__'] cache = processing_table['__cache__'] # type: Dictionary[str, List[Callable]]
else: else:
# normalize processing_table entries by turning single values # normalize processing_table entries by turning single values
# into lists with a single value # into lists with a single value
...@@ -261,6 +261,7 @@ def traverse(root_node: Node, ...@@ -261,6 +261,7 @@ def traverse(root_node: Node,
# cache = {} # type: Dict[str, List[Callable]] # cache = {} # type: Dict[str, List[Callable]]
def traverse_recursive(context): def traverse_recursive(context):
nonlocal cache
node = context[-1] node = context[-1]
if node.children: if node.children:
for child in node.result: for child in node.result:
...@@ -385,8 +386,7 @@ def is_token(context: List[Node], tokens: AbstractSet[str] = frozenset()) -> boo ...@@ -385,8 +386,7 @@ def is_token(context: List[Node], tokens: AbstractSet[str] = frozenset()) -> boo
"""Checks whether the last node in the context has `ptype == TOKEN_PTYPE` """Checks whether the last node in the context has `ptype == TOKEN_PTYPE`
and it's content matches one of the given tokens. Leading and trailing and it's content matches one of the given tokens. Leading and trailing
whitespace-tokens will be ignored. In case an empty set of tokens is passed, whitespace-tokens will be ignored. In case an empty set of tokens is passed,
any token is a match. If only ":" is given all anonymous tokens but no other any token is a match.
tokens are a match.
""" """
def stripped(nd: Node) -> str: def stripped(nd: Node) -> str:
"""Removes leading and trailing whitespace-nodes from content.""" """Removes leading and trailing whitespace-nodes from content."""
......
...@@ -54,7 +54,7 @@ def fail_on_error(src, result): ...@@ -54,7 +54,7 @@ def fail_on_error(src, result):
def tst_func(): def tst_func():
with DHParser.log.logging(True): with DHParser.log.logging(False):
if not os.path.exists('REPORT'): if not os.path.exists('REPORT'):
os.mkdir('REPORT') os.mkdir('REPORT')
files = os.listdir('testdata') files = os.listdir('testdata')
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment