
Commit 663e5268 authored by Eckhart Arnold

- source_map test

parent d18f157c
@@ -19,10 +19,10 @@ permissions and limitations under the License.
 import bisect
 import collections
 import functools
-from DHParser.toolkit import typing, re
+from typing import Union, Callable
+from DHParser.toolkit import re
 __all__ = ('RX_TOKEN_NAME',
            'BEGIN_TOKEN',
            'TOKEN_DELIMITER',
@@ -102,9 +102,9 @@ def tokenized_to_original_mapping(tokenized_source: str) -> SourceMap:
         d = tokenized_source.find(TOKEN_DELIMITER, i)
         e = tokenized_source.find(END_TOKEN, i)
         assert 0 <= d < e
-        o -= (d - i + 2)
+        o -= (d - i + 3)
         positions.extend([d + 1, e + 1])
-        offsets.extend([o, o - 1])
+        offsets.extend([o + 1, o])
         i = tokenized_source.find(BEGIN_TOKEN, e + 1)
     # post conditions
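
For orientation, the positions and offsets that tokenized_to_original_mapping collects in the hunk above are meant to be looked up with bisect when a position in the preprocessed text is mapped back to the original source. The following is a minimal, self-contained sketch of that lookup, assuming a SourceMap that is essentially a pair of parallel lists; it is not the verbatim source_map implementation from DHParser.preprocess.

import bisect
import collections

# Hypothetical stand-in for DHParser's SourceMap: offsets[k] is the correction
# that applies to all preprocessed positions >= positions[k].
SourceMap = collections.namedtuple('SourceMap', ['positions', 'offsets'])

def map_to_original(position, srcmap):
    # Pick the last recorded position that is <= the queried position
    # and apply its offset to recover the position in the original source.
    k = bisect.bisect_right(srcmap.positions, position) - 1
    return position + srcmap.offsets[k]

# Example: correction points as a single inserted token might produce them.
example = SourceMap(positions=[0, 8, 15], offsets=[0, -5, -7])
assert map_to_original(20, example) == 13

The test added below exercises exactly this round trip: find a substring in the preprocessed text, map its position back, and check that the same substring starts at the mapped position in the original code.
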
@@ -22,10 +22,10 @@ limitations under the License.
 # import sys
 # sys.path.append('../')
-from DHParser.toolkit import re, lstrip_docstring, logging
-from DHParser.preprocess import make_token, tokenized_to_original_mapping, source_map, \
-    BEGIN_TOKEN, END_TOKEN, TOKEN_DELIMITER, pp_tokenized
 from DHParser.dsl import grammar_provider
+from DHParser.preprocess import make_token, tokenized_to_original_mapping, source_map, \
+    BEGIN_TOKEN, END_TOKEN, TOKEN_DELIMITER
+from DHParser.toolkit import lstrip_docstring
 class TestMakeToken:
@@ -109,6 +109,14 @@ class TestTokenParsing:
         self.tokenized = self.preprocess_indentation(self.code)
         self.srcmap = tokenized_to_original_mapping(self.tokenized)
 
+    def verify_mapping(self, teststr, orig_text, preprocessed_text):
+        mapped_pos = preprocessed_text.find(teststr)
+        assert mapped_pos >= 0
+        original_pos = source_map(mapped_pos, self.srcmap)
+        assert orig_text[original_pos:original_pos + len(teststr)] == teststr, \
+            '"%s" (%i) wrongly mapped onto "%s" (%i)' % \
+            (teststr, mapped_pos, orig_text[original_pos:original_pos + len(teststr)], original_pos)
+
     def test_parse_tokenized(self):
         cst = self.grammar(self.tokenized)
         # for e in cst.collect_errors(self.tokenized):
@@ -116,6 +124,13 @@ class TestTokenParsing:
         # print()
         assert not cst.error_flag
 
+    def test_source_mapping(self):
+        self.verify_mapping("def func", self.code, self.tokenized)
+        self.verify_mapping("x > 0:", self.code, self.tokenized)
+        self.verify_mapping("if y > 0:", self.code, self.tokenized)
+        self.verify_mapping("print(x)", self.code, self.tokenized)
+        self.verify_mapping("print(y)", self.code, self.tokenized)
+
 
 if __name__ == "__main__":
     # tp = TestTokenParsing()
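
As a closing note on the bookkeeping that the new test verifies: every token inserted by make_token lengthens the preprocessed text by its sentinel characters plus the token name, and that length difference is the shift which tokenized_to_original_mapping records and source_map undoes. A rough, hypothetical illustration of the length arithmetic follows; the real sentinel values and the make_token signature live in DHParser.preprocess and are only assumed here.

# Hypothetical single-character sentinels; DHParser defines its own values
# for BEGIN_TOKEN, TOKEN_DELIMITER and END_TOKEN in DHParser.preprocess.
BEGIN_TOKEN = '\x1b'
TOKEN_DELIMITER = '\x1c'
END_TOKEN = '\x1d'

def make_token_sketch(token_name, token_text):
    # A token wraps its text between sentinels and prefixes the token name,
    # so the preprocessed text grows by len(token_name) + 3 characters.
    return BEGIN_TOKEN + token_name + TOKEN_DELIMITER + token_text + END_TOKEN

original  = "    print(x)"
tokenized = make_token_sketch("INDENT", "    ") + "print(x)"
assert len(tokenized) - len(original) == len("INDENT") + 3
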