
Commit 7803dedb authored by Eckhart Arnold

- some bugfixes

parent 933b2abc
@@ -786,6 +786,8 @@ class EBNFCompiler(Compiler):
         self.symbols[symbol] = node  # remember first use of symbol
         if symbol in self.rules:
             self.recursive.add(symbol)
+        if symbol in (EBNFCompiler.WHITESPACE_KEYWORD, EBNFCompiler.COMMENT_KEYWORD):
+            return "RegExp(%s)" % symbol
         return symbol
......
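In effect, a grammar that refers to one of the reserved keywords WHITESPACE_KEYWORD or COMMENT_KEYWORD now compiles to a RegExp(...) parser wrapped around that keyword instead of a bare symbol reference; the regenerated LaTeXGrammar further down shows this as RegExp(COMMENT__) inside the new WSPC definition. A hedged sketch of how to observe the change, modelled on the grammar used in TestReservedSymbols below (the import path and the exact shape of the generated source are assumptions):

from DHParser.dsl import compileEBNF   # import path assumed

lang = r"""
    @comment = /#.*(?:\n|$)/
    document = text [ COMMENT__ ]
    text = /[^#]+/
"""
# compileEBNF returns the Python source of the generated parser suite;
# after this commit the reference to COMMENT__ should appear wrapped in RegExp(...)
src = compileEBNF(lang, "CommentDemo")
print("RegExp(COMMENT__)" in src)   # expected: True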
@@ -197,9 +197,6 @@ def add_parser_guard(parser_func):
             grammar.moving_forward__ = True
             grammar.left_recursion_encountered__ = False
-            if grammar.history_tracking__:
-                grammar.call_stack__.append(parser)
             # if location has already been visited by the current parser,
             # return saved result
             if location in parser.visited:
@@ -210,6 +207,9 @@ def add_parser_guard(parser_func):
                 grammar.left_recursion_encountered__ = True
                 return None, text
+            if grammar.history_tracking__:
+                grammar.call_stack__.append(parser)
             parser.recursion_counter[location] += 1
             # run original __call__ method
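Taken together, the two hunks above move the history-tracking bookkeeping: grammar.call_stack__.append(parser) now runs only after the memoization lookup and the left-recursion check, so calls that return a saved result or are cut off as left-recursive no longer push an entry onto the call stack. A simplified sketch of the resulting order of checks (not the actual add_parser_guard code; the recursion limit and the surrounding details are assumptions):

LEFT_RECURSION_DEPTH = 10   # hypothetical limit, not taken from the source

def guarded_call(parser, grammar, text):
    location = len(text)    # how the location key is derived is an assumption

    # 1. memoization: a previously saved result is returned without any bookkeeping
    if location in parser.visited:
        return parser.visited[location]

    # 2. left-recursion guard: bail out before any history tracking happens
    if parser.recursion_counter.get(location, 0) > LEFT_RECURSION_DEPTH:
        grammar.left_recursion_encountered__ = True
        return None, text

    # 3. only a parser that will actually run is recorded on the call stack
    if grammar.history_tracking__:
        grammar.call_stack__.append(parser)
    parser.recursion_counter[location] = parser.recursion_counter.get(location, 0) + 1

    # ... run the original __call__ method, record history, memoize the result ...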
@@ -1032,7 +1032,7 @@ class UnaryOperator(Parser):
     """
     def __init__(self, parser: Parser, name: str = '') -> None:
         super(UnaryOperator, self).__init__(name)
-        # assert isinstance(parser, Parser)
+        assert isinstance(parser, Parser), str(parser)
         self.parser = parser  # type: Parser

     def __deepcopy__(self, memo):
@@ -1057,7 +1057,7 @@ class NaryOperator(Parser):
     """
     def __init__(self, *parsers: Parser, name: str = '') -> None:
         super(NaryOperator, self).__init__(name)
-        # assert all([isinstance(parser, Parser) for parser in parsers]), str(parsers)
+        assert all([isinstance(parser, Parser) for parser in parsers]), str(parsers)
         self.parsers = parsers  # type: Tuple[Parser, ...]

     def __deepcopy__(self, memo):
......
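Re-enabling these assertions means that constructing a UnaryOperator or NaryOperator subclass (Alternative, OneOrMore, Series, etc.) with an argument that is not a Parser instance now fails right at construction time, and the assertion message includes the offending argument(s). A small hedged example of the kind of mistake this catches (assuming Alternative inherits the NaryOperator check):

from DHParser import Alternative, RegExp

try:
    bad = Alternative(RegExp('foo'), 'bar')   # 'bar' is a plain string, not a Parser
except AssertionError as err:
    # the bug is reported where the grammar is built, not as a puzzling parse failure later
    print("caught at construction time:", err)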
@@ -76,10 +76,8 @@ tabular_config = "{" /[lcr|]+/~ §"}"
 block_of_paragraphs = /{/~ sequence §/}/
 sequence = { (paragraph | block_environment ) [PARSEP] }+
 paragraph = { !blockcmd text_element //~ }+
-text_element = command | text | block | inline_environment
+text_element = text | block | inline_environment | command

 #### inline enivronments ####
@@ -146,7 +144,8 @@ TEXTCHUNK = /[^\\%$&\{\}\[\]\s\n]+/ # some piece of text excluding white
                                  # linefeed and special characters
 LF = !GAP /[ \t]*\n[ \t]*/       # linefeed but not an empty line
 LFF = //~ -&LB [ WSPC ]          # at least one linefeed
-WSPC = { ~/\s+/~ }+              # arbitrary horizontal or vertical whitespace
+WSPC = { COMMENT__ | /\s+/ }+
+# WSPC = { /\s+/~ | ~/\s+/ }+    # arbitrary horizontal or vertical whitespace
 PARSEP = { GAP }+                # paragraph separator
 GAP = /[ \t]*(?:\n[ \t]*)+\n/~   # at least one empty line, i.e.
                                  # [whitespace] linefeed [whitespace] linefeed
......
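The new WSPC rule treats LaTeX comments as part of the surrounding whitespace: COMMENT__ is the reserved comment keyword (defined as %.*(?:\n|$) in the regenerated parser below), so a run of blanks, linefeeds and % comments is now consumed as a single WSPC. A plain-regex approximation of what the rule accepts, purely for illustration (this is not DHParser code):

import re

COMMENT__ = r'%.*(?:\n|$)'                              # as defined in the regenerated LaTeXGrammar
WSPC_APPROX = re.compile('(?:%s|\\s+)+' % COMMENT__)    # roughly: { COMMENT__ | /\s+/ }+

sample = "   % a LaTeX comment\n\n   "
match = WSPC_APPROX.match(sample)
print(match.group(0) == sample)   # True: the comment is absorbed into the whitespace run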
@@ -7,26 +7,22 @@
 #######################################################################

-from functools import partial
 import os
 import sys
+from functools import partial

 try:
     import regex as re
 except ImportError:
     import re

-from DHParser import logging, is_filename, load_if_file, \
-    Grammar, Compiler, nil_preprocessor, \
-    Lookbehind, Lookahead, Alternative, Pop, Required, Token, Synonym, \
-    Optional, NegativeLookbehind, OneOrMore, RegExp, Retrieve, Series, RE, Capture, \
+from DHParser import logging, is_filename, Grammar, Compiler, Lookbehind, Alternative, Pop, \
+    Required, Token, Synonym, \
+    Optional, NegativeLookbehind, OneOrMore, RegExp, Series, RE, Capture, \
     ZeroOrMore, Forward, NegativeLookahead, mixin_comment, compile_source, \
-    last_value, counterpart, accumulate, PreprocessorFunc, \
-    Node, TransformationFunc, MockParser, \
-    traverse, remove_children_if, merge_children, TRUE_CONDITION, is_anonymous, \
+    PreprocessorFunc, \
+    Node, TransformationFunc, traverse, remove_children_if, is_anonymous, \
     reduce_single_child, replace_by_single_child, remove_whitespace, \
-    remove_expendables, remove_empty, remove_tokens, flatten, is_whitespace, \
-    is_empty, is_expendable, collapse, replace_content, WHITESPACE_PTYPE, TOKEN_PTYPE, \
-    remove_parser, remove_content, remove_brackets, replace_parser, \
-    keep_children, is_one_of, has_content, apply_if, remove_first, remove_last
+    flatten, is_empty, collapse, replace_content, remove_brackets, is_one_of, remove_first
#######################################################################
@@ -129,10 +125,8 @@ class LaTeXGrammar(Grammar):
     block_of_paragraphs = /{/~ sequence §/}/
     sequence = { (paragraph | block_environment ) [PARSEP] }+
     paragraph = { !blockcmd text_element //~ }+
-    text_element = command | text | block | inline_environment
+    text_element = text | block | inline_environment | command

     #### inline enivronments ####
@@ -199,7 +193,8 @@ class LaTeXGrammar(Grammar):
                                      # linefeed and special characters
     LF = !GAP /[ \t]*\n[ \t]*/       # linefeed but not an empty line
     LFF = //~ -&LB [ WSPC ]          # at least one linefeed
-    WSPC = { ~/\s+/~ }+              # arbitrary horizontal or vertical whitespace
+    WSPC = { COMMENT__ | /\s+/ }+
+    # WSPC = { /\s+/~ | ~/\s+/ }+    # arbitrary horizontal or vertical whitespace
     PARSEP = { GAP }+                # paragraph separator
     GAP = /[ \t]*(?:\n[ \t]*)+\n/~   # at least one empty line, i.e.
                                      # [whitespace] linefeed [whitespace] linefeed
@@ -215,7 +210,7 @@ class LaTeXGrammar(Grammar):
     end_generic_block = Forward()
     paragraph = Forward()
     text_element = Forward()
-    source_hash__ = "773d8d68e38663befc9488f7e0cb60e4"
+    source_hash__ = "529c853d5829c3016605e4ee7ed69ddb"
     parser_initialization__ = "upon instantiation"
     COMMENT__ = r'%.*(?:\n|$)'
     WSP__ = mixin_comment(whitespace=r'[ \t]*(?:\n(?![ \t]*\n)[ \t]*)?', comment=r'%.*(?:\n|$)')
@@ -226,7 +221,7 @@ class LaTeXGrammar(Grammar):
     LB = RegExp('\\s*?\\n|$')
     GAP = RE('[ \\t]*(?:\\n[ \\t]*)+\\n')
     PARSEP = OneOrMore(GAP)
-    WSPC = OneOrMore(RE('\\s+', wL=WSP__))
+    WSPC = OneOrMore(Alternative(RegExp(COMMENT__), RegExp('\\s+')))
     LFF = Series(RE(''), Lookbehind(LB), Optional(WSPC))
     LF = Series(NegativeLookahead(GAP), RegExp('[ \\t]*\\n[ \\t]*'))
     TEXTCHUNK = RegExp('[^\\\\%$&\\{\\}\\[\\]\\s\\n]+')
@@ -256,7 +251,7 @@ class LaTeXGrammar(Grammar):
     generic_inline_env = Series(begin_inline_env, RE(''), paragraph, Required(end_inline_env))
     known_inline_env = Synonym(inline_math)
     inline_environment = Alternative(known_inline_env, generic_inline_env)
-    text_element.set(Alternative(command, text, block, inline_environment))
+    text_element.set(Alternative(text, block, inline_environment, command))
     paragraph.set(OneOrMore(Series(NegativeLookahead(blockcmd), text_element, RE(''))))
     sequence = OneOrMore(Series(Alternative(paragraph, block_environment), Optional(PARSEP)))
     block_of_paragraphs.set(Series(RE('{'), sequence, Required(RegExp('}'))))
......
@@ -77,7 +77,17 @@ class TestCompilerGeneration:
         for name in (self.grammar_name, self.compiler_name, self.text_name, self.result_name):
             if os.path.exists(name):
                 os.remove(name)
         pass
+        if os.path.exists('LOGS'):
+            files = os.listdir('LOGS')
+            flag = False
+            for file in files:
+                if file.startswith('TestCompilerGenerationCompiler') or file == "info.txt":
+                    os.remove(os.path.join('LOGS', file))
+                else:
+                    flag = True
+            if not flag:
+                os.rmdir('LOGS')

     def test_load_compiler_suite(self):
         src = compileEBNF(self.trivial_lang, "Trivial")
......
@@ -83,6 +83,35 @@ class TestDirectives:
         assert syntax_tree.collect_errors()


+class TestReservedSymbols:
+    def test_comment_usage(self):
+        lang = r"""
+        @comment = /#.*(?:\n|$)/
+        document = text [ COMMENT__ ]
+        text = /[^#]+/
+        """
+        parser = grammar_provider(lang)()
+
+    def test_whitespace(self):
+        lang = r"""
+        @whitespace = /\s*/
+        document = WSP__ { word WSP__ }
+        word = /\w+/
+        """
+        parser = grammar_provider(lang)()
+
+    def test_mixin(self):
+        lang = r"""
+        @comment = /#.*(?:\n|$)/
+        @whitespace = /\s*/
+        document = WSP__ { word WSP__ }
+        word = /\w+/
+        """
+        parser = grammar_provider(lang)()
+        result = parser("test # kommentar")
+        assert not result.error_flag, str(result.as_sxpr())
+
+
 class TestEBNFParser:
     cases = {
         "list_": {
......
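The new TestReservedSymbols cases check that the reserved COMMENT__ and WSP__ symbols can be used directly in a grammar; test_mixin additionally verifies that a comment embedded in whitespace ("test # kommentar") parses without errors. WSP__ is built by mixin_comment(whitespace=..., comment=...), as visible in the regenerated LaTeXGrammar above. A rough approximation of what that comment-absorbing whitespace amounts to, using the directives from test_mixin (a plain-regex sketch, not the actual mixin_comment implementation):

import re

comment = r'#.*(?:\n|$)'    # from the @comment directive
whitespace = r'\s*'         # from the @whitespace directive

# assumed shape: optional whitespace, optionally interleaved with comments
wsp_approx = re.compile('(?:{ws}(?:{c}{ws})*)'.format(ws=whitespace, c=comment))

print(wsp_approx.match(" # kommentar\n ").group(0) == " # kommentar\n ")   # True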
@@ -19,6 +19,8 @@ See the License for the specific language governing permissions and
 limitations under the License.
 """

+import os
+import re
 import sys
 from functools import partial
@@ -102,6 +104,18 @@ class TestGrammarTest:
             }
         }

+    def teardown(self):
+        if os.path.exists('REPORT'):
+            files = os.listdir('REPORT')
+            flag = False
+            for file in files:
+                if re.match(r'\d+\.md', file):
+                    os.remove(os.path.join('REPORT', file))
+                else:
+                    flag = True
+            if not flag:
+                os.rmdir('REPORT')
+
     def test_testing_grammar(self):
         parser_fac = grammar_provider(ARITHMETIC_EBNF)
         trans_fac = lambda : ARITHMETIC_EBNFTransform
......
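Both new teardown hooks (the LOGS cleanup in TestCompilerGeneration above and the REPORT cleanup here) follow the same pattern: remove only the files the test run produced and delete the directory only if nothing else was left in it, so unrelated logs or reports survive the test run. A hedged sketch of that pattern as a reusable helper; the helper itself is hypothetical and not part of the test suite:

import os
import re

def cleanup_dir(path, created_by_test):
    """Remove files for which created_by_test(name) is true; remove the
    directory itself only if no other files were found in it."""
    if not os.path.exists(path):
        return
    other_files = False
    for name in os.listdir(path):
        if created_by_test(name):
            os.remove(os.path.join(path, name))
        else:
            other_files = True
    if not other_files:
        os.rmdir(path)

# usage mirroring the two teardown hooks:
# cleanup_dir('LOGS', lambda n: n.startswith('TestCompilerGenerationCompiler') or n == 'info.txt')
# cleanup_dir('REPORT', lambda n: re.match(r'\d+\.md', n))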