
Commit b4daffdc authored by di68kap

- changed logs module into a more universal toolkit module

parent 8a2697ae
@@ -28,8 +28,8 @@ try:
 except ImportError:
     import re
-from EBNFcompiler import EBNFGrammar, EBNFCompiler, EBNFTransTable, load_if_file, md5
-from logs import IS_LOGGING
+from EBNFcompiler import *
+from toolkit import *
 from parsercombinators import *
 from syntaxtree import *
 from version import __version__
@@ -61,23 +61,6 @@ COMPILER_SECTION = "COMPILER SECTION - Can be edited. Changes will be preserved.
 END_SECTIONS_MARKER = "END OF PYDSL-SECTIONS"
 # DELIMITER = "\n\n### DON'T EDIT OR REMOVE THIS LINE ###\n\n"
-def is_python_code(text_or_file):
-    """Checks whether 'text_or_file' is python code or the name of a file that
-    contains python code.
-    """
-    if text_or_file.find('\n') < 0:
-        return text_or_file[-3:].lower() == '.py'
-    try:
-        compile(text_or_file, '<string>', 'exec')
-        return True
-    except (SyntaxError, ValueError, OverflowError):
-        pass
-    return False
 class GrammarError(Exception):
     """Raised when (already) the grammar of a domain specific language (DSL)
     contains errors.

@@ -19,7 +19,6 @@ permissions and limitations under the License.
 """
 # import collections
-import hashlib
 import keyword
 from functools import partial
 try:
@@ -27,17 +26,18 @@ try:
 except ImportError:
     import re
-from parsercombinators import *
+from toolkit import load_if_file, escape_re, md5
+from parsercombinators import GrammarBase, mixin_comment, Forward, RE, NegativeLookahead, \
+    Alternative, Sequence, Optional, Required, OneOrMore, ZeroOrMore, Token, CompilerBase, \
+    sane_parser_name
 from syntaxtree import *
 from version import __version__
 __all__ = ['EBNFGrammar',
            'EBNFTransTable',
-           'load_if_file',
            'EBNFCompilerError',
            # 'Scanner',
-           'md5',
            'EBNFCompiler']
@@ -139,19 +139,6 @@ EBNFTransTable = {
 }
-def load_if_file(text_or_file):
-    """Reads and returns content of a file if parameter `text_or_file` is a
-    file name (i.e. a single line string), otherwise (i.e. if `text_or_file` is
-    a multiline string) returns the content of `text_or_file`.
-    """
-    if text_or_file and text_or_file.find('\n') < 0:
-        with open(text_or_file, encoding="utf-8") as f:
-            content = f.read()
-        return content
-    else:
-        return text_or_file
 class EBNFCompilerError(Exception):
     """Error raised by `EBNFCompiler` class. (Not compilation errors
     in the strict sense, see `CompilationError` below)"""
@@ -162,16 +149,6 @@ class EBNFCompilerError(Exception):
 # 'symbol instantiation_call cls_name cls')
-def md5(*txt):
-    """Returns the md5-checksum for `txt`. This can be used to test if
-    some piece of text, for example a grammar source file, has changed.
-    """
-    md5_hash = hashlib.md5()
-    for t in txt:
-        md5_hash.update(t.encode('utf8'))
-    return md5_hash.hexdigest()
 class EBNFCompiler(CompilerBase):
     """Generates a Parser from an abstract syntax tree of a grammar specified
     in EBNF-Notation.

@@ -22,7 +22,7 @@ limitations under the License.
 import os
 import sys
 sys.path.append(os.path.abspath('../../../'))
-import logs
+import toolkit
 from DSLsupport import run_compiler, source_changed
 MLW_ebnf = os.path.join('..', 'MLW.ebnf')
@@ -30,7 +30,7 @@ MLW_compiler = os.path.join('..', 'MLW_compiler.py')
 # print(source_changed(MLW_ebnf, MLW_compiler))
-logs.logging_off()
+toolkit.logging_off()
 if (not os.path.exists(MLW_compiler) or
         source_changed(MLW_ebnf, MLW_compiler)):
@@ -40,7 +40,7 @@ if (not os.path.exists(MLW_compiler) or
     print(errors)
     sys.exit(1)
-logs.logging_on()
+toolkit.logging_on()
 errors = run_compiler("fascitergula.mlw", MLW_compiler, ".xml")
 if errors:

@@ -59,7 +59,7 @@ try:
 except ImportError:
     import re
-from logs import IS_LOGGING, LOGS_DIR
+from toolkit import IS_LOGGING, LOGS_DIR, escape_re
 from syntaxtree import WHITESPACE_KEYWORD, TOKEN_KEYWORD, ZOMBIE_PARSER, Node, \
     error_messages, ASTTransform
@@ -75,7 +75,6 @@ __all__ = ['HistoryRecord',
            'ScannerToken',
            'RegExp',
            'RE',
-           'escape_re',
            'Token',
            'mixin_comment',
            'UnaryOperator',
@@ -532,16 +531,6 @@ class RE(Parser):
         self.main.apply(func)
-def escape_re(s):
-    """Returns `s` with all regular expression special characters escaped.
-    """
-    assert isinstance(s, str)
-    re_chars = r"\.^$*+?{}[]()#<>=|!"
-    for esc_ch in re_chars:
-        s = s.replace(esc_ch, '\\' + esc_ch)
-    return s
 def Token(token, wL=None, wR=None, name=None):
     return RE(escape_re(token), wL, wR, name or TOKEN_KEYWORD)

@@ -29,7 +29,7 @@ except ImportError:
 import re
 from typing import NamedTuple
-from logs import IS_LOGGING, LOGS_DIR
+from toolkit import IS_LOGGING, LOGS_DIR, expand_table
 __all__ = ['WHITESPACE_KEYWORD',
@@ -401,28 +401,6 @@ def compact_sexpr(s):
 ########################################################################
-def expand_table(compact_table):
-    """Expands a table by separating keywords that are tuples or strings
-    containing comma separated words into single keyword entries with
-    the same values. Returns the expanded table.
-    Example:
-    >>> expand_table({"a, b": 1, "b": 1, ('d','e','f'):5, "c":3})
-    {'a': 1, 'b': 1, 'c': 3, 'd': 5, 'e': 5, 'f': 5}
-    """
-    expanded_table = {}
-    keys = list(compact_table.keys())
-    for key in keys:
-        value = compact_table[key]
-        if isinstance(key, str):
-            parts = (s.strip() for s in key.split(','))
-        else:
-            assert isinstance(key, collections.abc.Iterable)
-            parts = key
-        for p in parts:
-            expanded_table[p] = value
-    return expanded_table
 def ASTTransform(node, transtable):
     """Transforms the parse tree starting with the given ``node`` into
     an abstract syntax tree by calling transformation functions

logs.py → toolkit.py
 #!/usr/bin/python3
-"""logs.py - basic log file support for DHParser
+"""toolkit.py - utility functions for DHParser
 Copyright 2016 by Eckhart Arnold (arnold@badw.de)
 Bavarian Academy of Sciences and Humanities (badw.de)
@@ -18,9 +18,13 @@ implied. See the License for the specific language governing
 permissions and limitations under the License.
-Module ``logs`` defines the global variable LOGGING which contains
-the name of a directory where log files shall be placed. By setting
-its value to the empty string "" logging can be turned off.
+Module ``toolkit`` contains utility functions and cross-sectional
+functionality like logging support that is needed across several
+of the other DHParser modules.
+For logging functionality, the global variable LOGGING is defined, which
+contains the name of a directory where log files shall be placed. By
+setting its value to the empty string "" logging can be turned off.
 To read the directory name, the function ``LOGS_DIR()`` should be called
 rather than reading the variable LOGGING. ``LOGS_DIR()`` makes sure
@@ -28,10 +32,19 @@ the directory exists and raises an error if a file with the same name
 already exists.
 """
+import collections
+import hashlib
 import os
-__all__ = ['logging_on', 'logging_off', 'IS_LOGGING', 'LOGS_DIR']
+__all__ = ['logging_on',
+           'logging_off',
+           'IS_LOGGING',
+           'LOGS_DIR',
+           'escape_re',
+           'load_if_file',
+           'is_python_code',
+           'md5']
 LOGGING: str = "LOGS"  # LOGGING = "" turns logging off!
@@ -86,3 +99,72 @@ def LOGS_DIR() -> str:
             "do not place any files here or edit existing files in this directory\n"
             "manually.\n")
     return dirname
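
As a rough usage sketch (editor's illustration, not part of this commit), the logging switches exported by ``toolkit`` can be driven the way the MLW test script above does, assuming ``logging_on()``/``logging_off()`` merely toggle the LOGGING variable as the docstring describes and ``IS_LOGGING()`` reports its state as a boolean:

    import toolkit

    toolkit.logging_off()          # sets LOGGING to "", disabling log output
    toolkit.logging_on()           # restores the default "LOGS" directory
    if toolkit.IS_LOGGING():       # assumption: IS_LOGGING() returns a bool
        print("log files are written to", toolkit.LOGS_DIR())
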
+def escape_re(s):
+    """Returns `s` with all regular expression special characters escaped.
+    """
+    assert isinstance(s, str)
+    re_chars = r"\.^$*+?{}[]()#<>=|!"
+    for esc_ch in re_chars:
+        s = s.replace(esc_ch, '\\' + esc_ch)
+    return s
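
A minimal sketch of what ``escape_re`` is for (illustrative, not part of the commit): building a pattern that matches a string literally, which is how ``Token`` in parsercombinators.py employs it via ``RE(escape_re(token), ...)``:

    import re
    from toolkit import escape_re

    pattern = re.compile(escape_re("price (in $)"))  # '(', '$' and ')' get escaped
    assert pattern.match("price (in $)")
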
+def load_if_file(text_or_file):
+    """Reads and returns content of a file if parameter `text_or_file` is a
+    file name (i.e. a single line string), otherwise (i.e. if `text_or_file` is
+    a multiline string) `text_or_file` is returned.
+    """
+    if text_or_file and text_or_file.find('\n') < 0:
+        with open(text_or_file, encoding="utf-8") as f:
+            content = f.read()
+        return content
+    else:
+        return text_or_file
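
A brief sketch (not part of the commit) of the single-line-means-filename convention; the file name is borrowed from the MLW example above:

    from toolkit import load_if_file

    grammar = load_if_file('MLW.ebnf')     # one line, no '\n': read as a file name
    text = load_if_file('a = b\nb = c\n')  # multiline string: returned unchanged
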
+def is_python_code(text_or_file):
+    """Checks whether 'text_or_file' is python code or the name of a file that
+    contains python code.
+    """
+    if text_or_file.find('\n') < 0:
+        return text_or_file[-3:].lower() == '.py'
+    try:
+        compile(text_or_file, '<string>', 'exec')
+        return True
+    except (SyntaxError, ValueError, OverflowError):
+        pass
+    return False
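
Illustrative checks (not part of the commit) of the three cases the function distinguishes:

    from toolkit import is_python_code

    assert is_python_code('MLW_compiler.py')          # single line ending in '.py'
    assert is_python_code('x = 1\nprint(x)\n')        # multiline text that compiles
    assert not is_python_code('term = factor ???\n')  # multiline text that does not
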
+def md5(*txt):
+    """Returns the md5-checksum for `txt`. This can be used to test if
+    some piece of text, for example a grammar source file, has changed.
+    """
+    md5_hash = hashlib.md5()
+    for t in txt:
+        md5_hash.update(t.encode('utf8'))
+    return md5_hash.hexdigest()
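
A small sketch (not part of the commit) showing that successive arguments are fed into a single hash, so splitting the input does not change the digest:

    from toolkit import md5

    assert md5('a', 'bc') == md5('abc')       # arguments are hashed in sequence
    fingerprint = md5('expr = term { "+" term }\n')  # store, compare later to detect edits
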
+def expand_table(compact_table):
+    """Expands a table by separating keywords that are tuples or strings
+    containing comma separated words into single keyword entries with
+    the same values. Returns the expanded table.
+    Example:
+    >>> expand_table({"a, b": 1, "b": 1, ('d','e','f'):5, "c":3})
+    {'a': 1, 'b': 1, 'c': 3, 'd': 5, 'e': 5, 'f': 5}
+    """
+    expanded_table = {}
+    keys = list(compact_table.keys())
+    for key in keys:
+        value = compact_table[key]
+        if isinstance(key, str):
+            parts = (s.strip() for s in key.split(','))
+        else:
+            assert isinstance(key, collections.abc.Iterable)
+            parts = key
+        for p in parts:
+            expanded_table[p] = value
+    return expanded_table
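
Beyond the doctest above, a sketch (not part of the commit) of the pattern this enables in transformation tables such as EBNFTransTable, where several node types share one rule list (the node names here are hypothetical):

    from toolkit import expand_table

    trans_table = expand_table({
        'expression, term, factor': [],  # one entry expands to three keys
        'symbol': [],
    })
    assert 'term' in trans_table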