Commit ac2a9952 authored by Eckhart Arnold's avatar Eckhart Arnold
Browse files

LaTeX-Beispiel: Ergänzungen

parent 37399df4
......@@ -357,7 +357,7 @@ def compile_source(source: str,
if preprocessor is None:
source_text = original_text # type: str
source_mapping = lambda i: SourceLocation(source_name, i) # type: SourceMapFunc
source_mapping = lambda i: SourceLocation(source_name, 0, i) # type: SourceMapFunc
else:
source_text, source_mapping = with_source_mapping(preprocessor(original_text, source_name))
......
......@@ -195,6 +195,8 @@ class Error:
:ivar orig_doc: the name or path or url of the original source file to
which ``orig_pos`` is related. This is relevant, if the preprocessed
document has been plugged together from several source files.
:ivar orig_offset: the offset of the included ``orig_doc`` within the
outermost including document.
:ivar line: the line number where the error occurred in the original text.
Lines are counted from 1 onward.
:ivar column: the column where the error occurred in the original text.
......@@ -209,7 +211,7 @@ class Error:
__slots__ = ['message', 'code', '_pos', 'line', 'column', 'length',
'end_line', 'end_column', 'related', 'orig_pos', 'orig_doc',
'relatedUri']
'orig_offset', 'relatedUri']
def __init__(self, message: str, pos: int, code: ErrorCode = ERROR,
line: int = -1, column: int = -1, length: int = 1,
......@@ -227,6 +229,7 @@ class Error:
self.code = code # type: ErrorCode
self.orig_pos = orig_pos # type: int
self.orig_doc = orig_doc # type: str
self.orig_offset = 0 # type: int
self.line = line # type: int
self.column = column # type: int
# support for Language Server Protocol Diagnostics
......@@ -385,39 +388,71 @@ def adjust_error_locations(errors: List[Error],
Args:
errors: The list of errors as returned by the method
``errors()`` of a Node object
original_text: The source text on which the errors occurred.
original_text: The source text in which the errors occurred.
(Needed in order to determine the line and column numbers.)
source_mapping: A function that maps error positions to their
positions in the original source file.
"""
def relative_lc(lbreaks: List[int], pos: int, offset: int) -> Tuple[int, int]:
if offset == 0:
return line_col(lbreaks, pos)
else:
# assert pos >= offset, f"Precondition pos: {pos} >= offset: {offset} violated!"
base_l, base_c = line_col(lbreaks, offset)
l, c = line_col(lbreaks, offset + pos)
if l > base_l:
return l - base_l + 1, c
else:
return 1, c - base_c + 1
line_breaks = linebreaks(original_text)
for err in errors:
assert err.pos >= 0
err.orig_doc, err.orig_pos = source_mapping(err.pos)
err.line, err.column = line_col(line_breaks, err.orig_pos)
err.orig_doc, err.orig_offset, err.orig_pos = source_mapping(err.pos)
err.line, err.column = relative_lc(line_breaks, err.orig_pos, err.orig_offset)
# adjust length in case it exceeds the text size. As this is non-fatal
# it should be adjusted rather than an error raised to avoid
# unnecessary special-case treatments in other places
if err.orig_pos + err.length > len(original_text):
err.length = len(original_text) - err.orig_pos
err.end_line, err.end_column = line_col(line_breaks, err.orig_pos + err.length)
def canonical_error_strings(errors: List[Error], source_file_name: str = '') -> List[str]:
if err.orig_pos + err.length > len(err.orig_doc):
err.length = len(err.orig_doc) - err.orig_pos
err.end_line, err.end_column = relative_lc(
line_breaks, err.orig_pos + err.length, err.orig_offset)
# def canonical_error_strings(errors: List[Error], source_file_name: str = '') -> List[str]:
# """Returns the list of error strings in canonical form that can be parsed by most
# editors, i.e. "relative filepath : line : column : severity (code) : error string"
# """
# if errors:
# if is_filename(source_file_name):
# cwd = os.getcwd()
# if source_file_name.startswith(cwd):
# rel_path = source_file_name[len(cwd)]
# else:
# rel_path = source_file_name
# error_strings = [rel_path + ':' + str(err) for err in errors]
# else:
# error_strings = [str(err) for err in errors]
# else:
# error_strings = []
# return error_strings
def canonical_error_strings(errors: List[Error]) -> List[str]:
    """Returns the list of error strings in canonical form that can be parsed by most
    editors, i.e. "relative filepath : line : column : severity (code) : error string"

    :param errors: the list of errors to render; each error's ``orig_doc``
        names the source file it stems from (may differ per error when the
        preprocessed text was plugged together from several include files).
    :return: one canonical string per error, in the same order as ``errors``.
    """
    error_strings = []
    for err in errors:
        source_file_name = err.orig_doc
        if is_filename(source_file_name):
            cwd = os.getcwd()
            if source_file_name.startswith(cwd):
                # Strip the current working directory (and the path separator
                # following it) to yield a path relative to the cwd.
                # NOTE: the previous code used `source_file_name[len(cwd)]`,
                # which picked a single character instead of slicing.
                rel_path = source_file_name[len(cwd):].lstrip(os.sep)
            else:
                rel_path = source_file_name
            error_strings.append(rel_path + ':' + str(err))
        else:
            error_strings.append(str(err))
    return error_strings
This diff is collapsed.
......@@ -82,8 +82,9 @@ class SourceMap:
class SourceLocation(NamedTuple):
source_name: str # the file name (or path or uri) of the source code
pos: int # a position within this file
source_name: str # the file name (or path or uri) of the source code
source_offset: int # the offset of this file within the complete source text
pos: int # a position within this file
SourceMapFunc = Union[Callable[[int], SourceLocation],
......@@ -129,7 +130,7 @@ def nil_preprocessor(source_text: str, source_name: str) -> Preprocessed:
"""
A preprocessor that does nothing, i.e. just returns the input.
"""
return Preprocessed(source_text, lambda i: SourceLocation(source_name, i))
return Preprocessed(source_text, lambda i: SourceLocation(source_name, 0, i))
def _apply_mappings(position: int, mappings: List[SourceMapFunc]) -> SourceLocation:
......@@ -141,8 +142,8 @@ def _apply_mappings(position: int, mappings: List[SourceMapFunc]) -> SourceLocat
"""
filename = ''
for mapping in mappings:
filename, position = mapping(position)
return SourceLocation(filename, position)
filename, offset, position = mapping(position)
return SourceLocation(filename, offset, position)
def _apply_preprocessors(source_text: str, source_name: str,
......@@ -167,6 +168,9 @@ def chain_preprocessors(*preprocessors) -> PreprocessorFunc:
"""
Merges a sequence of preprocessor functions into a single function.
"""
if any(prep is preprocess_includes for prep in preprocessors[1:]):
raise ValueError("The preprocessor for include files must be applied first, "
"and there can be no more than one preprocessor for includes.")
return functools.partial(_apply_preprocessors, preprocessors=preprocessors)
......@@ -227,7 +231,7 @@ def strip_tokens(tokenized: str) -> str:
def neutral_mapping(pos: int) -> SourceLocation:
    '''Maps source locations onto themselves, i.e. returns ``pos``
    unchanged, with an empty source file name and a zero source offset.'''
    # The empty name and zero offset mark "no preprocessing happened".
    return SourceLocation('', 0, pos)
def tokenized_to_original_mapping(tokenized_text: str, source_name: str='UNKNOWN_FILE') -> SourceMap:
......@@ -283,6 +287,7 @@ def source_map(position: int, srcmap: SourceMap) -> SourceLocation:
if i:
return SourceLocation(
srcmap.source_name,
0,
min(position + srcmap.offsets[i - 1], srcmap.positions[i] + srcmap.offsets[i]))
raise ValueError
......@@ -425,9 +430,11 @@ def generate_include_map(source_name: str,
def srcmap_includes(position: int, inclmap: IncludeMap) -> SourceLocation:
    """Maps a position in the fully assembled (include-expanded) text onto
    its location within the original included file.

    :param position: a position within the include-expanded source text.
    :param inclmap: the include map produced when resolving the includes.
    :return: the name of the included file, the (negated) offset of that
        file within the outer document, and the position inside that file.
    :raises ValueError: if ``position`` lies before every mapped segment.
    """
    i = bisect.bisect_right(inclmap.positions, position)
    if i:
        offset = inclmap.offsets[i - 1]
        return SourceLocation(
            inclmap.file_names[i - 1],
            # -offset is the offset of the included file within the
            # outermost including document (see Error.orig_offset).
            -offset,
            position + offset)
    raise ValueError(f'Position {position} not covered by include map!')
......
......@@ -39,7 +39,7 @@ def process_file(source: str, result_filename: str = '') -> str:
err_ext = '_ERRORS.txt' if has_errors(errors, ERROR) else '_WARNINGS.txt'
err_filename = os.path.splitext(result_filename)[0] + err_ext
with open(err_filename, 'w') as f:
f.write('\n'.join(canonical_error_strings(errors, source_filename)))
f.write('\n'.join(canonical_error_strings(errors)))
return err_filename
return ''
......@@ -174,7 +174,7 @@ if __name__ == "__main__":
result, errors = compile_src(file_names[0])
if errors:
for err_str in canonical_error_strings(errors, file_names[0]):
for err_str in canonical_error_strings(errors):
print(err_str)
if has_errors(errors, ERROR):
sys.exit(1)
......
......@@ -36,16 +36,18 @@ frontpages = sequence
#######################################################################
Chapters = { [_WSPC] Chapter }+
Chapter = "\chapter" heading { sequence | Sections }
Chapter = `\chapter` [hide_from_toc] heading { sequence | Sections }
Sections = { [_WSPC] Section }+
Section = "\section" heading { sequence | SubSections }
Section = `\section` [hide_from_toc] heading { sequence | SubSections }
SubSections = { [_WSPC] SubSection }+
SubSection = "\subsection" heading { sequence | SubSubSections }
SubSection = `\subsection` [hide_from_toc] heading { sequence | SubSubSections }
SubSubSections = { [_WSPC] SubSubSection }+
SubSubSection = "\subsubsection" heading { sequence | Paragraphs }
SubSubSection = `\subsubsection` [hide_from_toc] heading { sequence | Paragraphs }
hide_from_toc = "*"
Paragraphs = { [_WSPC] Paragraph }+
Paragraph = "\paragraph" heading { sequence | SubParagraphs }
......@@ -148,9 +150,10 @@ info_block = "{" §{ info_assoc } "}"
info_assoc = info_key ~ [ "(" §info_value ")" ]
info_key = `/` _NAME
info_value = TEXT_NOPAR { S TEXT_NOPAR } # text without parentheses
# text = CHARS { S CHARS }
# text = LINE { S LINE }
text = TEXT { S TEXT }
# text = CHARS { (S | trennung) CHARS }
# text = LINE { (S | trennung) LINE }
text = TEXT { (S | trennung) TEXT }
trennung = `\-`
no_command = "\begin{" | "\end" | BACKSLASH structural
blockcmd = BACKSLASH ( ( "begin{" | "end{" )
......
......@@ -49,7 +49,8 @@ from DHParser import start_logging, suspend_logging, resume_logging, is_filename
positions_of, replace_tag_names, add_attributes, delimit_children, merge_connected, \
has_attr, has_parent, ThreadLocalSingletonFactory, Error, canonical_error_strings, \
has_errors, apply_unless, WARNING, ERROR, FATAL, EMPTY_NODE, TreeReduction, CombinedParser, \
Preprocessed, neutral_mapping, preprocess_includes, gen_find_include_func, flatten_sxpr
Preprocessed, neutral_mapping, preprocess_includes, gen_find_include_func, flatten_sxpr, \
replace_content_with
#######################################################################
......@@ -85,7 +86,7 @@ class LaTeXGrammar(Grammar):
paragraph = Forward()
param_block = Forward()
text_element = Forward()
source_hash__ = "49543176de36a2f3271970b00b62761d"
source_hash__ = "1defeb8c06a45217d1fba760a3364e88"
disposable__ = re.compile('_WSPC$|_GAP$|_LB$|_PARSEP$|_LETTERS$|_NAME$|INTEGER$|FRAC$|_QUALIFIED$|TEXT_NOPAR$|TEXT$|_block_content$|block_environment$|known_environment$|text_element$|line_element$|inline_environment$|known_inline_env$|info_block$|begin_inline_env$|end_inline_env$|command$|known_command$')
static_analysis_pending__ = [] # type: List[bool]
parser_initialization__ = ["upon instantiation"]
......@@ -126,7 +127,7 @@ class LaTeXGrammar(Grammar):
TXTCOMMAND = RegExp('\\\\text\\w+')
CMDNAME = Series(RegExp('\\\\(?:(?![\\d_])\\w)+'), dwsp__)
WARN_Komma = Series(Text(","), dwsp__)
text = Series(TEXT, ZeroOrMore(Series(S, TEXT)))
trennung = Text("\\-")
number = Series(INTEGER, Option(FRAC))
magnitude = Series(number, Option(UNIT))
value = Alternative(magnitude, _LETTERS, CMDNAME, param_block, block)
......@@ -137,17 +138,18 @@ class LaTeXGrammar(Grammar):
sequence = Series(Option(_WSPC), OneOrMore(Series(Alternative(paragraph, block_environment), Option(_PARSEP))))
block_of_paragraphs = Series(Series(Drop(Text("{")), dwsp__), Option(sequence), Series(Drop(Text("}")), dwsp__), mandatory=2)
param_config = Series(Series(Drop(Text("[")), dwsp__), Option(parameters), Series(Drop(Text("]")), dwsp__), mandatory=1)
cfg_text = ZeroOrMore(Alternative(Series(dwsp__, text), CMDNAME, SPECIAL))
text = Series(TEXT, ZeroOrMore(Series(Alternative(S, trennung), TEXT)))
structural = Alternative(Series(Drop(Text("subsection")), dwsp__), Series(Drop(Text("section")), dwsp__), Series(Drop(Text("chapter")), dwsp__), Series(Drop(Text("subsubsection")), dwsp__), Series(Drop(Text("paragraph")), dwsp__), Series(Drop(Text("subparagraph")), dwsp__), Series(Drop(Text("item")), dwsp__))
begin_environment = Series(Drop(RegExp('\\\\begin{')), NAME, Drop(RegExp('}')), mandatory=1)
no_command = Alternative(Series(Drop(Text("\\begin{")), dwsp__), Series(Drop(Text("\\end")), dwsp__), Series(BACKSLASH, structural))
cfg_text = ZeroOrMore(Alternative(Series(dwsp__, text), CMDNAME, SPECIAL))
config = Series(Series(Drop(Text("[")), dwsp__), Alternative(Series(parameters, Lookahead(Series(Drop(Text("]")), dwsp__))), cfg_text), Series(Drop(Text("]")), dwsp__), mandatory=1)
info_value = Series(TEXT_NOPAR, ZeroOrMore(Series(S, TEXT_NOPAR)))
info_key = Series(Drop(Text("/")), _NAME)
info_assoc = Series(info_key, dwsp__, Option(Series(Series(Drop(Text("(")), dwsp__), info_value, Series(Drop(Text(")")), dwsp__), mandatory=1)))
info_block = Series(Series(Drop(Text("{")), dwsp__), ZeroOrMore(info_assoc), Series(Drop(Text("}")), dwsp__), mandatory=1)
end_environment = Series(Drop(RegExp('\\\\end{')), Pop(NAME), Drop(RegExp('}')), mandatory=1)
heading = Synonym(block)
hide_from_toc = Series(Text("*"), dwsp__)
hypersetup = Series(Series(Drop(Text("\\hypersetup")), dwsp__), param_block)
pdfinfo = Series(Series(Drop(Text("\\pdfinfo")), dwsp__), info_block)
documentclass = Series(Series(Drop(Text("\\documentclass")), dwsp__), Option(config), block)
......@@ -172,10 +174,10 @@ class LaTeXGrammar(Grammar):
known_inline_env = Synonym(inline_math)
inline_environment = Alternative(known_inline_env, generic_inline_env)
generic_command = Alternative(Series(NegativeLookahead(no_command), CMDNAME, Option(Series(Option(Series(dwsp__, config)), OneOrMore(Series(dwsp__, block))))), Series(Drop(Text("{")), CMDNAME, _block_content, Drop(Text("}")), mandatory=3))
heading = Synonym(block)
SubParagraph = Series(Series(Drop(Text("\\subparagraph")), dwsp__), heading, Option(sequence))
SubParagraphs = OneOrMore(Series(Option(_WSPC), SubParagraph))
frontpages = Synonym(sequence)
Paragraph = Series(Series(Drop(Text("\\paragraph")), dwsp__), heading, ZeroOrMore(Alternative(sequence, SubParagraphs)))
SubParagraphs = OneOrMore(Series(Option(_WSPC), SubParagraph))
multicolumn = Series(Series(Drop(Text("\\multicolumn")), dwsp__), Series(Drop(Text("{")), dwsp__), INTEGER, Series(Drop(Text("}")), dwsp__), tabular_config, block_of_paragraphs)
known_command = Alternative(citet, citep, footnote, includegraphics, caption, multicolumn, hline, cline, documentclass, pdfinfo, hypersetup)
command = Alternative(known_command, text_command, generic_command)
......@@ -192,16 +194,17 @@ class LaTeXGrammar(Grammar):
generic_block = Series(begin_generic_block, sequence, end_generic_block, mandatory=2)
known_environment = Alternative(itemize, enumerate, figure, tabular, quotation, verbatim)
preamble = OneOrMore(Series(Option(_WSPC), command))
Paragraphs = OneOrMore(Series(Option(_WSPC), Paragraph))
Paragraph = Series(Series(Drop(Text("\\paragraph")), dwsp__), heading, ZeroOrMore(Alternative(sequence, SubParagraphs)))
Index = Series(Option(_WSPC), Series(Drop(Text("\\printindex")), dwsp__))
Bibliography = Series(Option(_WSPC), Series(Drop(Text("\\bibliography")), dwsp__), heading)
SubSubSection = Series(Series(Drop(Text("\\subsubsection")), dwsp__), heading, ZeroOrMore(Alternative(sequence, Paragraphs)))
Paragraphs = OneOrMore(Series(Option(_WSPC), Paragraph))
SubSubSection = Series(Drop(Text("\\subsubsection")), Option(hide_from_toc), heading, ZeroOrMore(Alternative(sequence, Paragraphs)))
SubSubSections = OneOrMore(Series(Option(_WSPC), SubSubSection))
SubSection = Series(Series(Drop(Text("\\subsection")), dwsp__), heading, ZeroOrMore(Alternative(sequence, SubSubSections)))
SubSection = Series(Drop(Text("\\subsection")), Option(hide_from_toc), heading, ZeroOrMore(Alternative(sequence, SubSubSections)))
SubSections = OneOrMore(Series(Option(_WSPC), SubSection))
Section = Series(Series(Drop(Text("\\section")), dwsp__), heading, ZeroOrMore(Alternative(sequence, SubSections)))
Section = Series(Drop(Text("\\section")), Option(hide_from_toc), heading, ZeroOrMore(Alternative(sequence, SubSections)))
Sections = OneOrMore(Series(Option(_WSPC), Section))
Chapter = Series(Series(Drop(Text("\\chapter")), dwsp__), heading, ZeroOrMore(Alternative(sequence, Sections)))
Chapter = Series(Drop(Text("\\chapter")), Option(hide_from_toc), heading, ZeroOrMore(Alternative(sequence, Sections)))
Chapters = OneOrMore(Series(Option(_WSPC), Chapter))
document = Series(Option(_WSPC), Series(Drop(Text("\\begin{document}")), dwsp__), frontpages, Alternative(Chapters, Sections), Option(Bibliography), Option(Index), Option(_WSPC), Series(Drop(Text("\\end{document}")), dwsp__), Option(_WSPC), EOF, mandatory=2)
param_block.set(Series(Series(Drop(Text("{")), dwsp__), Option(parameters), Series(Drop(Text("}")), dwsp__)))
......@@ -305,6 +308,7 @@ LaTeX_AST_transformation_table = {
"frontpages": reduce_single_child,
"Chapters, Sections, SubSections, SubSubSections, Paragraphs, SubParagraphs": [],
"Chapter, Section, SubSection, SubSubSection, Paragraph, SubParagraph": [],
"hide_from_toc": [replace_content_with('')],
"heading": reduce_single_child,
"Bibliography": [],
"Index": [],
......@@ -347,6 +351,7 @@ LaTeX_AST_transformation_table = {
"block": [flatten, reduce_single_child],
"flag": [reduce_single_child],
"text": collapse,
"trennung": replace_content_with(''),
"no_command, blockcmd": [],
"structural": [],
"CMDNAME": [remove_whitespace, reduce_single_child],
......@@ -944,7 +949,7 @@ if __name__ == "__main__":
result, errors = compile_src(file_names[0])
if errors:
for err_str in canonical_error_strings(errors, file_names[0]):
for err_str in canonical_error_strings(errors):
print(err_str)
if has_errors(errors, ERROR):
sys.exit(1)
......
[match:text]
1* : Some plain text
1 : Some plain text
2* : "ein getr\-enntes Wort"
[ast:text]
2: "ein getrenntes Wort"
[fail:text]
10: Low-level text must not contain \& escaped characters.
......
......@@ -446,7 +446,7 @@ if __name__ == "__main__":
result, errors = compile_src(file_names[0])
if errors:
for err_str in canonical_error_strings(errors, file_names[0]):
for err_str in canonical_error_strings(errors):
print(err_str)
if has_errors(errors, ERROR):
sys.exit(1)
......
......@@ -47,16 +47,17 @@ class TestDHParserCommandLineTool:
counter = 10
while counter > 0:
try:
os.mkdir(TFFN('test_dhparser_data'))
self.dirname = TFFN('test_dhparser_data')
os.mkdir(self.dirname)
counter = 0
except FileExistsError:
time.sleep(1)
time.sleep(0.5)
counter -= 1
self.nulldevice = " >/dev/null" if platform.system() != "Windows" else " > NUL"
self.python = sys.executable + ' '
def teardown(self):
name = TFFN('test_dhparser_data')
name = self.dirname
if os.path.exists(name + '/%sServer.py' % name):
system(self.python + name + '/%sServer.py --stopserver' % name + self.nulldevice)
if os.path.exists(name) and os.path.isdir(name):
......@@ -75,7 +76,7 @@ class TestDHParserCommandLineTool:
# os.rmdir('out')
def test_dhparser(self):
name = TFFN('test_dhparser_data')
name = self.dirname
# test compiler creation and execution
system(self.python + '../DHParser/scripts/dhparser.py ' + name + self.nulldevice)
system(self.python + name + '/tst_%s_grammar.py --singlethread ' % name + self.nulldevice)
......
......@@ -156,7 +156,7 @@ class TestTokenParsing:
def verify_mapping(self, teststr, orig_text, preprocessed_text, mapping):
mapped_pos = preprocessed_text.find(teststr)
assert mapped_pos >= 0
file_name, original_pos = mapping(mapped_pos)
file_name, file_offset, original_pos = mapping(mapped_pos)
# original_pos = source_map(mapped_pos, self.srcmap)
assert orig_text[original_pos:original_pos + len(teststr)] == teststr, \
'"%s" (%i) wrongly mapped onto "%s" (%i)' % \
......@@ -181,7 +181,7 @@ class TestTokenParsing:
previous_index = 0
L = len(self.code)
for mapped_index in range(len(self.tokenized)):
_, index = source_map(mapped_index, self.srcmap)
_, _, index = source_map(mapped_index, self.srcmap)
assert previous_index <= index <= L, \
"%i <= %i <= %i violated" % (previous_index, index, L)
previous_index = index
......@@ -243,8 +243,9 @@ def system(s: str) -> int:
class TestIncludes:
cwd = os.getcwd()
def setup(self):
self.cwd = os.getcwd()
os.chdir(scriptpath)
# avoid race-condition
counter = 10
......@@ -254,7 +255,7 @@ class TestIncludes:
os.mkdir(TFFN('test_preprocess_data'))
counter = 0
except FileExistsError:
time.sleep(1)
time.sleep(0.5)
counter -= 1
os.chdir(os.path.join(scriptpath, self.dirname))
......@@ -264,7 +265,7 @@ class TestIncludes:
shutil.rmtree(self.dirname)
if os.path.exists(self.dirname) and not os.listdir(self.dirname):
os.rmdir(self.dirname)
os.chdir(self.cwd)
os.chdir(TestIncludes.cwd)
def create_files(self, files: Dict[str, str]):
......@@ -280,7 +281,7 @@ class TestIncludes:
# print(mapping)
assert text == main.replace('include(sub.txt)', 'abc'), text
for i in range(len(text)):
name, k = mapping(i)
name, offset, k = mapping(i)
# print(i, k, name)
txt = main if name == 'main.txt' else sub
assert text[i] == txt[k], f'{i}: {text[i]} != {txt[k]} in {name}'
......@@ -302,14 +303,14 @@ class TestIncludes:
text, mapping = preprocess_includes(None, 'main', find_func)
# print(mapping)
substrings = {}
for k, v in reversed(ensemble.items()):
for k, v in reversed(list(ensemble.items())):
for name, content in substrings.items():
v = v.replace(f'#include({name})', content)
substrings[k] = v
assert text == substrings['main']
# print(text)
for i in range(len(text)):
name, k = mapping(i)
name, offset, k = mapping(i)
txt = ensemble[name]
# print(name, txt, i, k)
assert text[i] == txt[k], f'{i}: {text[i]} != {txt[k]} in {name}'
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment