16.12.2021, 9:00 - 11:00: Due to updates, GitLab may be unavailable for some minutes between 09:00 and 11:00.

Commit 6944ecc5 authored by Eckhart Arnold
Browse files

Better '_compiler.py' files: it is now more explicit where users may make changes.

parent c2d5de0e
......@@ -508,24 +508,6 @@ def ASTTransform(node, transtable):
recursive_ASTTransform(node)
# def preserve_errors(transformation):
# """Wrapper that moves errors of child nodes that have been removed
# after the application of function ``transformation()`` to the root
# node. As an optimization, ``transformation()`` will only be called
# if its ``node``-argument (i.e. the root-node) has children at all.
# """
# # requires nd.collect_errors() to return a set
# @functools.wraps(transformation)
# def preserve_errors_wrapper(*args, **kwds):
# nd = kwds['node'] if 'node' in kwds else args[0]
# if nd.children:
# errors = nd.collect_errors()
# transformation(*args, **kwds)
# for err in errors - nd.collect_errors():
# nd.add_error(err[1], err[0])
# return preserve_errors_wrapper
def no_transformation(node):
    """AST transformation that deliberately does nothing to ``node``.

    Serves as an explicit 'leave unchanged' entry in transformation tables,
    which is clearer than omitting the key altogether.
    """
......@@ -726,10 +708,7 @@ def DEBUG_DUMP_PARSING_HISTORY(grammar_base, document):
write_log(full_history, '_full')
write_log(match_history, '_match')
write_log(errors_only, '_errors')
# hist = ["; ".join(prepare_line(r)) for r in grammar_base.history]
# lines = [prepare_line(r) for r in grammar_base.history]
# n = max(len(line[0]) for line in lines)
# hist = [" ".join((l[0] + ' ' * (n - len(l[0])), l[1], l[2])) for l in lines]
def add_parser_guard(parser_func):
......@@ -1074,13 +1053,6 @@ def escape_re(s):
s = s.replace(esc_ch, '\\' + esc_ch)
return s
#
# class Token(RE):
# def __init__(self, token, wL=None, wR=None, name=None):
# super(Token, self).__init__(escape_re(token), wL, wR, name or TOKEN_KEYWORD)
#
# def __str__(self):
# return self.name or 'Token "%s"' % self.main.regexp.pattern.replace('\\', '')
def Token(token, wL=None, wR=None, name=None):
    """Return an ``RE`` parser that matches the literal string ``token``.

    The token text is regex-escaped first; unless an explicit ``name`` is
    given, the parser is tagged with the module-level ``TOKEN_KEYWORD``.
    """
    escaped = escape_re(token)
    return RE(escaped, wL, wR, name or TOKEN_KEYWORD)
......@@ -1390,11 +1362,11 @@ PARSER_SYMBOLS = {'RegExp', 'mixin_comment', 'RE', 'Token', 'Required',
'Pop'}
########################################################################
#######################################################################
#
# Syntax driven compilation support
#
########################################################################
#######################################################################
class CompilerBase:
def compile__(self, node):
......@@ -1595,10 +1567,9 @@ class EBNFCompiler(CompilerBase):
"""Generates a Parser from an abstract syntax tree of a grammar specified
in EBNF-Notation.
"""
# RX_DIRECTIVE = re.compile('(?:#|@)\s*(?P<key>\w*)\s*=\s*(?P<value>.*)') # old, can be removed!
RESERVED_SYMBOLS = {TOKEN_KEYWORD, WHITESPACE_KEYWORD}
KNOWN_DIRECTIVES = {'comment', 'whitespace', 'tokens', 'literalws'}
VOWELS = {'A', 'E', 'I', 'O', 'U'} # what about cases like 'hour', 'universe' etc. ?
VOWELS = {'A', 'E', 'I', 'O', 'U'} # what about cases like 'hour', 'universe' etc.?
AST_ERROR = "Badly structured syntax tree. " \
"Potentially due to erroneuos AST transformation."
PREFIX_TABLE = [('§', 'Required'), ('&', 'Lookahead'),
......@@ -1905,13 +1876,30 @@ class EBNFCompiler(CompilerBase):
return set(item.strip() for item in node.result.split(','))
########################################################################
#######################################################################
#
# support for compiling DSLs based on an EBNF-grammar
#
########################################################################
#######################################################################
# Section layout of generated '*_compiler.py' files: the script is divided
# into marker-delimited sections so that re-generation can overwrite the
# parser section while preserving user edits in the other sections.
DELIMITER = "\n\n### DON'T EDIT OR REMOVE THIS LINE ###\n\n"
# Template for a section-marker block; {marker} is filled with one of the
# *_SECTION strings below.
SECTION_MARKER = """\n
#######################################################################
#
# {marker}
#
#######################################################################
\n"""
# Regex used to split a compiler-suite source at its section-marker blocks.
RX_SECTION_MARKER = re.compile(SECTION_MARKER.format(marker=r'.*?SECTION.*?'))
SYMBOLS_SECTION = "SYMBOLS SECTION - Can be edited. Changes will be preserved."
SCANNER_SECTION = "SCANNER SECTION - Can be edited. Changes will be preserved."
PARSER_SECTION = "PARSER SECTION - Don't edit! CHANGES WILL BE OVERWRITTEN!"
AST_SECTION = "AST SECTION - Can be edited. Changes will be preserved."
COMPILER_SECTION = "COMPILER SECTION - Can be edited. Changes will be preserved."
END_SECTIONS_MARKER = "END OF PYDSL-SECTIONS"
# DELIMITER = "\n\n### DON'T EDIT OR REMOVE THIS LINE ###\n\n"
def is_python_code(text_or_file):
......@@ -1955,6 +1943,7 @@ def compile_python_object(python_src, obj_name_ending="Grammar"):
"""Compiles the python source code and returns the object the name of which
ends with `obj_name_ending`.
"""
code = compile(python_src, '<string>', 'exec')
module_vars = globals()
allowed_symbols = PARSER_SYMBOLS | AST_SYMBOLS | COMPILER_SYMBOLS
......@@ -2000,11 +1989,16 @@ def get_grammar_instance(grammar):
def load_compiler_suite(compiler_suite):
"""
"""
global DELIMITER
global RX_SECTION_MARKER
assert isinstance(compiler_suite, str)
source = load_if_file(compiler_suite)
if is_python_code(compiler_suite):
scanner_py, parser_py, ast_py, compiler_py = source.split(DELIMITER)
try:
intro, syms, scanner_py, parser_py, ast_py, compiler_py, outro = \
RX_SECTION_MARKER.split(source)
except ValueError as error:
raise ValueError('File "' + compiler_suite + '" seems to be corrupted. '
'Please delete or repair file manually.')
scanner = compile_python_object(scanner_py, 'Scanner')
ast = compile_python_object(ast_py, 'TransTable')
compiler = compile_python_object(compiler_py, 'Compiler')
......@@ -2039,7 +2033,7 @@ def compileDSL(text_or_file, dsl_grammar, trans_table, compiler,
return result
def run_compiler(source_file, compiler_suite="", extension=".dst"):
def run_compiler(source_file, compiler_suite="", extension=".xml"):
"""Compiles the a source file with a given compiler and writes the result
to a file. If no `compiler_suite` is given it is assumed that the source
file is an EBNF grammar. In this case the result will be a Python
......@@ -2047,10 +2041,17 @@ def run_compiler(source_file, compiler_suite="", extension=".dst"):
for a scanner, AST transformation table, and compiler. If the Python
script already exists only the parser name in the script will be
updated. (For this to work, the different names need to be delimited
by the standard `DELIMITER`-line!).
section marker blocks.).
`run_compiler()` returns a list of error messages or an empty list if no
errors occured.
"""
def import_block(module, symbols):
symlist = list(symbols)
grouped = [symlist[i:i + 4] for i in range(0, len(symlist), 4)]
return ("\nfrom " + module + " import "
+ ', \\\n '.join(', '.join(g) for g in grouped) + '\n\n')
filepath = os.path.normpath(source_file)
with open(source_file, encoding="utf-8") as f:
source = f.read()
......@@ -2068,32 +2069,44 @@ def run_compiler(source_file, compiler_suite="", extension=".dst"):
if errors:
return errors
elif trans == EBNFTransTable: # either an EBNF- or no compiter suite given
elif trans == EBNFTransTable: # either an EBNF- or no compiler suite given
f = None
global DELIMITER
global SECTION_MARKER, RX_SECTION_MARKER, SCANNER_SECTION, PARSER_SECTION, \
AST_SECTION, COMPILER_SECTION, END_SECTIONS_MARKER
try:
f = open(rootname + '_compiler.py', 'r', encoding="utf-8")
source = f.read()
scanner, parser, ast, compiler = source.split(DELIMITER)
intro, syms, scanner, parser, ast, compiler, outro = RX_SECTION_MARKER.split(source)
except (PermissionError, FileNotFoundError, IOError) as error:
intro, outro = '', ''
syms = import_block("PyDSL", PARSER_SYMBOLS | AST_SYMBOLS | {'CompilerBase'})
scanner = compiler.gen_scanner_skeleton()
ast = compiler.gen_AST_skeleton()
compiler = compiler.gen_compiler_skeleton()
except ValueError as error:
raise ValueError('File "' + rootname + '_compiler.py" seems to be corrupted. '
'Please delete or repair file manually!')
finally:
if f: f.close()
try:
f = open(rootname + '_compiler.py', 'w', encoding="utf-8")
f.write(intro)
f.write(SECTION_MARKER.format(marker=SYMBOLS_SECTION))
f.write(syms)
f.write(SECTION_MARKER.format(marker=SCANNER_SECTION))
f.write(scanner)
f.write(DELIMITER)
f.write(SECTION_MARKER.format(marker=PARSER_SECTION))
f.write(result)
f.write(DELIMITER)
f.write(SECTION_MARKER.format(marker=AST_SECTION))
f.write(ast)
f.write(DELIMITER)
f.write(SECTION_MARKER.format(marker=COMPILER_SECTION))
f.write(compiler)
f.write(SECTION_MARKER.format(marker=END_SECTIONS_MARKER))
f.write(outro)
except (PermissionError, FileNotFoundError, IOError) as error:
print('# Could not write file "' + rootname + '.py" because of: '
print('# Could not write file "' + rootname + '_compiler.py" because of: '
+ "\n# ".join(str(error).split('\n)')))
print(result)
finally:
......
#######################################################################
#
# SYMBOLS SECTION - Can be edited. Changes will be preserved.
#
#######################################################################
from PyDSL import Pop, NegativeLookbehind, Capture, no_transformation, \
Token, Alternative, mixin_comment, RE, \
Sequence, remove_comments, Retrieve, is_scanner_token, \
Lookbehind, replace_by_single_child, remove_scanner_tokens, remove_whitespace, \
is_whitespace, ZeroOrMore, remove_enclosing_delimiters, CompilerBase, \
RegExp, NegativeLookahead, WHITESPACE_KEYWORD, GrammarBase, \
reduce_single_child, Optional, remove_children_if, remove_expendables, \
remove_tokens, is_comment, partial, OneOrMore, \
Forward, TOKEN_KEYWORD, Required, flatten, \
is_expendable, Lookahead
#######################################################################
#
# SCANNER SECTION - Can be edited. Changes will be preserved.
#
#######################################################################
def MLWScanner(text):
    """Scanner (preprocessing) stage for MLW sources.

    Currently the identity function — edit here to add real preprocessing.
    """
    return text
### DON'T EDIT OR REMOVE THIS LINE ###
#######################################################################
#
# PARSER SECTION - Don't edit! CHANGES WILL BE OVERWRITTEN!
#
#######################################################################
class MLWGrammar(GrammarBase):
r"""Parser for a MLW source file, with this grammar:
......@@ -100,7 +132,7 @@ class MLWGrammar(GrammarBase):
DATEI_ENDE = !/./
NIEMALS = /(?!.)/
"""
source_hash__ = "4632b08b0de268e81efb1b92b322076e"
source_hash__ = "26b36c7d970ea079fb4207bdcffd5237"
parser_initialization__ = "upon instatiation"
wsp__ = mixin_comment(whitespace=r'\s*', comment=r'#.*(?:\n|$)')
wspL__ = wsp__
......@@ -149,8 +181,11 @@ class MLWGrammar(GrammarBase):
root__ = Artikel
### DON'T EDIT OR REMOVE THIS LINE ###
#######################################################################
#
# AST SECTION - Can be edited. Changes will be preserved.
#
#######################################################################
def test(node):
    # Debug helper: dump the node as an S-expression to stdout.
    print(node.as_sexpr())
......@@ -236,7 +271,11 @@ MLWTransTable = {
}
### DON'T EDIT OR REMOVE THIS LINE ###
#######################################################################
#
# COMPILER SECTION - Can be edited. Changes will be preserved.
#
#######################################################################
class MLWCompiler(CompilerBase):
"""Compiler for the abstract-syntax-tree of a MLW source file.
......@@ -303,6 +342,15 @@ class MLWCompiler(CompilerBase):
def Schreibweise(self, node):
    # Stub: no compilation action for 'Schreibweise' nodes yet.
    pass
def Beleg(self, node):
    # Stub: no compilation action for 'Beleg' nodes yet.
    pass
def Verweis(self, node):
    # Stub: no compilation action for 'Verweis' nodes yet.
    pass
def VerweisZiel(self, node):
    # Stub: no compilation action for 'VerweisZiel' nodes yet.
    pass
def BedeutungsPosition(self, node):
    # Stub: no compilation action for 'BedeutungsPosition' nodes yet.
    pass
......@@ -327,21 +375,27 @@ class MLWCompiler(CompilerBase):
def EinBeleg(self, node):
    # Stub: no compilation action for 'EinBeleg' nodes yet.
    pass
def Beleg(self, node):
def Zusatz(self, node):
pass
def Verweis(self, node):
def Autorinfo(self, node):
pass
def VerweisZiel(self, node):
def Name(self, node):
pass
def WORT(self, node):
    # Stub: no compilation action for 'WORT' nodes yet.
    pass
def WORT_GROSS(self, node):
    # Stub: no compilation action for 'WORT_GROSS' nodes yet.
    pass
def WORT_KLEIN(self, node):
    # Stub: no compilation action for 'WORT_KLEIN' nodes yet.
    pass
def LAT_WORT(self, node):
    # Stub: no compilation action for 'LAT_WORT' nodes yet.
    pass
def GROSSSCHRIFT(self, node):
    # Stub: no compilation action for 'GROSSSCHRIFT' nodes yet.
    pass
......@@ -354,3 +408,11 @@ class MLWCompiler(CompilerBase):
def NIEMALS(self, node):
    # Stub: 'NIEMALS' never matches, so this should be unreachable in practice.
    pass
#######################################################################
#
# END OF PYDSL-SECTIONS
#
#######################################################################
def MLWScanner(text):
    # Identity scanner skeleton — edit to add real preprocessing of MLW sources.
    return text
### DON'T EDIT OR REMOVE THIS LINE ###
class MLWGrammar(GrammarBase):
    r"""Parser for a MLW source file, with this grammar:
    # EBNF-Syntax für MLW-Artikel
    @ comment = /#.*(?:\n|$)/ # Kommentare beginnen mit '#' und reichen bis zum Zeilenende
    @ whitespace = /\s*/ # Auch Zeilenspränge zählen als Leerraum
    @ literalws = both # Leerraum vor und nach Literalen wird automatisch entfernt
    Artikel = [LEER]
    §LemmaPosition [ArtikelKopf] §BedeutungsPosition §Autorinfo
    [LEER] DATEI_ENDE
    #### LEMMA-POSITION ##########################################################
    LemmaPosition = "LEMMA" §Lemma [LemmaVarianten] §GrammatikPosition
    Lemma = [_tll] WORT_KLEIN
    _tll = "*"
    LemmaVarianten = "VARIANTEN" §LVariante { "," §LVariante } [";" §LVZusatz]
    LVariante = ~/(?:[a-z]|-)+/~ # Buchstabenfolge mit Trennzeichen "-"
    LVZusatz = "sim."
    #### GRAMMATIK-POSITION ######################################################
    GrammatikPosition = "GRAMMATIK" §_wortart §";" §Flexionen [_genus] {GrammatikVarianten} [";" | "."]
    _wortart = "nomen" | "n." |
    "verb" | "v." |
    "adverb" | "adv." |
    "adjektiv" | "adj."
    GrammatikVarianten = ";" §GVariante
    GVariante = Flexionen [_genus] ":" Beleg
    Flexionen = Flexion { "," §Flexion }
    Flexion = /-?[a-z]+/~
    _genus = "maskulinum" | "m." |
    "femininum" | "f." |
    "neutrum" | "n."
    #### ARTIKEL-KOPF ############################################################
    ArtikelKopf = SchreibweisenPosition
    SchreibweisenPosition = "SCHREIBWEISE" §SWTyp ":" §SWVariante { "," §SWVariante}
    SWTyp = "script." | "script. fat-"
    SWVariante = Schreibweise ":" Beleg
    Schreibweise = "vizreg-" | "festregel(a)" | "fezdregl(a)" | "fat-"
    Beleg = Verweis
    Verweis = ~/>>\w+/~
    VerweisZiel = ~/<\w+>/~
    #### BEDEUTUNGS-POSITION #####################################################
    BedeutungsPosition = { "BEDEUTUNG" Bedeutung }+
    Bedeutung = (Interpretamente | Bedeutungskategorie) [Belege]
    Bedeutungskategorie = /(?:(?![A-ZÄÖÜ][A-ZÄÖÜ]).)+/~
    Interpretamente = LateinischeBedeutung DeutscheBedeutung
    LateinischeBedeutung = "LAT" /(?:(?![A-ZÄÖÜ][A-ZÄÖÜ]).)+/~
    DeutscheBedeutung = "DEU" /(?:(?![A-ZÄÖÜ][A-ZÄÖÜ]).)+/~
    Belege = "BELEGE" { "*" EinBeleg }
    EinBeleg = { !(/\s*/ ("*" | "BEDEUTUNG" | "AUTOR" | "NAME" | "ZUSATZ")) /\s?.*/ }+
    [Zusatz]
    Zusatz = "ZUSATZ" /\s?.*/
    #### AUTOR/AUTORIN ###########################################################
    Autorinfo = ("AUTORIN" | "AUTOR") Name
    Name = WORT { WORT | /[A-ZÄÖÜÁÀ]\./ }
    #### MISZELLANEEN ############################################################
    WORT = /[A-ZÄÖÜ]?[a-zäöüß]+/~
    WORT_GROSS = /[A-ZÄÖÜ][a-zäöüß]+/~
    WORT_KLEIN = /[a-zäöüß]+/~
    LAT_WORT = /[a-z]+/~
    GROSSSCHRIFT = /[A-ZÄÖÜ]+/~
    LEER = /\s*/
    DATEI_ENDE = !/./
    NIEMALS = /(?!.)/
    """
    # NOTE(review): the docstring above mirrors the EBNF source verbatim
    # (German comments kept so it stays comparable to the grammar file).
    # Generated code — do not edit by hand.
    source_hash__ = "4632b08b0de268e81efb1b92b322076e"  # hash of the EBNF source this parser was generated from
    parser_initialization__ = "upon instatiation"  # (sic) marker string written by the generator
    # Whitespace parser: plain whitespace mixed with '#'-line-comments.
    wsp__ = mixin_comment(whitespace=r'\s*', comment=r'#.*(?:\n|$)')
    wspL__ = wsp__  # whitespace consumed left of literals
    wspR__ = wsp__  # whitespace consumed right of literals
    # Parser definitions appear bottom-up so every name is bound before use.
    NIEMALS = RE('(?!.)', wR='', wL='')
    DATEI_ENDE = NegativeLookahead(RE('.', wR='', wL=''))
    LEER = RE('\\s*', wR='', wL='')
    GROSSSCHRIFT = RE('[A-ZÄÖÜ]+', wL='')
    LAT_WORT = RE('[a-z]+', wL='')
    WORT_KLEIN = RE('[a-zäöüß]+', wL='')
    WORT_GROSS = RE('[A-ZÄÖÜ][a-zäöüß]+', wL='')
    WORT = RE('[A-ZÄÖÜ]?[a-zäöüß]+', wL='')
    Name = Sequence(WORT, ZeroOrMore(Alternative(WORT, RE('[A-ZÄÖÜÁÀ]\\.', wR='', wL=''))))
    Autorinfo = Sequence(Alternative(Token("AUTORIN"), Token("AUTOR")), Name)
    Zusatz = Sequence(Token("ZUSATZ"), RE('\\s?.*', wR='', wL=''))
    EinBeleg = Sequence(OneOrMore(Sequence(NegativeLookahead(Sequence(RE('\\s*', wR='', wL=''), Alternative(Token("*"), Token("BEDEUTUNG"), Token("AUTOR"), Token("NAME"), Token("ZUSATZ")))), RE('\\s?.*', wR='', wL=''))), Optional(Zusatz))
    Belege = Sequence(Token("BELEGE"), ZeroOrMore(Sequence(Token("*"), EinBeleg)))
    DeutscheBedeutung = Sequence(Token("DEU"), RE('(?:(?![A-ZÄÖÜ][A-ZÄÖÜ]).)+', wL=''))
    LateinischeBedeutung = Sequence(Token("LAT"), RE('(?:(?![A-ZÄÖÜ][A-ZÄÖÜ]).)+', wL=''))
    Interpretamente = Sequence(LateinischeBedeutung, DeutscheBedeutung)
    Bedeutungskategorie = RE('(?:(?![A-ZÄÖÜ][A-ZÄÖÜ]).)+', wL='')
    Bedeutung = Sequence(Alternative(Interpretamente, Bedeutungskategorie), Optional(Belege))
    BedeutungsPosition = OneOrMore(Sequence(Token("BEDEUTUNG"), Bedeutung))
    VerweisZiel = RE('<\\w+>')
    Verweis = RE('>>\\w+')
    Beleg = Verweis
    Schreibweise = Alternative(Token("vizreg-"), Token("festregel(a)"), Token("fezdregl(a)"), Token("fat-"))
    SWVariante = Sequence(Schreibweise, Token(":"), Beleg)
    SWTyp = Alternative(Token("script."), Token("script. fat-"))
    SchreibweisenPosition = Sequence(Token("SCHREIBWEISE"), Required(SWTyp), Token(":"), Required(SWVariante), ZeroOrMore(Sequence(Token(","), Required(SWVariante))))
    ArtikelKopf = SchreibweisenPosition
    _genus = Alternative(Token("maskulinum"), Token("m."), Token("femininum"), Token("f."), Token("neutrum"), Token("n."))
    Flexion = RE('-?[a-z]+', wL='')
    Flexionen = Sequence(Flexion, ZeroOrMore(Sequence(Token(","), Required(Flexion))))
    GVariante = Sequence(Flexionen, Optional(_genus), Token(":"), Beleg)
    GrammatikVarianten = Sequence(Token(";"), Required(GVariante))
    _wortart = Alternative(Token("nomen"), Token("n."), Token("verb"), Token("v."), Token("adverb"), Token("adv."), Token("adjektiv"), Token("adj."))
    GrammatikPosition = Sequence(Token("GRAMMATIK"), Required(_wortart), Required(Token(";")), Required(Flexionen), Optional(_genus), ZeroOrMore(GrammatikVarianten), Optional(Alternative(Token(";"), Token("."))))
    LVZusatz = Token("sim.")
    LVariante = RE('(?:[a-z]|-)+')
    LemmaVarianten = Sequence(Token("VARIANTEN"), Required(LVariante), ZeroOrMore(Sequence(Token(","), Required(LVariante))), Optional(Sequence(Token(";"), Required(LVZusatz))))
    _tll = Token("*")
    Lemma = Sequence(Optional(_tll), WORT_KLEIN)
    LemmaPosition = Sequence(Token("LEMMA"), Required(Lemma), Optional(LemmaVarianten), Required(GrammatikPosition))
    Artikel = Sequence(Optional(LEER), Required(LemmaPosition), Optional(ArtikelKopf), Required(BedeutungsPosition), Required(Autorinfo), Optional(LEER), DATEI_ENDE)
    root__ = Artikel  # start symbol of the grammar
### DON'T EDIT OR REMOVE THIS LINE ###
def test(node):
    # Debug helper: dump the node as an S-expression to stdout.
    print(node.as_sexpr())
def join_strings(node, delimiter='\n'):
    """Merge runs of adjacent leaf children of ``node`` into single nodes.

    Consecutive children without children of their own form a run; the run is
    collapsed into its first node, whose ``result`` becomes the run's string
    results joined by ``delimiter``. Children that have children themselves
    are kept unchanged. ``node.result`` is replaced in place.

    Fixes over the previous version: a node with children no longer stalls
    the scan (the index is always advanced and the node is kept), and the
    leftover debug ``print(node.as_sexpr())`` has been removed.
    """
    new_result = []
    n = 0
    total = len(node.result)
    while n < total:
        nd = node.result[n]
        if not nd.children:
            # Collect the maximal run of leaf nodes starting at n.
            a = n
            n += 1
            while n < total and not node.result[n].children:
                n += 1
            nd.result = delimiter.join(r.result for r in node.result[a:n])
        else:
            # Keep non-leaf nodes as they are and advance past them.
            n += 1
        new_result.append(nd)
    node.result = tuple(new_result)
# Transformation table mapping node (parser) names to AST transformations.
# Keys with comma-separated names presumably apply to each listed node type;
# "*" and "~" look like wildcard/default entries — TODO confirm against
# ASTTransform's table-lookup semantics.
MLWTransTable = {
    # AST Transformations for the MLW-grammar
    "Artikel": no_transformation,
    "LemmaPosition":
        [partial(remove_tokens, tokens={'LEMMA'})],
    "Lemma": no_transformation,
    "_tll, _wortart, _genus":
        [remove_expendables, reduce_single_child],
    "LemmaVarianten":
        [partial(remove_tokens, tokens={'VARIANTEN'}), flatten,
         partial(remove_tokens, tokens={',', ';'})],
    "LVariante, LVZusatz, Schreibweise, Name":
        [remove_expendables, reduce_single_child],
    "SWVariante":
        [remove_expendables, partial(remove_tokens, tokens={':'})],
    "GrammatikPosition":
        [partial(remove_tokens, tokens={'GRAMMATIK', ';'}), flatten],
    "GrammatikVarianten":
        [partial(remove_tokens, tokens={';'}), replace_by_single_child],
    "GVariante":
        [partial(remove_tokens, tokens={':'})],
    "Flexionen":
        [flatten, partial(remove_tokens, tokens={',', ';'})],
    "Flexion, Verweis":
        [remove_expendables, reduce_single_child],
    "Zusatz":
        [remove_expendables, remove_tokens, reduce_single_child],
    "ArtikelKopf": no_transformation,
    "SchreibweisenPosition":
        [partial(remove_tokens, tokens={'SCHREIBWEISE', ':'}),
         flatten, partial(remove_tokens, tokens={','})],
    "SWTyp": no_transformation,
    "BedeutungsPosition":
        [flatten, partial(remove_tokens, tokens={'BEDEUTUNG'})],
    "Bedeutung": no_transformation,
    "Bedeutungskategorie": no_transformation,
    "Interpretamente": no_transformation,
    "LateinischeBedeutung, DeutscheBedeutung":
        [remove_expendables, remove_tokens, reduce_single_child],
    "Belege":
        [flatten, remove_tokens],
    "EinBeleg":
        # join_strings merges the per-line leaf nodes of a quotation.
        [flatten, remove_expendables, join_strings, reduce_single_child],
    "Beleg": no_transformation,
    "VerweisZiel": no_transformation,
    "Autorinfo":
        [partial(remove_tokens, tokens={'AUTORIN', 'AUTOR'})],
    "WORT, WORT_KLEIN, WORT_GROSS, GROSSSCHRIFT":
        # test,
        [remove_expendables, reduce_single_child],
    "LEER": no_transformation,
    "DATEI_ENDE": no_transformation,
    "NIEMALS": no_transformation,
    (TOKEN_KEYWORD, WHITESPACE_KEYWORD):
        [remove_expendables, reduce_single_child],
    "*":
        remove_expendables,
    "~":
        partial(remove_tokens, tokens={',', ';'}),
    "":
        [remove_expendables, replace_by_single_child]
}
### DON'T EDIT OR REMOVE THIS LINE ###
class MLWCompiler(CompilerBase):
"""Compiler for the abstract-syntax-tree of a MLW source file.
"""
def __init__(self, grammar_name="MLW"):
    # grammar_name must be a plain identifier (checked below).
    super(MLWCompiler, self).__init__()
    # NOTE(review): assert is stripped under -O; '\w' in a non-raw string
    # happens to work but r'\w+\Z' would be cleaner — confirm before changing.
    assert re.match('\w+\Z', grammar_name)
def Artikel(self, node):
    # Root compilation method: currently passes the node through unchanged.
    return node
def LemmaPosition(self, node):
    # Stub: no compilation action for 'LemmaPosition' nodes yet.
    pass
def Lemma(self, node):
    # Stub: no compilation action for 'Lemma' nodes yet.
    pass
def _tll(self, node):
    # Stub: no compilation action for '_tll' (lemma asterisk) nodes yet.
    pass
def LemmaVarianten(self, node):
    # Stub: no compilation action for 'LemmaVarianten' nodes yet.
    pass
def LVariante(self, node):
    # Stub: no compilation action for 'LVariante' nodes yet.
    pass
def LVZusatz(self, node):
    # Stub: no compilation action for 'LVZusatz' nodes yet.
    pass