Commit 50677632 authored by Eckhart Arnold

- get_ functions now have neutral names in generated compiler modules.

parent f6b1769f
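For orientation, a minimal before/after sketch of what the rename means for client code (module and grammar names are illustrative, not taken from this commit):

    # before: factory names carried the grammar's brand name
    from LaTeX_compiler import get_LaTeX_grammar, get_LaTeX_transformer
    grammar = get_LaTeX_grammar()

    # after: generated modules expose neutral factory names
    from LaTeXCompiler import get_grammar, get_transformer
    grammar = get_grammar()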
@@ -85,17 +85,17 @@ from DHParser.syntaxtree import Node, traverse, remove_enclosing_delimiters, \\
DHPARSER_MAIN = '''
def compile_{NAME}(source):
def compile_src(source):
"""Compiles ``source`` and returns (result, errors, ast).
"""
with logging("LOGS"):
compiler = get_{NAME}_compiler()
compiler = get_compiler()
cname = compiler.__class__.__name__
log_file_name = os.path.basename(os.path.splitext(source)[0]) \\
if is_filename(source) else cname[:cname.find('.')] + '_out'
result = compile_source(source, get_{NAME}_scanner(),
get_{NAME}_grammar(),
get_{NAME}_transformer(), compiler)
result = compile_source(source, get_scanner(),
get_grammar(),
get_transformer(), compiler)
return result
@@ -109,7 +109,7 @@ if __name__ == "__main__":
else:
print(result)
else:
print("Usage: {NAME}_compiler.py [FILENAME]")
print("Usage: {NAME}Compiler.py [FILENAME]")
'''
@@ -251,7 +251,7 @@ def parser_factory(ebnf_src, branding="DSL"):
"""
grammar_src = compileDSL(ebnf_src, nil_scanner, get_ebnf_grammar(),
get_ebnf_transformer(), get_ebnf_compiler(branding))
return compile_python_object(DHPARSER_IMPORTS + grammar_src, 'get_\w*_grammar$')
return compile_python_object(DHPARSER_IMPORTS + grammar_src, 'get_(?:\w+_)?grammar$')
def load_compiler_suite(compiler_suite):
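The widened pattern accepts both the neutral and the old branded factory names; a quick sanity check with the standard re module (names illustrative):

    import re

    pattern = re.compile(r'get_(?:\w+_)?grammar$')
    assert pattern.search('get_grammar')      # new neutral name
    assert pattern.search('get_MLW_grammar')  # old branded name
    # the previous pattern r'get_\w*_grammar$' required a second underscore
    # before 'grammar' and therefore missed the neutral name 'get_grammar'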
@@ -272,9 +272,9 @@ def load_compiler_suite(compiler_suite):
raise AssertionError('File "' + compiler_suite + '" seems to be corrupted. '
'Please delete or repair file manually.')
# TODO: Compile in one step and pick parts from namespace later ?
scanner = compile_python_object(imports + scanner_py, 'get_\w*_scanner$')
parser = compile_python_object(imports + parser_py, 'get_\w*_grammar$')
ast = compile_python_object(imports + ast_py, 'get_\w*_transformer$')
scanner = compile_python_object(imports + scanner_py, 'get_(?:\w+_)?scanner$')
parser = compile_python_object(imports + parser_py, 'get_(?:\w+_)?grammar$')
ast = compile_python_object(imports + ast_py, 'get_(?:\w+_)?transformer$')
else:
# assume source is an ebnf grammar. Is there really any reasonable application case for this?
with logging(False):
@@ -284,7 +284,7 @@ def load_compiler_suite(compiler_suite):
raise GrammarError('\n\n'.join(errors), source)
scanner = get_ebnf_scanner
ast = get_ebnf_transformer
compiler = compile_python_object(imports + compiler_py, 'get_\w*_compiler$')
compiler = compile_python_object(imports + compiler_py, 'get_(?:\w+_)?compiler$')
return scanner, parser, ast, compiler
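A sketch of how the four returned factory functions might be used (file name illustrative; assuming compile_source returns the (result, errors, ast) triple mentioned in compile_src's docstring above):

    source = open('example.mlw').read()
    scanner, parser, ast, compiler = load_compiler_suite('LaTeXCompiler.py')
    result, errors, syntax_tree = compile_source(source, scanner(), parser(),
                                                 ast(), compiler())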
@@ -355,7 +355,7 @@ def compile_on_disk(source_file, compiler_suite="", extension=".xml"):
source_file(str): The file name of the source text to be
compiled.
compiler_suite(str): The file name of the compiler suite
(usually ending with '_compiler.py'), with which the source
(usually ending with 'Compiler.py'), with which the source
file shall be compiled. If this is left empty, the source
file is assumed to be an EBNF-Grammar that will be compiled
with the internal EBNF-Compiler.
@@ -391,14 +391,14 @@ def compile_on_disk(source_file, compiler_suite="", extension=".xml"):
DHPARSER_MAIN, DHPARSER_IMPORTS
f = None
try:
f = open(rootname + '_compiler.py', 'r', encoding="utf-8")
f = open(rootname + 'Compiler.py', 'r', encoding="utf-8")
source = f.read()
sections = RX_SECTION_MARKER.split(source)
intro, imports, scanner, parser, ast, compiler, outro = sections
except (PermissionError, FileNotFoundError, IOError) as error:
intro, imports, scanner, parser, ast, compiler, outro = '', '', '', '', '', '', ''
except ValueError as error:
raise ValueError('File "' + rootname + '_compiler.py" seems to be corrupted. '
raise ValueError('File "' + rootname + 'Compiler.py" seems to be corrupted. '
'Please delete or repair file manually!')
finally:
if f:
@@ -419,7 +419,7 @@ def compile_on_disk(source_file, compiler_suite="", extension=".xml"):
compiler = compiler1.gen_compiler_skeleton()
try:
f = open(rootname + '_compiler.py', 'w', encoding="utf-8")
f = open(rootname + 'Compiler.py', 'w', encoding="utf-8")
f.write(intro)
f.write(SECTION_MARKER.format(marker=SYMBOLS_SECTION))
f.write(imports)
@@ -434,7 +434,7 @@ def compile_on_disk(source_file, compiler_suite="", extension=".xml"):
f.write(SECTION_MARKER.format(marker=END_SECTIONS_MARKER))
f.write(outro)
except (PermissionError, FileNotFoundError, IOError) as error:
print('# Could not write file "' + rootname + '_compiler.py" because of: '
print('# Could not write file "' + rootname + 'Compiler.py" because of: '
+ "\n# ".join(str(error).split('\n)')))
print(result)
finally:
...
@@ -241,13 +241,13 @@ def get_ebnf_transformer():
########################################################################
SCANNER_FACTORY = '''
def get_{NAME}_scanner():
def get_scanner():
return {NAME}Scanner
'''
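For example, SCANNER_FACTORY.format(NAME='MLW') now yields a neutral factory that still returns the branded scanner object:

    def get_scanner():
        return MLWScanner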
GRAMMAR_FACTORY = '''
def get_{NAME}_grammar():
def get_grammar():
global thread_local_{NAME}_grammar_singleton
try:
grammar = thread_local_{NAME}_grammar_singleton
@@ -259,14 +259,13 @@ def get_{NAME}_grammar():
TRANSFORMER_FACTORY = '''
def get_{NAME}_transformer():
def get_transformer():
return {NAME}Transform
'''
COMPILER_FACTORY = '''
def get_{NAME}_compiler(grammar_name="{NAME}",
grammar_source=""):
def get_compiler(grammar_name="{NAME}", grammar_source=""):
global thread_local_{NAME}_compiler_singleton
try:
compiler = thread_local_{NAME}_compiler_singleton
...
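By analogy with get_MLW_grammar further below, the formatted COMPILER_FACTORY roughly expands to a lazily initialized singleton (a sketch; the elided template body is assumed):

    def get_compiler(grammar_name="MLW", grammar_source=""):
        global thread_local_MLW_compiler_singleton
        try:
            # fast path: singleton already created by an earlier call
            return thread_local_MLW_compiler_singleton
        except NameError:
            # first call: the global name is still unbound
            thread_local_MLW_compiler_singleton = \
                MLWCompiler(grammar_name, grammar_source)
            return thread_local_MLW_compiler_singleton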
@@ -89,7 +89,7 @@ def mock_syntax_tree(sexpr):
def recompile_grammar(ebnf_filename, query_remove_error_files=True):
"""Recompiles an ebnf-grammar if necessary, that is if either no
corresponding 'XXXX_compiler.py'-file exists or if that file is
corresponding 'XXXXCompiler.py'-file exists or if that file is
outdated.
Parameters:
@@ -105,7 +105,7 @@ def recompile_grammar(ebnf_filename, query_remove_error_files=True):
return
base, ext = os.path.splitext(ebnf_filename)
compiler_name = base + '_compiler.py'
compiler_name = base + 'Compiler.py'
errors = []
if (not os.path.exists(compiler_name) or
grammar_changed(compiler_name, ebnf_filename)):
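Typical use, per the docstring (grammar file name illustrative):

    from DHParser.testing import recompile_grammar

    # recompiles only if 'LaTeXCompiler.py' is missing or older than the grammar
    recompile_grammar('LaTeX.ebnf')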
@@ -235,8 +235,7 @@ def grammar_suite(directory, parser_factory, transformer_factory, ignore_unknown
if errata:
all_errors[filename] = errata
except ValueError as e:
if (not ignore_unknown_filetypes or
str(e).find("Unknown") < 0):
if not ignore_unknown_filetypes or str(e).find("Unknown") < 0:
raise e
error_report = []
if all_errors:
...
@@ -7,26 +7,32 @@ latexdoc = preamble document
preamble = { command }+
genericenv = beginenv sequence §endenv
beginenv = "\begin" §( "{" name "}" )
endenv = "\end" §( "{" ::name "}" )
beginenv = "\begin" §( "{" NAME "}" )
endenv = "\end" §( "{" ::NAME "}" )
name = /\w+/~
comand = cmdname [ config ] block
cmdname = /\\\w+/
command = CMDNAME [ config ] block
config = "[" cfgtext §"]"
blockcmd = "\section" | "\subsection"
plaincmd = !blockcmd
sequence = { partext | parblock }
parblock = "{" { partext | parblock } §"}"
block = "{" { text | block } §"}"
partext = text | PARSEP
text = cfgtext | brackets
cfgtext = chunk | escaped | WSPC
text = cfgtext | BRACKETS
cfgtext = TEXTCHUNK | ESCAPED | WSPC
blockcmd = "\subsection" | "\section" | "\chapter" | "\subsubsection"
| "\paragraph" | "\subparagraph" | "\begin{enumerate}" | "\begin{itemize}"
CMDNAME = /\\\w+/~
NAME = /\w+/~
ESCAPED = /\\[%$&]/
BRACKET = /[\[\]]/ # left or right square bracket: [ ]
BRACKETS = /[\[\]]/ # left or right square bracket: [ ]
TEXTCHUNK = /[^\\%$&\{\}\[\]\s\n]+/ # some piece of text excluding whitespace,
# linefeed and special characters
WSPC = /[ \t]*\n?(?!\s*\n)[ \t]*/ # whitespace, including at most one linefeed
...
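A note on the WSPC rule: it deliberately admits at most one linefeed, so an empty line (a paragraph separator) is not swallowed as plain whitespace. A quick check of the regex with Python's re module:

    import re

    WSPC = re.compile(r'[ \t]*\n?(?!\s*\n)[ \t]*')
    assert WSPC.fullmatch('  \n  ')      # at most one linefeed: matches
    assert not WSPC.fullmatch(' \n\n ')  # blank line: correctly rejected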
#!/usr/bin/python3
"""recompile_grammar.py - recompiles any .ebnf files in the current
directory if necessary
Author: Eckhart Arnold <arnold@badw.de>
Copyright 2017 Bavarian Academy of Sciences and Humanities
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from DHParser.testing import recompile_grammar
recompile_grammar('.')
#!/usr/bin/python3
"""test_LaTeX_grammar.py - runs the unit tests for the LaTeX grammar
Author: Eckhart Arnold <arnold@badw.de>
Copyright 2017 Bavarian Academy of Sciences and Humanities
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from DHParser import testing
from LaTeXCompiler import get_grammar, get_transformer
error_report = testing.grammar_suite('grammar_tests', get_grammar, get_transformer)
assert not error_report, error_report
#!/usr/bin/python
#######################################################################
#
# SYMBOLS SECTION - Can be edited. Changes will be preserved.
#
#######################################################################
from functools import partial
import os
import sys
try:
import regex as re
except ImportError:
import re
from DHParser.toolkit import logging, is_filename, load_if_file
from DHParser.parsers import GrammarBase, CompilerBase, nil_scanner, \
Lookbehind, Lookahead, Alternative, Pop, Required, Token, \
Optional, NegativeLookbehind, OneOrMore, RegExp, Retrieve, Sequence, RE, Capture, \
ZeroOrMore, Forward, NegativeLookahead, mixin_comment, compile_source
from DHParser.syntaxtree import Node, traverse, remove_enclosing_delimiters, \
remove_children_if, reduce_single_child, replace_by_single_child, remove_whitespace, \
no_operation, remove_expendables, remove_tokens, flatten, is_whitespace, is_expendable, \
WHITESPACE_PTYPE, TOKEN_PTYPE
#######################################################################
#
# SCANNER SECTION - Can be edited. Changes will be preserved.
#
#######################################################################
def MLWScanner(text):
return text
def get_MLW_scanner():
return MLWScanner
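# The generated scanner above is just the identity function. Since the
# scanner section may be edited, a real scanner could preprocess the source
# text before parsing, e.g. (an illustrative sketch, not part of the
# generated skeleton):
#
#     def MLWScanner(text):
#         return text.replace('\r\n', '\n')  # normalize line endings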
#######################################################################
#
# PARSER SECTION - Don't edit! CHANGES WILL BE OVERWRITTEN!
#
#######################################################################
class MLWGrammar(GrammarBase):
r"""Parser for a MLW source file, with this grammar:
# EBNF syntax for MLW articles
@ comment = /#.*(?:\n|$)/ # comments start with '#' and extend to the end of the line
@ whitespace = /[\t ]*/ # line breaks do not count as whitespace
@ literalws = right # whitespace before and after literals is removed automatically
##############################################################################
Artikel = [LZ]
§LemmaPosition
[ArtikelKopf]
§BedeutungsPosition
§Autorinfo
[LZ] DATEI_ENDE
#### LEMMA-POSITION ##########################################################
LemmaPosition = "LEMMA" [LZ] §HauptLemma §TR [LemmaVarianten] §GrammatikPosition
HauptLemma = [klassisch] [gesichert] lemma
klassisch = "*"
gesichert = "$"
LemmaVarianten = [LZ]
{ lemma §TR }+
[LemmaZusatz §ABS]
lemma = LAT_WORT_TEIL { ("|" | "-") LAT_WORT_TEIL }
LemmaZusatz = "ZUSATZ" §lzs_typ
lzs_typ = /sim\./
## GRAMMATIK-POSITION ##
GrammatikPosition = "GRAMMATIK" [LZ] §wortart §ABS §flexion [genus] §ABS
[GrammatikVarianten]
wortart = "nomen" | "n." |
"verb" | "v." |
"adverb" | "adv." |
"adjektiv" | "adj."
GrammatikVarianten = { [wortart ABS] flexion [genus] ":" Beleg §ABS }+
flexion = FLEX { "," §FLEX }
FLEX = /-?[a-z]+/~
genus = "maskulinum" | "m." |
"femininum" | "f." |
"neutrum" | "n."
#### ARTIKEL-KOPF ############################################################
ArtikelKopf = SchreibweisenPosition
SchreibweisenPosition = "SCHREIBWEISE" [LZ] §SWTyp ":" [LZ]
§SWVariante { ABS SWVariante} [LZ]
SWTyp = "script." | "script. fat-"
SWVariante = Schreibweise ":" Beleg
Schreibweise = "vizreg-" | "festregel(a)" | "fezdregl(a)" | "fat-"
#### BEDEUTUNGS-POSITION #####################################################
BedeutungsPosition = { "BEDEUTUNG" [LZ] §Bedeutung }+
Bedeutung = (Interpretamente | Bedeutungskategorie) [Belege]
Bedeutungskategorie = /(?:(?![A-ZÄÖÜ][A-ZÄÖÜ]).)+/~ [LZ]
Interpretamente = LateinischeBedeutung [LZ] §DeutscheBedeutung [LZ]
LateinischeBedeutung = "LAT" /(?:(?![A-ZÄÖÜ][A-ZÄÖÜ]).)+/~
DeutscheBedeutung = "DEU" /(?:(?![A-ZÄÖÜ][A-ZÄÖÜ]).)+/~
Belege = "BELEGE" [LZ] { "*" EinBeleg }
EinBeleg = { !([LZ] ("*" | "BEDEUTUNG" | "AUTOR" | "NAME" | "ZUSATZ"))
/\s*.*\s*/ }+
[Zusatz]
Zusatz = "ZUSATZ" /\s*.*/ ABS
#### AUTOR/AUTORIN ###########################################################
Autorinfo = ("AUTORIN" | "AUTOR") Name
Name = { NAME | NAMENS_ABKÜRZUNG }+
#### GENERIC AND ATOMIC EXPRESSIONS #########################################
Beleg = Verweis
Verweis = ZielName
VerweisZiel = "[" ZielName "]"
ZielName = ZEICHENFOLGE
NAMENS_ABKÜRZUNG = /[A-ZÄÖÜÁÀÂÓÒÔÚÙÛ]\./~
NAME = /[A-ZÄÖÜÁÀÓÒÚÙÂÔÛ][a-zäöüßáàâóòôúùû]+/~
DEU_WORT = /[A-ZÄÖÜ]?[a-zäöüß]+/~
DEU_GROSS = /[A-ZÄÖÜ][a-zäöüß]+/~
DEU_KLEIN = /[a-zäöüß]+/~
LAT_WORT = /[a-z]+/~
LAT_WORT_TEIL = /[a-z]+/
GROSSSCHRIFT = /[A-ZÄÖÜ]+/~
ZEICHENFOLGE = /\w+/~
TR = ABS | LZ # (arbitrary) separator
ABS = /\s*;\s*/ | { ZW }+ # end marker (semicolon or line break)
ZW = /\n/~ # line break
LZ = /\s+/ # blank space or blank lines
DATEI_ENDE = !/./
NIEMALS = /(?!.)/
"""
wortart = Forward()
source_hash__ = "d953e1f653ac37c660274f1c1dbbd7e2"
parser_initialization__ = "upon instatiation"
COMMENT__ = r'#.*(?:\n|$)'
WSP__ = mixin_comment(whitespace=r'[\t ]*', comment=r'#.*(?:\n|$)')
wspL__ = ''
wspR__ = WSP__
NIEMALS = RE('(?!.)', wR='')
DATEI_ENDE = NegativeLookahead(RE('.', wR=''))
LZ = RE('\\s+', wR='')
ZW = RE('\\n')
ABS = Alternative(RE('\\s*;\\s*', wR=''), OneOrMore(ZW))
TR = Alternative(ABS, LZ)
ZEICHENFOLGE = RE('\\w+')
GROSSSCHRIFT = RE('[A-ZÄÖÜ]+')
LAT_WORT_TEIL = RE('[a-z]+', wR='')
LAT_WORT = RE('[a-z]+')
DEU_KLEIN = RE('[a-zäöüß]+')
DEU_GROSS = RE('[A-ZÄÖÜ][a-zäöüß]+')
DEU_WORT = RE('[A-ZÄÖÜ]?[a-zäöüß]+')
NAME = RE('[A-ZÄÖÜÁÀÓÒÚÙÂÔÛ][a-zäöüßáàâóòôúùû]+')
NAMENS_ABKÜRZUNG = RE('[A-ZÄÖÜÁÀÂÓÒÔÚÙÛ]\\.')
ZielName = ZEICHENFOLGE
VerweisZiel = Sequence(Token("["), ZielName, Token("]"))
Verweis = ZielName
Beleg = Verweis
Name = OneOrMore(Alternative(NAME, NAMENS_ABKÜRZUNG))
Autorinfo = Sequence(Alternative(Token("AUTORIN"), Token("AUTOR")), Name)
Zusatz = Sequence(Token("ZUSATZ"), RE('\\s*.*', wR=''), ABS)
EinBeleg = Sequence(OneOrMore(Sequence(NegativeLookahead(Sequence(Optional(LZ), Alternative(Token("*"), Token("BEDEUTUNG"), Token("AUTOR"), Token("NAME"), Token("ZUSATZ")))), RE('\\s*.*\\s*', wR=''))), Optional(Zusatz))
Belege = Sequence(Token("BELEGE"), Optional(LZ), ZeroOrMore(Sequence(Token("*"), EinBeleg)))
DeutscheBedeutung = Sequence(Token("DEU"), RE('(?:(?![A-ZÄÖÜ][A-ZÄÖÜ]).)+'))
LateinischeBedeutung = Sequence(Token("LAT"), RE('(?:(?![A-ZÄÖÜ][A-ZÄÖÜ]).)+'))
Interpretamente = Sequence(LateinischeBedeutung, Optional(LZ), Required(DeutscheBedeutung), Optional(LZ))
Bedeutungskategorie = Sequence(RE('(?:(?![A-ZÄÖÜ][A-ZÄÖÜ]).)+'), Optional(LZ))
Bedeutung = Sequence(Alternative(Interpretamente, Bedeutungskategorie), Optional(Belege))
BedeutungsPosition = OneOrMore(Sequence(Token("BEDEUTUNG"), Optional(LZ), Required(Bedeutung)))
Schreibweise = Alternative(Token("vizreg-"), Token("festregel(a)"), Token("fezdregl(a)"), Token("fat-"))
SWVariante = Sequence(Schreibweise, Token(":"), Beleg)
SWTyp = Alternative(Token("script."), Token("script. fat-"))
SchreibweisenPosition = Sequence(Token("SCHREIBWEISE"), Optional(LZ), Required(SWTyp), Token(":"), Optional(LZ), Required(SWVariante), ZeroOrMore(Sequence(ABS, SWVariante)), Optional(LZ))
ArtikelKopf = SchreibweisenPosition
genus = Alternative(Token("maskulinum"), Token("m."), Token("femininum"), Token("f."), Token("neutrum"), Token("n."))
FLEX = RE('-?[a-z]+')
flexion = Sequence(FLEX, ZeroOrMore(Sequence(Token(","), Required(FLEX))))
GrammatikVarianten = OneOrMore(Sequence(Optional(Sequence(wortart, ABS)), flexion, Optional(genus), Token(":"), Beleg, Required(ABS)))
wortart.set(Alternative(Token("nomen"), Token("n."), Token("verb"), Token("v."), Token("adverb"), Token("adv."), Token("adjektiv"), Token("adj.")))
GrammatikPosition = Sequence(Token("GRAMMATIK"), Optional(LZ), Required(wortart), Required(ABS), Required(flexion), Optional(genus), Required(ABS), Optional(GrammatikVarianten))
lzs_typ = RE('sim\\.', wR='')
LemmaZusatz = Sequence(Token("ZUSATZ"), Required(lzs_typ))
lemma = Sequence(LAT_WORT_TEIL, ZeroOrMore(Sequence(Alternative(Token("|"), Token("-")), LAT_WORT_TEIL)))
LemmaVarianten = Sequence(Optional(LZ), OneOrMore(Sequence(lemma, Required(TR))), Optional(Sequence(LemmaZusatz, Required(ABS))))
gesichert = Token("$")
klassisch = Token("*")
HauptLemma = Sequence(Optional(klassisch), Optional(gesichert), lemma)
LemmaPosition = Sequence(Token("LEMMA"), Optional(LZ), Required(HauptLemma), Required(TR), Optional(LemmaVarianten), Required(GrammatikPosition))
Artikel = Sequence(Optional(LZ), Required(LemmaPosition), Optional(ArtikelKopf), Required(BedeutungsPosition), Required(Autorinfo), Optional(LZ), DATEI_ENDE)
root__ = Artikel
def get_MLW_grammar():
global thread_local_MLW_grammar_singleton
try:
grammar = thread_local_MLW_grammar_singleton
return grammar
except NameError:
thread_local_MLW_grammar_singleton = MLWGrammar()
return thread_local_MLW_grammar_singleton
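# Note: the try/except NameError above implements lazy one-time
# initialization: on the first call the global singleton name is still
# unbound, so the except branch creates the MLWGrammar instance and caches
# it for all subsequent calls.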
#######################################################################
#
# AST SECTION - Can be edited. Changes will be preserved.
#
#######################################################################
MLW_AST_transformation_table = {
# AST Transformations for the MLW-grammar
"Artikel": no_operation,
"LemmaPosition": no_operation,
"HauptLemma": no_operation,
"klassisch": no_operation,
"gesichert": no_operation,
"LemmaVarianten": no_operation,
"lemma": no_operation,
"LemmaZusatz": no_operation,
"lzs_typ": no_operation,
"GrammatikPosition": no_operation,
"wortart": no_operation,
"GrammatikVariante": no_operation,
"GVariante": no_operation,
"Flexionen": no_operation,
"Flexion": no_operation,
"genus": no_operation,
"ArtikelKopf": no_operation,
"SchreibweisenPosition": no_operation,
"SWTyp": no_operation,
"SWVariante": no_operation,
"Schreibweise": no_operation,
"Beleg": no_operation,
"Verweis": no_operation,
"VerweisZiel": no_operation,
"BedeutungsPosition": no_operation,
"Bedeutung": no_operation,
"Bedeutungskategorie": no_operation,
"Interpretamente": no_operation,
"LateinischeBedeutung": no_operation,
"DeutscheBedeutung": no_operation,
"Belege": no_operation,
"EinBeleg": no_operation,
"Zusatz": no_operation,
"Autorinfo": no_operation,
"Name": no_operation,
"NAMENS_ABKÜRZUNG": no_operation,
"NAME": no_operation,
"DEU_WORT": no_operation,
"DEU_GROSS": no_operation,
"DEU_KLEIN": no_operation,
"LAT_WORT": no_operation,
"LAT_WORT_TEIL": no_operation,
"GROSSSCHRIFT": no_operation,
"TR": no_operation,
"NEUE_ZEILE": no_operation,
"LZ": no_operation,
"DATEI_ENDE": no_operation,
"NIEMALS": no_operation,
"": no_operation
}
MLWTransform = partial(traverse, processing_table=MLW_AST_transformation_table)
def get_MLW_transformer():
return MLWTransform
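# Illustrative sketch (not part of the generated skeleton): the no_operation
# entries in the table above can be replaced by the transformations imported
# from DHParser.syntaxtree, e.g.:
#
#     "lemma": reduce_single_child,
#     "LZ": remove_whitespace,
#     "TR": replace_by_single_child,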
#######################################################################
#
# COMPILER SECTION - Can be edited. Changes will be preserved.
#
#######################################################################
class MLWCompiler(CompilerBase):
"""Compiler for the abstract-syntax-tree of a MLW source file.
"""
def __init__(self, grammar_name="MLW", grammar_source=""):
super(MLWCompiler, self).__init__(grammar_name, grammar_source)
assert re.match('\w+\Z', grammar_name)
def on_Artikel(self, node):
return node
def on_LemmaPosition(self, node):
pass
def on_HauptLemma(self, node):
pass
def on_klassisch(self, node):
pass
def on_gesichert(self, node):
pass
def on_LemmaVarianten(self, node):
pass
def on_lemma(self, node):
pass
def on_LemmaZusatz(self, node):
pass
def on_lzs_typ(self, node):
pass
def on_GrammatikPosition(self, node):
pass
def on_wortart(self, node):
pass
def on_GrammatikVariante(self, node):
pass
def on_GVariante(self, node):
pass
def on_Flexionen(self, node):
pass
def on_Flexion(self, node):
pass
def on_genus(self, node):
pass