
Commit e45f01a4 authored by Eckhart Arnold

examples/Tutorial fixed

parent 630efad5
......@@ -77,7 +77,7 @@ from DHParser import logging, is_filename, load_if_file, \
last_value, counterpart, accumulate, PreprocessorFunc, \
Node, TransformationFunc, TransformationDict, \
traverse, remove_children_if, merge_children, is_anonymous, \
content_from_sinlge_child, replace_by_single_child, replace_or_reduce, remove_whitespace, \
reduce_single_child, replace_by_single_child, replace_or_reduce, remove_whitespace, \
remove_expendables, remove_empty, remove_tokens, flatten, is_whitespace, \
is_empty, is_expendable, collapse, replace_content, WHITESPACE_PTYPE, TOKEN_PTYPE, \
remove_nodes, remove_content, remove_brackets, replace_parser, \
......
......@@ -27,7 +27,7 @@ from DHParser.parsers import Grammar, mixin_comment, nil_preprocessor, Forward,
from DHParser.syntaxtree import Node, TransformationFunc, WHITESPACE_PTYPE, TOKEN_PTYPE
from DHParser.toolkit import load_if_file, escape_re, md5, sane_parser_name, re, typing
from DHParser.transform import traverse, remove_brackets, \
content_from_sinlge_child, replace_by_single_child, remove_expendables, \
reduce_single_child, replace_by_single_child, remove_expendables, \
remove_tokens, flatten, forbid, assert_content, remove_infix_operator
from DHParser.versionnumber import __version__
from typing import Callable, Dict, List, Set, Tuple
......@@ -212,12 +212,12 @@ EBNF_AST_transformation_table = {
"unordered":
remove_brackets,
"oneormore, repetition, option":
[content_from_sinlge_child, remove_brackets,
[reduce_single_child, remove_brackets,
forbid('repetition', 'option', 'oneormore'), assert_content(r'(?!§)')],
"symbol, literal, regexp":
content_from_sinlge_child,
reduce_single_child,
(TOKEN_PTYPE, WHITESPACE_PTYPE):
content_from_sinlge_child,
reduce_single_child,
"list_":
[flatten, remove_infix_operator],
"*":
......@@ -438,9 +438,9 @@ class EBNFCompiler(Compiler):
if rule.startswith('Alternative'):
transformations = '[replace_or_reduce]'
elif rule.startswith('Synonym'):
transformations = '[content_from_sinlge_child]'
transformations = '[reduce_single_child]'
transtable.append(' "' + name + '": %s,' % transformations)
transtable.append(' ":Token, :RE": content_from_sinlge_child,')
transtable.append(' ":Token, :RE": reduce_single_child,')
transtable += [' "*": replace_by_single_child', '}', '']
transtable += [TRANSFORMER_FACTORY.format(NAME=self.grammar_name)]
return '\n'.join(transtable)
......
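For orientation, the code above emits one table line per grammar rule plus the two fixed default entries; for a Synonym rule named, say, jahr (a hypothetical rule name), the end of the generated table would read roughly:

    "jahr": [reduce_single_child],
    ":Token, :RE": reduce_single_child,
    "*": replace_by_single_child
}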
This diff is collapsed.
......@@ -37,7 +37,7 @@ __all__ = ('TransformationDict',
'traverse',
'is_named',
'replace_by_single_child',
'content_from_sinlge_child',
'reduce_single_child',
'replace_or_reduce',
'replace_parser',
'collapse',
......@@ -355,7 +355,7 @@ def replace_by_single_child(context: List[Node]):
replace_by(node, node.children[0])
def content_from_sinlge_child(context: List[Node]):
def reduce_single_child(context: List[Node]):
"""
Reduces a single branch node by transferring the result of its
immediate descendant to this node, but keeping this node's parser entry.
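Illustrated on a minimal tree in S-expression notation (the node names below are made up for the example and are not taken from this commit), the difference between the two transformations is:

    (symbol (:RE "term"))   --reduce_single_child-->      (symbol "term")
    (symbol (:RE "term"))   --replace_by_single_child-->  (:RE "term")

reduce_single_child keeps the parent's parser entry and pulls the child's result up into it, whereas replace_by_single_child keeps the child and drops the parent.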
......@@ -508,17 +508,19 @@ def is_expendable(context: List[Node]) -> bool:
return is_empty(context) or is_whitespace(context)
@transformation_factory(AbstractSet[str])
def is_token(context: List[Node], tokens: AbstractSet[str] = frozenset()) -> bool:
node = context[-1]
return node.parser.ptype == TOKEN_PTYPE and (not tokens or node.result in tokens)
@transformation_factory(AbstractSet[str])
def is_one_of(context: List[Node], tag_name_set: AbstractSet[str]) -> bool:
"""Returns true, if the node's tag_name is on of the
given tag names."""
return context[-1].tag_name in tag_name_set
@transformation_factory(str)
def has_content(context: List[Node], regexp: str) -> bool:
"""Checks a node's content against a regular expression."""
return bool(re.match(regexp, context[-1].content))
......
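All of these predicates receive the whole context, i.e. the list of nodes from the root down to the node under inspection, and test context[-1]; this is what allows them to be passed as conditions to parameterised transformations. The Lyrik transformation table further down in this commit uses them, for example, as

    "wortfolge":
        [flatten(is_one_of('WORT'), recursive=False), remove_last(is_whitespace), collapse],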
......@@ -25,7 +25,7 @@ from DHParser import logging, is_filename, load_if_file, \
last_value, counterpart, accumulate, PreprocessorFunc, \
Node, TransformationFunc, TransformationDict, TRUE_CONDITION, \
traverse, remove_children_if, merge_children, is_anonymous, \
content_from_sinlge_child, replace_by_single_child, replace_or_reduce, remove_whitespace, \
reduce_single_child, replace_by_single_child, replace_or_reduce, remove_whitespace, \
remove_expendables, remove_empty, remove_tokens, flatten, is_whitespace, \
is_empty, is_expendable, collapse, replace_content, remove_nodes, remove_content, remove_brackets, replace_parser, \
keep_children, is_one_of, has_content, apply_if, remove_first, remove_last, \
......@@ -159,7 +159,7 @@ BibTeX_AST_transformation_table = {
"content": [replace_or_reduce],
"plain_content": [],
"text": [],
":Token, :RE": content_from_sinlge_child,
":Token, :RE": reduce_single_child,
"*": replace_by_single_child
}
......
......@@ -22,7 +22,7 @@ from DHParser import logging, is_filename, load_if_file, \
last_value, counterpart, accumulate, PreprocessorFunc, \
Node, TransformationFunc, TransformationDict, TRUE_CONDITION, \
traverse, remove_children_if, merge_children, is_anonymous, \
content_from_sinlge_child, replace_by_single_child, replace_or_reduce, remove_whitespace, \
reduce_single_child, replace_by_single_child, replace_or_reduce, remove_whitespace, \
remove_expendables, remove_empty, remove_tokens, flatten, is_whitespace, \
is_empty, is_expendable, collapse, replace_content, WHITESPACE_PTYPE, TOKEN_PTYPE, \
remove_nodes, remove_content, remove_brackets, replace_parser, \
......@@ -156,7 +156,7 @@ EBNF_AST_transformation_table = {
"regexp": [],
"list_": [],
"EOF": [],
":Token, :RE": content_from_sinlge_child,
":Token, :RE": reduce_single_child,
"*": replace_by_single_child
}
......
......@@ -21,7 +21,7 @@ from DHParser import logging, is_filename, Grammar, Compiler, Lookbehind, Altern
ZeroOrMore, Forward, NegativeLookahead, mixin_comment, compile_source, \
PreprocessorFunc, TransformationDict, \
Node, TransformationFunc, traverse, remove_children_if, is_anonymous, \
content_from_sinlge_child, replace_by_single_child, remove_whitespace, \
reduce_single_child, replace_by_single_child, remove_whitespace, \
flatten, is_empty, collapse, replace_content, remove_brackets, is_one_of, remove_first, \
remove_tokens, remove_nodes, TOKEN_PTYPE
......@@ -389,10 +389,10 @@ LaTeX_AST_transformation_table = {
"latexdoc": [],
"preamble": [],
"document": [flatten_structure],
"frontpages": content_from_sinlge_child,
"frontpages": reduce_single_child,
"Chapters, Sections, SubSections, SubSubSections, Paragraphs, SubParagraphs": [],
"Chapter, Section, SubSection, SubSubSection, Paragraph, SubParagraph": [],
"heading": content_from_sinlge_child,
"heading": reduce_single_child,
"Bibliography": [],
"Index": [],
"block_environment": replace_by_single_child,
......@@ -402,14 +402,14 @@ LaTeX_AST_transformation_table = {
"itemize, enumerate": [remove_brackets, flatten],
"item": [],
"figure": [],
"quotation": [content_from_sinlge_child, remove_brackets],
"quotation": [reduce_single_child, remove_brackets],
"verbatim": [],
"tabular": [],
"tabular_config, block_of_paragraphs": [remove_brackets, content_from_sinlge_child],
"tabular_config, block_of_paragraphs": [remove_brackets, reduce_single_child],
"tabular_row": [flatten, remove_tokens('&', '\\')],
"tabular_cell": [flatten, remove_whitespace],
"multicolumn": [remove_tokens('{', '}')],
"hline": [remove_whitespace, content_from_sinlge_child],
"hline": [remove_whitespace, reduce_single_child],
"sequence": [flatten],
"paragraph": [flatten],
"text_element": replace_by_single_child,
......@@ -418,8 +418,8 @@ LaTeX_AST_transformation_table = {
"known_inline_env": replace_by_single_child,
"generic_inline_env": [],
"begin_inline_env, end_inline_env": [replace_by_single_child],
"begin_environment, end_environment": [remove_brackets, content_from_sinlge_child],
"inline_math": [remove_brackets, content_from_sinlge_child],
"begin_environment, end_environment": [remove_brackets, reduce_single_child],
"inline_math": [remove_brackets, reduce_single_child],
"command": replace_by_single_child,
"known_command": replace_by_single_child,
"text_command": [],
......@@ -427,14 +427,14 @@ LaTeX_AST_transformation_table = {
"footnote": [],
"includegraphics": [],
"caption": [],
"config": [remove_brackets, content_from_sinlge_child],
"config": [remove_brackets, reduce_single_child],
"block": [remove_brackets, flatten, replace_by_single_child],
"text": collapse,
"no_command, blockcmd": [],
"structural": [],
"CMDNAME": [remove_whitespace, content_from_sinlge_child],
"TXTCOMMAND": [remove_whitespace, content_from_sinlge_child],
"NAME": [content_from_sinlge_child, remove_whitespace, content_from_sinlge_child],
"CMDNAME": [remove_whitespace, reduce_single_child],
"TXTCOMMAND": [remove_whitespace, reduce_single_child],
"NAME": [reduce_single_child, remove_whitespace, reduce_single_child],
"ESCAPED": [replace_content(lambda node: str(node)[1:])],
"BRACKETS": [],
"TEXTCHUNK": [],
......@@ -445,7 +445,7 @@ LaTeX_AST_transformation_table = {
"BACKSLASH": [],
"EOF": [],
":Token":
[remove_whitespace, content_from_sinlge_child],
[remove_whitespace, reduce_single_child],
":RE": replace_by_single_child,
":Whitespace": streamline_whitespace,
"*": replace_by_single_child
......
......@@ -22,7 +22,7 @@ from DHParser import logging, is_filename, load_if_file, \
last_value, counterpart, accumulate, PreprocessorFunc, \
Node, TransformationFunc, TransformationDict, \
traverse, remove_children_if, merge_children, is_anonymous, \
content_from_sinlge_child, replace_by_single_child, replace_or_reduce, remove_whitespace, \
reduce_single_child, replace_by_single_child, replace_or_reduce, remove_whitespace, \
remove_expendables, remove_empty, remove_tokens, flatten, is_whitespace, \
is_empty, is_expendable, collapse, replace_content, remove_nodes, remove_content, remove_brackets, replace_parser, \
keep_children, is_one_of, has_content, apply_if, remove_first, remove_last
......@@ -471,7 +471,7 @@ def get_grammar() -> MLWGrammar:
MLW_AST_transformation_table = {
# AST Transformations for the MLW-grammar
"+": remove_empty,
"Autor": [content_from_sinlge_child],
"Autor": [reduce_single_child],
"Artikel": [],
"LemmaPosition": [],
"Lemma": [],
......@@ -506,14 +506,14 @@ MLW_AST_transformation_table = {
"Zusatz": [],
"ArtikelVerfasser": [],
"Name": [],
"Stelle": [content_from_sinlge_child],
"Stelle": [reduce_single_child],
"SW_LAT": [replace_or_reduce],
"SW_DEU": [replace_or_reduce],
"SW_GRIECH": [replace_or_reduce],
"Beleg": [replace_by_single_child],
"Verweis": [],
"VerweisZiel": [],
"Werk": [content_from_sinlge_child],
"Werk": [reduce_single_child],
"ZielName": [replace_by_single_child],
"NAMENS_ABKÜRZUNG": [],
"NAME": [],
......@@ -539,7 +539,7 @@ MLW_AST_transformation_table = {
"KOMMENTARZEILEN": [],
"DATEI_ENDE": [],
"NIEMALS": [],
":Token, :RE": content_from_sinlge_child,
":Token, :RE": reduce_single_child,
"*": replace_by_single_child
}
......
......@@ -22,7 +22,7 @@ from DHParser import logging, is_filename, load_if_file, \
last_value, counterpart, accumulate, PreprocessorFunc, \
Node, TransformationFunc, TransformationDict, TRUE_CONDITION, \
traverse, remove_children_if, merge_children, is_anonymous, \
content_from_sinlge_child, replace_by_single_child, replace_or_reduce, remove_whitespace, \
reduce_single_child, replace_by_single_child, replace_or_reduce, remove_whitespace, \
remove_expendables, remove_empty, remove_tokens, flatten, is_whitespace, \
is_empty, is_expendable, collapse, replace_content, WHITESPACE_PTYPE, TOKEN_PTYPE, \
remove_nodes, remove_content, remove_brackets, replace_parser, \
......@@ -352,7 +352,7 @@ MLW_AST_transformation_table = {
"LZ": [],
"DATEI_ENDE": [],
"NIEMALS": [],
":Token, :RE": content_from_sinlge_child,
":Token, :RE": reduce_single_child,
"*": replace_by_single_child
}
......
......@@ -59,6 +59,6 @@ MLW_AST_transformation_table = {
"LZ": [],
"DATEI_ENDE": [],
"NIEMALS": [],
":Token, :RE": content_from_sinlge_child,
":Token, :RE": reduce_single_child,
"*": replace_by_single_child
}
gedicht = bibliographisches { LEERZEILE }+ [serie] §titel §text /\s*/ §ENDE
gedicht = bibliographisches { LEERZEILE }+ [serie] §titel text /\s*/ ENDE
bibliographisches = autor §"," [NZ] werk §"," [NZ] ort §"," [NZ] jahr §"."
bibliographisches = autor §"," [NZ] werk "," [NZ] ort "," [NZ] jahr "."
autor = namenfolge [verknüpfung]
werk = wortfolge ["." §untertitel] [verknüpfung]
untertitel = wortfolge [verknüpfung]
......
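Dropping all but the first § marker reflects the changed semantics visible in the regenerated parser further below: instead of wrapping each mandatory element in Required(), a single § now marks the position from which all following elements of a series are mandatory, which the generated code expresses with the mandatory= parameter of Series. Compare, roughly:

    werk = Series(wortfolge, Optional(Series(Token("."), Required(untertitel))), Optional(verknüpfung))    # old
    werk = Series(wortfolge, Option(Series(Token("."), untertitel, mandatory=1)), Option(verknüpfung))     # new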
......@@ -16,31 +16,30 @@ try:
import regex as re
except ImportError:
import re
from DHParser.toolkit import logging, is_filename, load_if_file
from DHParser.parsers import Grammar, Compiler, nil_scanner, \
Lookbehind, Lookahead, Alternative, Pop, Required, Token, Synonym, \
Optional, NegativeLookbehind, OneOrMore, RegExp, Retrieve, Series, RE, Capture, \
from DHParser import logging, is_filename, Grammar, Compiler, Lookbehind, \
Alternative, Pop, Token, Synonym, \
Option, NegativeLookbehind, OneOrMore, RegExp, Series, RE, Capture, \
ZeroOrMore, Forward, NegativeLookahead, mixin_comment, compile_source, \
last_value, counterpart, accumulate, ScannerFunc
from DHParser.syntaxtree import Node, traverse, remove_children_if, \
PreprocessorFunc, TransformationDict, remove_empty, reduce_single_child, \
Node, TransformationFunc, traverse, remove_children_if, is_anonymous, \
reduce_single_child, replace_by_single_child, remove_whitespace, \
remove_expendables, remove_empty, remove_tokens, flatten, is_whitespace, \
is_empty, is_expendable, collapse, replace_content, TransformationFunc, remove_parser, remove_content, remove_brackets, \
keep_children, has_name, has_content, apply_if, remove_first, remove_last
from DHParser.base import WHITESPACE_PTYPE, TOKEN_PTYPE
flatten, is_empty, collapse, replace_content, remove_brackets, \
is_one_of, remove_first, remove_last, remove_tokens, remove_nodes, \
is_whitespace, TOKEN_PTYPE
#######################################################################
#
# SCANNER SECTION - Can be edited. Changes will be preserved.
# PREPROCESSOR SECTION - Can be edited. Changes will be preserved.
#
#######################################################################
def LyrikScanner(text):
def LyrikPreprocessor(text):
return text
def get_scanner() -> ScannerFunc:
return LyrikScanner
def get_preprocessor() -> PreprocessorFunc:
return LyrikPreprocessor
#######################################################################
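Since this section may be edited, the identity function above can be replaced by any text-to-text transformation that should run before parsing; a hypothetical sketch (the line-ending normalisation is only an illustration, not part of this commit):

    def LyrikPreprocessor(text):
        # normalise Windows line endings before the grammar sees the text
        return text.replace('\r\n', '\n')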
......@@ -52,9 +51,9 @@ def get_scanner() -> ScannerFunc:
class LyrikGrammar(Grammar):
r"""Parser for a Lyrik source file, with this grammar:
gedicht = bibliographisches { LEERZEILE }+ [serie] §titel §text /\s*/ §ENDE
gedicht = bibliographisches { LEERZEILE }+ [serie] §titel text /\s*/ ENDE
bibliographisches = autor §"," [NZ] werk §"," [NZ] ort §"," [NZ] jahr §"."
bibliographisches = autor §"," [NZ] werk "," [NZ] ort "," [NZ] jahr "."
autor = namenfolge [verknüpfung]
werk = wortfolge ["." §untertitel] [verknüpfung]
untertitel = wortfolge [verknüpfung]
......@@ -83,13 +82,14 @@ class LyrikGrammar(Grammar):
JAHRESZAHL = /\d\d\d\d/~
ENDE = !/./
"""
source_hash__ = "5fd541c17475b7f71654ff0cda14ec6f"
source_hash__ = "5ceb5f91412cbe1bcd4dd8b7005598fb"
parser_initialization__ = "upon instantiation"
COMMENT__ = r''
WSP__ = mixin_comment(whitespace=r'[\t ]*', comment=r'')
WHITESPACE__ = r'[\t ]*'
WSP__ = mixin_comment(whitespace=WHITESPACE__, comment=COMMENT__)
wspL__ = ''
wspR__ = WSP__
ENDE = NegativeLookahead(RE('.', wR=''))
ENDE = NegativeLookahead(RegExp('.'))
JAHRESZAHL = RE('\\d\\d\\d\\d')
LEERZEILE = RE('\\n[ \\t]*(?=\\n)')
NZ = RE('\\n')
......@@ -107,22 +107,22 @@ class LyrikGrammar(Grammar):
namenfolge = OneOrMore(NAME)
wortfolge = OneOrMore(WORT)
jahr = Synonym(JAHRESZAHL)
ort = Series(wortfolge, Optional(verknüpfung))
untertitel = Series(wortfolge, Optional(verknüpfung))
werk = Series(wortfolge, Optional(Series(Token("."), Required(untertitel))), Optional(verknüpfung))
autor = Series(namenfolge, Optional(verknüpfung))
bibliographisches = Series(autor, Required(Token(",")), Optional(NZ), werk, Required(Token(",")), Optional(NZ), ort, Required(Token(",")), Optional(NZ), jahr, Required(Token(".")))
gedicht = Series(bibliographisches, OneOrMore(LEERZEILE), Optional(serie), Required(titel), Required(text), RE('\\s*', wR=''), Required(ENDE))
ort = Series(wortfolge, Option(verknüpfung))
untertitel = Series(wortfolge, Option(verknüpfung))
werk = Series(wortfolge, Option(Series(Token("."), untertitel, mandatory=1)), Option(verknüpfung))
autor = Series(namenfolge, Option(verknüpfung))
bibliographisches = Series(autor, Token(","), Option(NZ), werk, Token(","), Option(NZ), ort, Token(","), Option(NZ), jahr, Token("."), mandatory=1)
gedicht = Series(bibliographisches, OneOrMore(LEERZEILE), Option(serie), titel, text, RegExp('\\s*'), ENDE, mandatory=3)
root__ = gedicht
def get_grammar() -> LyrikGrammar:
global thread_local_Lyrik_grammar_singleton
try:
grammar = thread_local_Lyrik_grammar_singleton
return grammar
except NameError:
thread_local_Lyrik_grammar_singleton = LyrikGrammar()
return thread_local_Lyrik_grammar_singleton
grammar = thread_local_Lyrik_grammar_singleton
return grammar
#######################################################################
......@@ -131,34 +131,49 @@ def get_grammar() -> LyrikGrammar:
#
#######################################################################
def halt(node):
assert False
Lyrik_AST_transformation_table = {
# AST Transformations for the Lyrik-grammar
"+": remove_empty,
"gedicht": [],
"bibliographisches": [],
"bibliographisches":
[remove_nodes('NZ'), remove_tokens],
"autor": [],
"werk": [],
"untertitel": [],
"ort": [],
"jahr": [],
"wortfolge": [],
"namenfolge": [],
"verknüpfung": [],
"ziel": [],
"serie": [],
"titel": [],
"jahr":
[reduce_single_child],
"wortfolge":
[flatten(is_one_of('WORT'), recursive=False), remove_last(is_whitespace), collapse],
"namenfolge":
[flatten(is_one_of('NAME'), recursive=False), remove_last(is_whitespace), collapse],
"verknüpfung":
[remove_tokens('<', '>'), reduce_single_child],
"ziel":
reduce_single_child,
"gedicht, strophe, text":
[flatten, remove_nodes('LEERZEILE'), remove_nodes('NZ')],
"titel, serie":
[flatten, remove_nodes('LEERZEILE'), remove_nodes('NZ'), collapse],
"zeile": [],
"text": [],
"strophe": [],
"vers": [],
"vers":
collapse,
"WORT": [],
"NAME": [],
"ZEICHENFOLGE": [],
"NZ": [],
"ZEICHENFOLGE":
reduce_single_child,
"NZ":
reduce_single_child,
"LEERZEILE": [],
"JAHRESZAHL": [],
"JAHRESZAHL":
[reduce_single_child],
"ENDE": [],
":Token, :RE": reduce_single_child,
":Whitespace":
replace_content(lambda node : " "),
":Token, :RE":
reduce_single_child,
"*": replace_by_single_child
}
......@@ -283,7 +298,7 @@ def compile_src(source):
cname = compiler.__class__.__name__
log_file_name = os.path.basename(os.path.splitext(source)[0]) \
if is_filename(source) < 0 else cname[:cname.find('.')] + '_out'
result = compile_source(source, get_scanner(),
result = compile_source(source, get_preprocessor(),
get_grammar(),
get_transformer(), compiler)
return result
......
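compile_src thus runs the complete pipeline, preprocessor, grammar, AST transformer and compiler, over a single source; a hedged usage sketch with a hypothetical file name:

    result = compile_src("beispielgedicht.txt")   # hypothetical input file
    print(result)                                 # passes through whatever compile_source returned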
#!/usr/bin/python
#######################################################################
#
# SYMBOLS SECTION - Can be edited. Changes will be preserved.
#
#######################################################################
import os
import sys
from functools import partial
sys.path.append('../../')
try:
import regex as re
except ImportError:
import re
from DHParser.toolkit import logging, is_filename
from DHParser.parsers import Grammar, Compiler, Required, Token, \
Option, OneOrMore, Series, RE, ZeroOrMore, NegativeLookahead, mixin_comment, compile_source, \
PreprocessorFunc, Synonym
from DHParser.syntaxtree import Node, traverse, remove_last, \
reduce_single_child, replace_by_single_child, remove_tokens, flatten, is_whitespace, collapse, replace_content, \
TransformationFunc, \
remove_parser, remove_empty, has_name
#######################################################################
#
# SCANNER SECTION - Can be edited. Changes will be preserved.
#
#######################################################################
def LyrikScanner(text):
return text
def get_scanner() -> PreprocessorFunc:
return LyrikScanner
#######################################################################
#
# PARSER SECTION - Don't edit! CHANGES WILL BE OVERWRITTEN!
#
#######################################################################
class LyrikGrammar(Grammar):
r"""Parser for a Lyrik source file, with this grammar:
gedicht = bibliographisches { LEERZEILE }+ [serie] §titel §text /\s*/ §ENDE
bibliographisches = autor §"," [NZ] werk §"," [NZ] ort §"," [NZ] jahr §"."
autor = namenfolge [verknüpfung]
werk = wortfolge ["." §untertitel] [verknüpfung]
untertitel = wortfolge [verknüpfung]
ort = wortfolge [verknüpfung]
jahr = JAHRESZAHL
wortfolge = { WORT }+
namenfolge = { NAME }+
verknüpfung = "<" ziel ">"
ziel = ZEICHENFOLGE
serie = !(titel vers NZ vers) { NZ zeile }+ { LEERZEILE }+
titel = { NZ zeile}+ { LEERZEILE }+
zeile = { ZEICHENFOLGE }+
text = { strophe {LEERZEILE} }+
strophe = { NZ vers }+
vers = { ZEICHENFOLGE }+
WORT = /\w+/~
NAME = /\w+\.?/~
ZEICHENFOLGE = /[^ \n<>]+/~
NZ = /\n/~
LEERZEILE = /\n[ \t]*(?=\n)/~
JAHRESZAHL = /\d\d\d\d/~
ENDE = !/./
"""
source_hash__ = "a2832bea27ad1a4e48e87ad7b1cef2c3"
parser_initialization__ = "upon instantiation"
COMMENT__ = r''
WSP__ = mixin_comment(whitespace=r'[\t ]*', comment=r'')
wspL__ = ''
wspR__ = WSP__
ENDE = NegativeLookahead(RE('.', wR=''))
JAHRESZAHL = RE('\\d\\d\\d\\d')
LEERZEILE = RE('\\n[ \\t]*(?=\\n)')
NZ = RE('\\n')
ZEICHENFOLGE = RE('[^ \\n<>]+')
NAME = RE('\\w+\\.?')
WORT = RE('\\w+')
vers = OneOrMore(ZEICHENFOLGE)
strophe = OneOrMore(Series(NZ, vers))
text = OneOrMore(Series(strophe, ZeroOrMore(LEERZEILE)))
zeile = OneOrMore(ZEICHENFOLGE)
titel = Series(OneOrMore(Series(NZ, zeile)), OneOrMore(LEERZEILE))
serie = Series(NegativeLookahead(Series(titel, vers, NZ, vers)), OneOrMore(Series(NZ, zeile)), OneOrMore(LEERZEILE))
ziel = Synonym(ZEICHENFOLGE)
verknüpfung = Series(Token("<"), ziel, Token(">"))
namenfolge = OneOrMore(NAME)
wortfolge = OneOrMore(WORT)
jahr = Synonym(JAHRESZAHL)
ort = Series(wortfolge, Option(verknüpfung))
untertitel = Series(wortfolge, Option(verknüpfung))
werk = Series(wortfolge, Option(Series(Token("."), Required(untertitel))), Option(verknüpfung))
autor = Series(namenfolge, Option(verknüpfung))
bibliographisches = Series(autor, Required(Token(",")), Option(NZ), werk, Required(Token(",")), Option(NZ), ort,
Required(Token(",")), Option(NZ), jahr, Required(Token(".")))
gedicht = Series(bibliographisches, OneOrMore(LEERZEILE), Option(serie), Required(titel), Required(text),
RE('\\s*', wR=''), Required(ENDE))
root__ = gedicht
def get_grammar() -> LyrikGrammar:
global thread_local_Lyrik_grammar_singleton
try:
grammar = thread_local_Lyrik_grammar_singleton
return grammar
except NameError:
thread_local_Lyrik_grammar_singleton = LyrikGrammar()
return thread_local_Lyrik_grammar_singleton
#######################################################################
#
# AST SECTION - Can be edited. Changes will be preserved.
#
#######################################################################
def halt(node):
assert False