Commit 0e2810ff authored by Eckhart Arnold's avatar Eckhart Arnold
Browse files

- Introduction added to Readme.md

parent 78e332b0
DHParser Version 0.7.4 (2.7.2017)
.................................
- package now includes 'dhparser' script
- more transformation primitives for AST-transformation
- various bug fixes
DHParser Version 0.7.3 (27.6.2017)
..................................
......
......@@ -112,7 +112,7 @@ if __name__ == "__main__":
print(error)
sys.exit(1)
else:
print(result)
print(result.as_xml() if isinstance(result, Node) else result)
else:
print("Usage: {NAME}Compiler.py [FILENAME]")
'''
......
......@@ -236,7 +236,6 @@ class Node:
@property # this needs to be a (dynamic) property, in case sef.parser gets updated
def tag_name(self) -> str:
return self.parser.name or self.parser.ptype
# ONLY FOR DEBUGGING: return self.parser.name + ':' + self.parser.ptype
@property
def result(self) -> StrictResultType:
......@@ -761,11 +760,15 @@ def is_token(node, tokens: AbstractSet[str] = frozenset()) -> bool:
@transformation_factory
def has_name(node, tag_names: AbstractSet[str]) -> bool:
"""Checks if node has any of a given set of `tag names`.
See property `Node.tagname`."""
return node.tag_name in tag_names
@transformation_factory
def has_content(node, contents: AbstractSet[str]) -> bool:
"""Checks if the node's content (i.e. `str(node)`) matches any of
a given set of strings."""
return str(node) in contents
......@@ -809,7 +812,7 @@ def remove_tokens(node, tokens: AbstractSet[str] = frozenset()):
@transformation_factory
def remove_children(node, tag_names: AbstractSet[str]) -> bool:
def remove_children(node, tag_names: AbstractSet[str]):
"""Removes children by 'tag name'."""
remove_children_if(node, partial(has_name, tag_names=tag_names))
......
......@@ -18,4 +18,4 @@ permissions and limitations under the License.
import os
__version__ = '0.7.3' # + '_dev' + str(os.stat(__file__).st_mtime)
__version__ = '0.7.4' # + '_dev' + str(os.stat(__file__).st_mtime)
......@@ -7,4 +7,4 @@ The above copyright notice and this permission notice shall be included in all c
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
IMPORTANT: The module DHParser/typing.py is taken from the Python 3.5 distribution and thus licensed under the Python Software Foundation License!
IMPORTANT: The module DHParser/typing34.py is taken from the Python 3.5 distribution and thus licensed under the Python Software Foundation License!
......@@ -14,16 +14,16 @@ License
DHParser is open source software under the [MIT License](https://opensource.org/licenses/MIT)
**Exception**: The module ``DHParser/typing.py`` was directly taken from the
**Exception**: The module ``DHParser/typing34.py`` was directly taken from the
Python 3.5 source code in order for DHParser to be backwards compatible
with Python 3.4. The module ``DHParser/typing.py`` is licensed under the
with Python 3.4. The module ``DHParser/typing34.py`` is licensed under the
[Python Software Foundation License Version 2](https://docs.python.org/3.5/license.html)
Sources
-------
Find the sources on [gitlab.lrz.de/badw-it/DHParser](https://gitlab.lrz.de/badw-it/) .
Find the sources on [gitlab.lrz.de/badw-it/DHParser](https://gitlab.lrz.de/badw-it/DHParser) .
Get them with:
git clone https://gitlab.lrz.de/badw-it/DHParser
......@@ -32,9 +32,12 @@ Get them with:
Purpose
-------
Domain specific languages are widespread in computer sciences, but
seem to be underused in the Digital Humanities. While DSLs are
sometimes introduced to Digital-Humanities-projects as
DHParser leverages the power of Domain specific languages for the
Digital Humanities.
Domain specific languages are widespread in
computer sciences, but seem to be underused in the Digital Humanities.
While DSLs are sometimes introduced to Digital-Humanities-projects as
[practical adhoc-solution][Müller_2016], these solutions are often
somewhat "quick and dirty". In other words they are more of a hack
than a technology. The purpose of DHParser is to introduce
......@@ -50,11 +53,18 @@ generators out there, e.g. [Añez's grako parser generator][Añez_2017],
[Eclipse XText][XText_Website]. However, DHParser is
intended as a tool that is specifically geared towards digital
humanities applications, while most existing parser generators come
from compiler construction toolkits for programming languages. Also,
DHParser shall (in the future) serve as a teching tool, which
from compiler construction toolkits for programming languages.
While I expect DSLs in computer science and DSLs in the Digital
Humanities to be quite similar as far as the technological realization
is concerned, the use cases, requirements and challenges are somewhat
different. For example, in the humanities annotating text is a central
use case, which is mostly absent in computer science treatments.
These differences might sooner or later require to develop the
DSL-construction toolkits in a different direction. Also,
DHParser shall (in the future) serve as a teaching tool, which
influences some of its design decisions such as, for example, clearly
separating the parsing, syntax-tree-transformation and compilation
stages. Also, DHParser is intended as a tool to experiment with. One
stages. Finally, DHParser is intended as a tool to experiment with. One
possible research area is, how non
[context-free grammars](https://en.wikipedia.org/wiki/Context-free_grammar)
such as the grammars of [TeX][tex_stackexchange_no_bnf] or
......@@ -79,18 +89,228 @@ Further (intended) use cases are:
* Digital and cross-media editions
* Digital dictionaries
Description
-----------
... comming soon ;-)
For a simple self-test run `dhparser.py` from the command line. This
compiles the EBNF-Grammer in `examples/EBNF/EBNF.ebnf` and outputs the
Python-based parser class representing that grammar. The concrete and
abstract syntax tree as well as a full and abbreviated log of the
parsing process will be stored in a sub-directory named "DEBUG".
parsing process will be stored in a sub-directory named "LOG".
Introduction
------------
*This is an introduction for absolute beginners.
Full documentation coming soon...*
Motto: *Computers enjoy XML, humans don't.*
Suppose you are a literary scientist and you would like to edit a poem
like Heinrich Heine's "Lyrisches Intermezzo". Usually, the technology
of choice would be XML and you would use an XML-Editor to write
code something like this:
<?xml version="1.0" encoding="UTF-8" ?>
<gedicht>
<bibliographisches>
<autor gnd="118548018">Heinrich Heine</autor>
<werk href="http://www.deutschestextarchiv.de/book/show/heine_lieder_1827"
urn="nbn:de:kobv:b4-200905192211">
Buch der Lieder
</werk>
<ort gnd="4023118-5">Hamburg</ort>
<jahr>1927</jahr>
<serie>Lyrisches Intermezzo</serie>
<titel>IV.</titel>
</bibliographisches>
<text>
<strophe>
<vers>Wenn ich in deine Augen seh',</vers>
<vers>so schwindet all' mein Leid und Weh!</vers>
<vers>Doch wenn ich küsse deinen Mund,</vers>
<vers>so werd' ich ganz und gar gesund.</vers>
</strophe>
<strophe>
<vers>Wenn ich mich lehn' an deine Brust,</vers>
<vers>kommt's über mich wie Himmelslust,</vers>
<vers>doch wenn du sprichst: Ich liebe dich!</vers>
<vers>so muß ich weinen bitterlich.</vers>
</strophe>
</text>
</gedicht>
Now, while you might think that this all works well enough, there are
a few drawbacks to this approach:
- The syntax is cumbersome and the encoding not very legible to humans
working with it. (And I did not even use
[TEI-XML](http://www.tei-c.org/index.xml), yet...)
Editing and revising XML-encoded text is a pain. Just ask the
literary scientists who have to work with it.
- The XML encoding, especially TEI-XML, is often unintuitive. Only
experts understand it. Now, if you had the idea that your humanist
friend, who is not into digital technologies, might help you with
proof-reading, you better think about it again.
- There is an awful lot of typing to do: All those lengthy opening
and closing tags. This takes time...
- While looking for a good XML-Editor, you find that there hardly exist
any XML-Editors any more. (And for a reason, actually...) In
particular, there are no good open source XML-Editors.
On the other hand, there are good reasons why XML is used in the
humanities: Important encoding standards like TEI-XML are defined in
XML. Its strict syntax and the possibility to check data against a
schema help detecting and avoiding encoding errors. If the schema
is well defined, it is unambiguous, and it is easy to parse for a
computer. Most of these advantages, however, are on a technical level
and few of them are actually exclusive advantages of XML.
All in all this means, that while XML is a solid backend-technology,
it still is a pain to work with XML as a frontend-technology. This is
where DHParser comes in. It allows you to define your own domain
specific notation that is specifically tailored to your editing needs
and provides an infrastructure that - if you know a little
Python-programming - makes it very easy to convert your annotated
text into an XML-encoding of your choice. With DHParser, the same poem
above can be simply encoded like this:
Heinrich Heine <gnd:118548018>,
Buch der Lieder <urn:nbn:de:kobv:b4-200905192211>,
Hamburg <gnd:4023118-5>, 1927.
Lyrisches Intermezzo
IV.
Wenn ich in deine Augen seh',
so schwindet all' mein Leid und Weh!
Doch wenn ich küsse deinen Mund,
so werd' ich ganz und gar gesund.
Wenn ich mich lehn' an deine Brust,
kommt's über mich wie Himmelslust,
doch wenn du sprichst: Ich liebe dich!
so muß ich weinen bitterlich.
Yes, that's right. It is as simple as that. Observe, how much
more efficacious a verse like "Wenn ich mich lehn' an deine Brust, /
kommt's über mich wie Himmelslust," can be if it is not uglified by
enclosing XML tags ;-)
You might now wonder
whether the second version really does encode the same information
as the XML version. How, for example, would the computer know for
sure where a verse or a stanza starts and ends, or what is the
title and what a stanza? Well, for all these matters there exist
conventions that poets have been using for several thousand years.
For example, a verse always starts and ends in one and the same
line. There is always a gap between stanzas. And the title is always
written above the poem and not in the middle of it. So, if there is
a title at all, we can be sure that what is written in the first
line is the title and not a stanza.
DHParser is able to exploit all those hints in order to gather much the
same information as was encoded in the XML-Version. Don't believe it?
You can try: Download DHParser from the
[gitlab-repository](https://gitlab.lrz.de/badw-it/DHParser) and enter
the directory `examples/Tutorial` on the command line interface (shell).
Just run `python LyrikCompiler_example.py` (you need to have installed
[Python](https://www.python.org/) Version 3.4 or higher on your computer).
The output will be something like this:
<gedicht>
<bibliographisches>
<autor>
<namenfolge>Heinrich Heine</namenfolge>
<verknüpfung>gnd:118548018</verknüpfung>
</autor>
<werk>
<wortfolge>Buch der Lieder</wortfolge>
<verknüpfung>urn:nbn:de:kobv:b4-200905192211</verknüpfung>
</werk>
<ort>
<wortfolge>Hamburg</wortfolge>
<verknüpfung>gnd:4023118-5</verknüpfung>
</ort>
<jahr>1927</jahr>
</bibliographisches>
<serie>Lyrisches Intermezzo</serie>
<titel>IV.</titel>
<text>
<strophe>
<vers>Wenn ich in deine Augen seh',</vers>
<vers>so schwindet all' mein Leid und Weh!</vers>
<vers>Doch wenn ich küsse deinen Mund,</vers>
<vers>so werd' ich ganz und gar gesund.</vers>
</strophe>
<strophe>
<vers>Wenn ich mich lehn' an deine Brust,</vers>
<vers>kommt's über mich wie Himmelslust,</vers>
<vers>doch wenn du sprichst: Ich liebe dich!</vers>
<vers>so muß ich weinen bitterlich.</vers>
</strophe>
</text>
</gedicht>
Now, you might notice that this is not exactly the XML-encoding as shown
above. (Can you spot the differences?) But you will probably believe me
without further proof that it can easily be converted into the other
version and contains all the information that the other version contains.
How does DHParser achieve this? Well, there is the rub. In order to convert
the poem in the domain specific version into the XML-version, DHParser
requires a structural description of the domain specific encoding. This
is a bit similar to a document type definition (DTD) in XML. This
structural description uses a slightly enhanced version of the
[Extended-Backus-Naur-Form (EBNF)](https://en.wikipedia.org/wiki/Extended_Backus%E2%80%93Naur_form)
that is a well established formalism for the structural description of
formal languages in computer sciences. An excerpt of the EBNF-definition
of our domain-specific encoding for the poem looks like this. (We leave out
the meta-data here. See
[`examples/Tutorial/Lyrik.ebnf`](https://gitlab.lrz.de/badw-it/DHParser/blob/master/examples/Tutorial/Lyrik.ebnf)
for the full EBNF):
gedicht = { LEERZEILE }+ [serie] §titel §text /\s*/ §ENDE
serie = !(titel vers NZ vers) { NZ zeile }+ { LEERZEILE }+
titel = { NZ zeile}+ { LEERZEILE }+
zeile = { ZEICHENFOLGE }+
text = { strophe {LEERZEILE} }+
strophe = { NZ vers }+
vers = { ZEICHENFOLGE }+
ZEICHENFOLGE = /[^ \n<>]+/~
NZ = /\n/~
LEERZEILE = /\n[ \t]*(?=\n)/~
ENDE = !/./
Now, without going into too much detail here, let me just explain a few basics of
this formal description: The slashes `/` enclose ordinary regular expressions.
Thus, `NZ` for ("Neue Zeile", German for: "new line") is defined as `/\n/~` which
is the newline-token `\n` in a regular expression, plus further horizontal
whitespace (signified by the tilde `~`), if there is any.
The braces `{` `}` enclose items that can be repeated zero or more times; with
a `+` appended to the closing brace it means one or more times. Now, look at the
definition of `text` in the 6th line: `{ strophe {LEERZEILE} }+`. This reads
as follows: The text of the poem consists of a sequence of stanzas, each of which
is followed by a sequence of empty lines (German: "Leerzeilen"). If you now
look a the structural definition of a stanza, you find that it consists of a
sequence of verses, each of which starts, i.e. is preceded by a new line.
Can you figure out the rest? Hint: The square brackets `[` and `]` mean that an
item is optional and the `§` sign means that it is obligatory. (Strictly speaking,
the §-signs are not necessary, because an item that is not optional is always
obligatory, but the §-signs help the converter to produce the right error
messages.)
This should be enough for an introduction. It has shown the probably most important
use case of DHParser, i.e. as a frontend-technology for XML-encodings. Of course
it can just as well be used as a frontend for any other kind of structured data,
like SQL or graph-structured data. The latter, by the way, is a very reasonable
alternative to XML for edition projects with a complex transmission history.
See Andreas Kuczera's Blog-entry on
["Graphdatenbanken für Historiker"](http://mittelalter.hypotheses.org/5995).
References
......@@ -167,6 +387,12 @@ München 2016. Short-URL: [tiny.badw.de/2JVy][Müller_2016]
[Müller_2016]: https://f.hypotheses.org/wp-content/blogs.dir/1856/files/2016/12/Mueller_Anzeichnung_10_Vortrag_M%C3%BCnchen.pdf
Markus Voelter, Sebastian Benz, Christian Dietrich, Birgit Engelmann,
Mats Helander, Lennart Kats, Eelco Visser, Guido Wachsmuth:
DSL Engineering. Designing, Implementing and Using Domain-Specific Languages, 2013.
[http://dslbook.org/][Voelter_2013]
[voelter_2013]: http://dslbook.org/
[tex_stackexchange_no_bnf]: http://tex.stackexchange.com/questions/4201/is-there-a-bnf-grammar-of-the-tex-language
......
#!/bin/sh
python3 setup.py sdist
python3 setup.py sdist bdist
......@@ -36,12 +36,12 @@ CMDNAME = /\\\w+/~
NAME = /\w+/~
ESCAPED = /\\[%$&]/
BRACKETS = /[\[\]]/ # left or right square bracket: [ ]
TEXTCHUNK = /[^\\%$&\{\}\[\]\s\n]+/ # some piece of text excluding whitespace,
# linefeed and special characters
WSPC = /[ \t]+/ # (horizontal) whitespace
LF = !PARSEP /[ \t]*\n[ \t]*/ # LF but not an empty line
PARSEP = /[ \t]*\n[ \t]*\n[ \t]*/ # at least one empty line, i.e.
# [whitespace] linefeed [whitespace] linefeed
BRACKETS = /[\[\]]/ # left or right square bracket: [ ]
TEXTCHUNK = /[^\\%$&\{\}\[\]\s\n]+/ # some piece of text excluding whitespace,
# linefeed and special characters
WSPC = /[ \t]+/ # (horizontal) whitespace
LF = !PARSEP /[ \t]*\n[ \t]*/ # LF but not an empty line
PARSEP = /[ \t]*(?:\n[ \t]*)+\n[ \t]*/ # at least one empty line, i.e.
# [whitespace] linefeed [whitespace] linefeed
EOF = !/./
......@@ -13,6 +13,7 @@
Professoren, Philister und Vieh; welche vier Stände doch nichts weniger
als streng geschieden sind. Der Viehstand ist der bedeutendste.
Im allgemeinen werden die Bewohner Göttingens eingeteilt in Studenten,
Professoren, Philister und Vieh; welche vier Stände doch nichts weniger
als streng geschieden sind. Der Viehstand ist der bedeutendste.
......
#######################################################################
#
# AST SECTION - Can be edited. Changes will be preserved.
#
#######################################################################
# AST-transformation table for the Lyrik grammar: maps a node's tag name to
# the transformation (or list of transformations) to be applied to nodes
# bearing that tag name during AST generation (via `traverse`).
# NOTE(review): the special keys "+", "*" and the ":"-prefixed entries
# presumably address all nodes, otherwise-unmatched nodes, and anonymous
# parsers (by ptype) respectively -- confirm against DHParser's `traverse`.
Lyrik_AST_transformation_table = {
    # AST Transformations for the Lyrik-grammar
    "+": remove_empty,
    "bibliographisches":
        # drop layout newlines and the "," / "." delimiter tokens
        [remove_children('NZ'), remove_tokens],
    "autor": [],
    "werk": [],
    "untertitel": [],
    "ort": [],
    "jahr":
        [reduce_single_child],
    "wortfolge":
        # NOTE(review): `tag_names='WORT'` passes a plain string, so
        # `has_name` performs a substring membership test
        # (`node.tag_name in 'WORT'`); a set like {'WORT'} may be
        # intended -- verify.
        [reduce_children(partial(has_name, tag_names='WORT')), remove_last(is_whitespace), collapse],
    "namenfolge":
        # NOTE(review): same string-vs-set concern as "wortfolge" above.
        [reduce_children(partial(has_name, tag_names='NAME')), remove_last(is_whitespace),
         collapse],
    "verknüpfung":
        # strip the angle-bracket delimiters, keep only the target
        [remove_tokens('<', '>'), reduce_single_child],
    "ziel":
        reduce_single_child,
    "gedicht, strophe, text":
        [flatten, remove_children('LEERZEILE'), remove_children('NZ')],
    "titel, serie":
        [flatten, remove_children('LEERZEILE'), remove_children('NZ'), collapse],
    "zeile": [],
    "vers":
        collapse,
    "WORT": [],
    "NAME": [],
    "ZEICHENFOLGE":
        reduce_single_child,
    "NZ":
        reduce_single_child,
    "LEERZEILE": [],
    "JAHRESZAHL":
        [reduce_single_child],
    "ENDE": [],
    ":Whitespace":
        # collapse any whitespace node to a single blank
        map_content(lambda node : " "),
    ":Token, :RE":
        reduce_single_child,
    "*": replace_by_single_child
}
\ No newline at end of file
#!/usr/bin/python
#######################################################################
#
# SYMBOLS SECTION - Can be edited. Changes will be preserved.
#
#######################################################################
from functools import partial
import os
import sys
sys.path.append('../../')
try:
import regex as re
except ImportError:
import re
from DHParser.toolkit import logging, is_filename, load_if_file
from DHParser.parsers import Grammar, Compiler, nil_scanner, \
Lookbehind, Lookahead, Alternative, Pop, Required, Token, \
Optional, NegativeLookbehind, OneOrMore, RegExp, Retrieve, Series, RE, Capture, \
ZeroOrMore, Forward, NegativeLookahead, mixin_comment, compile_source, \
ScannerFunc, Synonym
from DHParser.syntaxtree import Node, traverse, remove_last, remove_first, \
remove_children_if, reduce_single_child, replace_by_single_child, remove_whitespace, \
remove_expendables, remove_tokens, flatten, is_whitespace, is_expendable, \
collapse, map_content, WHITESPACE_PTYPE, TOKEN_PTYPE, TransformationFunc, \
remove_children, remove_empty, reduce_children, has_content, has_name
#######################################################################
#
# SCANNER SECTION - Can be edited. Changes will be preserved.
#
#######################################################################
def LyrikScanner(text):
    """Identity scanner (preprocessor): Lyrik sources need no
    preprocessing, so the source text is returned unchanged."""
    return text
def get_scanner() -> ScannerFunc:
    """Return the scanner (preprocessing) function for Lyrik sources."""
    return LyrikScanner
#######################################################################
#
# PARSER SECTION - Don't edit! CHANGES WILL BE OVERWRITTEN!
#
#######################################################################
class LyrikGrammar(Grammar):
    r"""Parser for a Lyrik source file, with this grammar:

    gedicht = bibliographisches { LEERZEILE }+ [serie] §titel §text /\s*/ §ENDE
    bibliographisches = autor §"," [NZ] werk §"," [NZ] ort §"," [NZ] jahr §"."
    autor = namenfolge [verknüpfung]
    werk = wortfolge ["." §untertitel] [verknüpfung]
    untertitel = wortfolge [verknüpfung]
    ort = wortfolge [verknüpfung]
    jahr = JAHRESZAHL
    wortfolge = { WORT }+
    namenfolge = { NAME }+
    verknüpfung = "<" ziel ">"
    ziel = ZEICHENFOLGE
    serie = !(titel vers NZ vers) { NZ zeile }+ { LEERZEILE }+
    titel = { NZ zeile}+ { LEERZEILE }+
    zeile = { ZEICHENFOLGE }+
    text = { strophe {LEERZEILE} }+
    strophe = { NZ vers }+
    vers = { ZEICHENFOLGE }+
    WORT = /\w+/~
    NAME = /\w+\.?/~
    ZEICHENFOLGE = /[^ \n<>]+/~
    NZ = /\n/~
    LEERZEILE = /\n[ \t]*(?=\n)/~
    JAHRESZAHL = /\d\d\d\d/~
    ENDE = !/./
    """
    # NOTE: generated parser class ("PARSER SECTION - Don't edit!");
    # manual changes will be overwritten when the grammar is recompiled.

    # Fingerprint of the EBNF source this class was generated from.
    source_hash__ = "a2832bea27ad1a4e48e87ad7b1cef2c3"
    parser_initialization__ = "upon instantiation"
    COMMENT__ = r''  # empty: no comment syntax defined for Lyrik sources
    WSP__ = mixin_comment(whitespace=r'[\t ]*', comment=r'')
    wspL__ = ''      # no implicit whitespace to the left of tokens
    wspR__ = WSP__   # implicit horizontal whitespace to the right of tokens
    # --- terminal symbols (regular-expression parsers) ---
    ENDE = NegativeLookahead(RE('.', wR=''))  # succeeds only at end of input
    JAHRESZAHL = RE('\\d\\d\\d\\d')           # four-digit year
    LEERZEILE = RE('\\n[ \\t]*(?=\\n)')       # blank line (lookahead keeps 2nd \n)
    NZ = RE('\\n')                            # newline ("Neue Zeile")
    ZEICHENFOLGE = RE('[^ \\n<>]+')           # run of chars w/o space, newline, <, >
    NAME = RE('\\w+\\.?')                     # word, optionally followed by a dot
    WORT = RE('\\w+')                         # plain word
    # --- non-terminal symbols (defined bottom-up to satisfy references) ---
    vers = OneOrMore(ZEICHENFOLGE)
    strophe = OneOrMore(Series(NZ, vers))
    text = OneOrMore(Series(strophe, ZeroOrMore(LEERZEILE)))
    zeile = OneOrMore(ZEICHENFOLGE)
    titel = Series(OneOrMore(Series(NZ, zeile)), OneOrMore(LEERZEILE))
    serie = Series(NegativeLookahead(Series(titel, vers, NZ, vers)), OneOrMore(Series(NZ, zeile)), OneOrMore(LEERZEILE))
    ziel = Synonym(ZEICHENFOLGE)
    verknüpfung = Series(Token("<"), ziel, Token(">"))
    namenfolge = OneOrMore(NAME)
    wortfolge = OneOrMore(WORT)
    jahr = Synonym(JAHRESZAHL)
    ort = Series(wortfolge, Optional(verknüpfung))
    untertitel = Series(wortfolge, Optional(verknüpfung))
    werk = Series(wortfolge, Optional(Series(Token("."), Required(untertitel))), Optional(verknüpfung))
    autor = Series(namenfolge, Optional(verknüpfung))
    bibliographisches = Series(autor, Required(Token(",")), Optional(NZ), werk, Required(Token(",")), Optional(NZ), ort,
                               Required(Token(",")), Optional(NZ), jahr, Required(Token(".")))
    gedicht = Series(bibliographisches, OneOrMore(LEERZEILE), Optional(serie), Required(titel), Required(text),
                     RE('\\s*', wR=''), Required(ENDE))
    root__ = gedicht  # start symbol of the grammar
def get_grammar() -> LyrikGrammar:
    """Return the LyrikGrammar singleton, instantiating it lazily on the
    first call.  The instance is cached in the module-level global
    ``thread_local_Lyrik_grammar_singleton``."""
    global thread_local_Lyrik_grammar_singleton
    if 'thread_local_Lyrik_grammar_singleton' not in globals():
        thread_local_Lyrik_grammar_singleton = LyrikGrammar()
    return thread_local_Lyrik_grammar_singleton
#######################################################################
#
# AST SECTION - Can be edited. Changes will be preserved.
#
#######################################################################
def halt(node):
    """Debugging aid: abort loudly when reached.

    Intended to be inserted into an AST-transformation table so that
    processing stops as soon as a particular rule fires.  Raises an
    ``AssertionError`` explicitly instead of using ``assert False``,
    because a bare assert is silently stripped when Python runs with
    the ``-O`` flag, which would disable the trap.

    :param node: the node being processed; included in the error message
        to ease debugging.
    :raises AssertionError: always.
    """
    raise AssertionError("halt() reached with node: {!r}".format(node))
Lyrik_AST_transformation_table = {
# AST Transformations for the Lyrik-grammar
"+": remove_empty,
"bibliographisches":
[remove_children('NZ'), remove_tokens],
"autor": [],
"werk": [],
"untertitel": [],
"ort": [],
"jahr":
[reduce_single_child],
"wortfolge":
[reduce_children(has_name('WORT')), remove_last(is_whitespace), collapse],
"namenfolge":
[reduce_children(has_name('NAME')), remove_last(is_whitespace), collapse],
"verknüpfung":
[remove_tokens('<', '>'), reduce_single_child],
"ziel":
reduce_single_child,
"gedicht, strophe, text":
[flatten, remove_children('LEERZEILE'), remove_children('NZ')],
"titel, serie":
[flatten, remove_children('LEERZEILE'), remove_children('NZ'), collapse],
"zeile": [],
"vers":
collapse,
"WORT": [],
"NAME": [],
"ZEICHENFOLGE":
reduce_single_child,
"NZ":
reduce_single_child,