
Commit be79e2e0 authored by Eckhart Arnold

- renamed project to "DHParser" to avoid a conflict with an existing Python project named "PyDSL"; the new name also makes it clearer that this project is about the digital humanities
- extended the README.md file
parent 3371efce
@@ -549,11 +549,15 @@ def is_expendable(node):
return is_whitespace(node) or is_comment(node) or is_scanner_token(node)
+def is_token(node, token_set={}):
+    return node.parser.name == TOKEN_KEYWORD and (not token_set or node.result in token_set)
def remove_children_if(node, condition):
"""Removes all nodes from the result field if the function `condition` evaluates
to `True`."""
"""Removes all nodes from the result field if the function
``condition(child_node)`` evaluates to ``True``."""
if node.children:
-        node.result = tuple(r for r in node.result if not condition(r))
+        node.result = tuple(c for c in node.children if not condition(c))
remove_whitespace = partial(remove_children_if, condition=is_whitespace)
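# Illustrative sketch (not part of the commit): the partial() idiom above
# specializes remove_children_if() into a family of tree transformations.
# A minimal, runnable imitation with a hypothetical stand-in node type
# (the project's real Node class is assumed to expose `children` and a
# writable `result`, as used above):
from collections import namedtuple
from functools import partial

DemoNode = namedtuple('DemoNode', ['name', 'children'])  # hypothetical stand-in

def demo_remove_children_if(node, condition):
    # same filtering idea as remove_children_if() above, but non-destructive
    return DemoNode(node.name, tuple(c for c in node.children if not condition(c)))

demo_remove_whitespace = partial(demo_remove_children_if,
                                 condition=lambda n: n.name == 'whitespace')

tree = DemoNode('term', (DemoNode('factor', ()), DemoNode('whitespace', ())))
assert demo_remove_whitespace(tree).children == (DemoNode('factor', ()),)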
@@ -562,6 +566,14 @@ remove_scanner_tokens = partial(remove_children_if, condition=is_scanner_token)
remove_expendables = partial(remove_children_if, condition=is_expendable)
+def remove_tokens(node, tokens=set()):
+    """Removes any among a particular set of tokens from the immediate
+    descendants of a node. If ``tokens`` is the empty set, all tokens
+    are removed.
+    """
+    remove_children_if(node, partial(is_token, token_set=tokens))
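# Usage sketch (hypothetical token values): strip specific delimiter tokens
# from the immediate children of a node:
#     remove_tokens(node, tokens={'(', ')'})
# or, with the default empty set, remove every token child:
#     remove_tokens(node)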
def flatten(node):
"""Recursively flattens all unnamed sub-nodes, in case there is more
than one sub-node present. Flattening means that
@@ -582,28 +594,12 @@ def flatten(node):
node.result = tuple(new_result)
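# Illustration (hypothetical tree, written as an S-expression in the style of
# as_sexpr(); the ":series" name is made up): flattening dissolves an unnamed
# child and pulls its children up one level, so that
#     (term (factor "a") (:series (factor "b") (factor "c")))
# becomes
#     (term (factor "a") (factor "b") (factor "c"))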
-def remove_tokens(node, tokens=set()):
-    """Reomoves any among a particular set of tokens from the immediate
-    descendants of a node. If ``tokens`` is the empty set, all tokens
-    are removed.
-    """
-    if node.children:
-        if tokens:
-            node.result = tuple(child for child in node.children
-                                if child.parser.name != TOKEN_KEYWORD or
-                                child.result not in tokens)
-        else:
-            node.result = tuple(child for child in node.children
-                                if child.parser.name != TOKEN_KEYWORD)
-def remove_enclosing_delimiters(node):
-    """Removes the enclosing delimiters from a structure (e.g. quotation marks
+def remove_brackets(node):
+    """Removes any enclosing delimiters from a structure (e.g. quotation marks
from a literal or braces from a group).
"""
if len(node.children) >= 3:
assert isinstance(node.children[0].result, str) and \
isinstance(node.children[-1].result, str), node.as_sexpr()
assert not node.children[0].children and not node.children[-1].children, node.as_sexpr()
node.result = node.result[1:-1]
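# Illustration (hypothetical tree): the first and last children are string-valued
# delimiter leaves, which remove_brackets() strips away, so that
#     (group (:Token "(") (expression ...) (:Token ")"))
# becomes
#     (group (expression ...))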
@@ -1487,23 +1483,25 @@ class EBNFGrammar(GrammarBase):
root__ = syntax
remove_enclosing_delimiters = partial(remove_tokens, tokens={})
EBNFTransTable = {
# AST Transformations for EBNF-grammar
"syntax":
remove_expendables,
"directive, definition":
partial(remove_tokens, tokens={'@', '='}),
"expression":
"expression, chain":
[replace_by_single_child, flatten,
-         partial(remove_tokens, tokens={'|'})],
+         partial(remove_tokens, tokens={'|', '--'})],
"term":
[replace_by_single_child, flatten], # supports both idioms: "{ factor }+" and "factor { factor }"
"factor, flowmarker, retrieveop":
replace_by_single_child,
"group":
-        [remove_enclosing_delimiters, replace_by_single_child],
+        [remove_brackets, replace_by_single_child],
"oneormore, repetition, option":
-        [reduce_single_child, remove_enclosing_delimiters],
+        [reduce_single_child, remove_brackets],
"symbol, literal, regexp, list_":
[remove_expendables, reduce_single_child],
(TOKEN_KEYWORD, WHITESPACE_KEYWORD):
@@ -2160,6 +2158,8 @@ def source_changed(grammar_source, grammar_class):
def test(file_name):
global DEBUG
DEBUG = "DEBUG"
+    print(file_name)
with open('examples/' + file_name, encoding="utf-8") as f:
grammar = f.read()
README.md
-PyDSL
-=====
DHParser
========
A parser combinator based parsing and compiling infrastructure for domain
specific languages (DSL) in Digital Humanities projects.
Author: Eckhart Arnold, Bavarian Academy of Sciences
Email: arnold@badw.de
@@ -9,21 +11,121 @@ Email: arnold@badw.de
License
-------
-PyDSL is open source software under the [MIT License](https://opensource.org/licenses/MIT)
DHParser is open source software under the [MIT License](https://opensource.org/licenses/MIT)
Purpose
-------
Domain specific languages are widespread in computer science, but strangely underused in the
Digital Humanities. While DSLs are often introduced to Digital Humanities projects as a
[practical ad-hoc solution][Müller_2016], these solutions are often somewhat "quick and dirty";
in other words, they are more of a hack than a technology. The purpose of DHParser is to introduce
[DSLs as a technology][Arnold_2016] to the Digital Humanities. It is based on the well-known technology of
[EBNF][ISO_IEC_14977]-based parser generators, but employs the more modern form called "[parsing expression grammar][Ford_2004]"
and [parser combinators][Ford_20XX] as a variant of the classical recursive descent parser
(a toy sketch of this approach follows at the end of this section).
Why another parser generator? There are plenty of good parser generators out there, e.g. [Añez_2017]. However, DHParser is
intended as a tool that is specifically geared towards Digital Humanities applications, while most existing parser
generators come from compiler construction toolkits for programming languages. DHParser shall also (in the future)
serve as a teaching tool, which influences some of its design decisions, such as clearly separating
the parsing, syntax-tree-transformation and compilation stages. Finally, DHParser is intended as a tool to experiment with.
One possible research area is how non [context-free grammars](https://en.wikipedia.org/wiki/Context-free_grammar)
such as the grammars of [TeX][tex_stackexchange_no_bnf] or [CommonMark][MacFarlane_et_al_2017] can be described with
declarative languages in the spirit of, but beyond, EBNF, and what extensions of the parsing technology are necessary to
capture such languages.
Primary use case at the Bavarian Academy of Sciences and Humanities (for the time being):
A DSL for the "[Mittellateinisches Wörterbuch](http://www.mlw.badw.de/)"!
Further (intended) use cases are:
* LaTeX -> XML/HTML conversion. See this [discussion of why an EBNF parser for the complete TeX/LaTeX grammar][tex_stackexchange_no_bnf]
  is not possible.
* [CommonMark][MacFarlane_et_al_2017] and other DSLs for cross-media publishing of scientific literature, e.g. journal articles.
  (CommonMark and Markdown also go beyond what is feasible with pure EBNF-based parsers.)
* EBNF itself. DHParser is already self-hosting ;-)
* Digital and cross-media editions
* Digital dictionaries
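To convey the flavor of the parser combinator approach mentioned above, here is a deliberately
simplified, self-contained sketch. All names and signatures in it are made up for illustration
and do not reflect DHParser's actual API:

```python
from typing import Callable, Optional, Tuple

# A parse result: (matched text, remaining input), or None on failure.
ParseResult = Optional[Tuple[str, str]]
Parser = Callable[[str], ParseResult]

def literal(s: str) -> Parser:
    """Return a parser matching exactly the string ``s``."""
    def parse(text: str) -> ParseResult:
        return (s, text[len(s):]) if text.startswith(s) else None
    return parse

def alternative(p: Parser, q: Parser) -> Parser:
    """Ordered choice, as in parsing expression grammars:
    try ``p`` first and fall back to ``q`` only if ``p`` fails."""
    def parse(text: str) -> ParseResult:
        return p(text) or q(text)
    return parse

keyword = alternative(literal("while"), literal("if"))
assert keyword("if x > 0:") == ("if", " x > 0:")
```

The `alternative` combinator implements the *ordered* choice of parsing expression grammars:
unlike the unordered choice of EBNF, the second alternative is tried only if the first one
fails, which makes the result of a parse unambiguous by construction.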
Description
-----------
-... comming soon ;-)
A parser combinator based parsing and compiling infrastructure for domain
specific languages (DSL) in Python.

For a simple self-test, run `ParserCombinators.py` from the command line. This compiles the EBNF grammar in
`examples/EBNF/EBNF.ebnf` and outputs the Python-based parser class representing that grammar. The concrete and abstract
syntax trees as well as a full and an abbreviated log of the parsing process will be stored in a sub-directory named "DEBUG".
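The same self-test can also be triggered programmatically. A minimal sketch based on the
`test()` helper visible in the diff above, assuming that `ParserCombinators.py` guards its
command-line code so that importing it has no side effects:

```python
# sketch only: test() resolves its argument relative to the examples/ directory
from ParserCombinators import test

test('EBNF/EBNF.ebnf')   # parse logs and syntax trees end up in the DEBUG sub-directory
```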
-Primary use case: A DSL for the "Mittellateinische Wörterbuch"!

-For a simple self-test run `ParserCombinators.py` from the command line. This compiles the EBNF-Grammer in
-`examples/EBNF/EBNF.ebnf` and outputs its abstract syntax tree as well as a Python-based parser class representing
-that grammar.

-..to be continued
\ No newline at end of file
References
----------
Eckhart Arnold: Domänenspezifische Notationen. Eine (noch) unterschätzte Technologie in den Digitalen Geisteswissenschaften, presentation at the
[dhmuc-Workshop: Digitale Editionen und Auszeichnungssprachen](https://dhmuc.hypotheses.org/workshop-digitale-editionen-und-auszeichnungssprachen),
Munich 2016.
[Arnold_2016]: https://f.hypotheses.org/wp-content/blogs.dir/1856/files/2016/12/Mueller_Anzeichnung_10_Vortrag_M%C3%BCnchen.pdf
Juancarlo Añez: grako, a PEG parser generator in Python, [Añez_2017]
[Añez_2017]: https://bitbucket.org/apalala/grako
Brian Ford: Parsing Expression Grammars: A Recognition-Based Syntactic Foundation, Cambridge, Massachusetts, 2004.
[Ford_2004]: https://pdos.csail.mit.edu/~baford/packrat/popl04/peg-popl04.pdf
[Ford_20XX]: http://bford.info/packrat/
Richard A. Frost, Rahmatullah Hafiz and Paul Callaghan: Parser
Combinators for Ambiguous Left-Recursive Grammars, in: P. Hudak and
D.S. Warren (Eds.): PADL 2008, LNCS 4902, pp. 167–181, Springer-Verlag
Berlin Heidelberg 2008.
Dominikus Herzberg: Objekt-orientierte Parser-Kombinatoren in Python,
blog post, September 18, 2008, on denkspuren. gedanken, ideen,
anregungen und links rund um informatik-themen, URL: [Herzberg_2008a]
[Herzberg_2008a]: http://denkspuren.blogspot.de/2008/09/objekt-orientierte-parser-kombinatoren.html
Dominikus Herzberg: Eine einfache Grammatik für LaTeX, blog post,
September 18, 2008, on denkspuren. gedanken, ideen, anregungen und
links rund um informatik-themen, URL: [Herzberg_2008b]
[Herzberg_2008b]: http://denkspuren.blogspot.de/2008/09/eine-einfache-grammatik-fr-latex.html
Dominikus Herzberg: Uniform Syntax, blog post, February 27, 2007, on
denkspuren. gedanken, ideen, anregungen und links rund um
informatik-themen, URL: [Herzberg_2007]
[Herzberg_2007]: http://denkspuren.blogspot.de/2007/02/uniform-syntax.html
[ISO_IEC_14977]: http://www.cl.cam.ac.uk/~mgk25/iso-14977.pdf
John MacFarlane, David Greenspan, Vicent Marti, Neil Williams, Benjamin Dumke-von der Ehe, Jeff Atwood:
CommonMark. A strongly defined, highly compatible specification of Markdown, 2017.
[MacFarlane_et_al_2017]: http://commonmark.org/
Stefan Müller: DSLs in den digitalen Geisteswissenschaften, presentation at the
[dhmuc-Workshop: Digitale Editionen und Auszeichnungssprachen](https://dhmuc.hypotheses.org/workshop-digitale-editionen-und-auszeichnungssprachen),
Munich 2016.
[Müller_2016]: https://f.hypotheses.org/wp-content/blogs.dir/1856/files/2016/12/Mueller_Anzeichnung_10_Vortrag_M%C3%BCnchen.pdf
[tex_stackexchange_no_bnf]: http://tex.stackexchange.com/questions/4201/is-there-a-bnf-grammar-of-the-tex-language
[tex_stackexchange_latex_parsers]: http://tex.stackexchange.com/questions/4223/what-parsers-for-latex-mathematics-exist-outside-of-the-tex-engines
\ No newline at end of file
@@ -10,7 +10,8 @@ directive = "@" §symbol §"=" ( regexp | literal | list_ )
expression = term { "|" term }
term = { factor }+
-factor = [flowmarker] [retrieveop] symbol !"="  # negative lookahead to be sure it's not a definition
+factor = [flowmarker] chain
+       | [flowmarker] [retrieveop] symbol !"="  # negative lookahead to be sure it's not a definition
| [flowmarker] literal
| [flowmarker] regexp
| [flowmarker] group
@@ -27,6 +28,9 @@ option = "[" expression §"]"
oneormore = "{" expression "}+"
repetition = "{" expression §"}"
+chain = { link "--" }+ link  # chained regular expressions
+link = regexp | symbol  # semantic restriction: symbol must evaluate to a regexp or chain
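# illustrative example (not part of the grammar): a chain of two regular
# expressions matched in immediate succession, e.g.
#     word_num = /[A-Za-z]+/ -- /[0-9]+/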
symbol = /(?!\d)\w+/~ # e.g. expression, factor, parameter_list
literal = /"(?:[^"]|\\")*?"/~ # e.g. "(", '+', 'while'
| /'(?:[^']|\\')*?'/~ # whitespace following literals will be ignored tacitly.
@@ -36,5 +40,3 @@ regexp = /~?\/(?:[^\/]|(?<=\\)\/)*\/~?/~ # e.g. /\w+/, ~/#.*(?:\n|$)/~
list_ = /\w+\s*(?:,\s*\w+\s*)*/~ # comma separated list of symbols, e.g. BEGIN_LIST, END_LIST,
# BEGIN_QUOTE, END_QUOTE ; see CommonMark/markdown.py for an example
EOF = !/./
# EBNF-Grammar in EBNF
@ comment = /#.*(?:\n|$)/ # comments start with '#' and eat all chars up to and including '\n'
@ whitespace = /\s*/ # whitespace includes linefeed
@ literalws = right # trailing whitespace of literals will be ignored tacitly
syntax = [~//] { definition | directive } §EOF
definition = symbol §"=" expression
directive = "@" §symbol §"=" ( regexp | literal | list_ )
expression = term { "|" term }
term = { factor }+
factor = [flowmarker] [retrieveop] symbol !"=" # negative lookahead to be sure it's not a definition
| [flowmarker] literal
| [flowmarker] regexp
| [flowmarker] group
| [flowmarker] oneormore
| repetition
| option
flowmarker = "!" | "&" | "§" | # '!' negative lookahead, '&' positive lookahead, '§' required
"-!" | "-&" # '-' negative lookbehind, '-&' positive lookbehind
retrieveop = "::" | ":" # '::' pop, ':' retrieve
group = "(" expression §")"
option = "[" expression §"]"
oneormore = "{" expression "}+"
repetition = "{" expression §"}"
symbol = /(?!\d)\w+/~ # e.g. expression, factor, parameter_list
literal = /"(?:[^"]|\\")*?"/~ # e.g. "(", '+', 'while'
| /'(?:[^']|\\')*?'/~ # whitespace following literals will be ignored tacitly.
regexp = /~?\/(?:[^\/]|(?<=\\)\/)*\/~?/~ # e.g. /\w+/, ~/#.*(?:\n|$)/~
# '~' is a whitespace-marker, if present leading or trailing
# whitespace of a regular expression will be ignored tacitly.
list_ = /\w+\s*(?:,\s*\w+\s*)*/~ # comma separated list of symbols, e.g. BEGIN_LIST, END_LIST,
# BEGIN_QUOTE, END_QUOTE ; see CommonMark/markdown.py for an example
EOF = !/./