
Commit 1100df44 authored by eckhart

- refactorings

parent 5aa3acd2
# Transformation factory that can also dispatch on Union types
def transformation_factory(t=None):
    """Creates factory functions from transformation-functions that
    dispatch on the first parameter after the context parameter.
    Decorating a transformation-function that has more than merely the
    ``node``-parameter with ``transformation_factory`` creates a
    function with the same name, which returns a partial-function that
    takes just the context-parameter.
    Additionally, there is some syntactic sugar for
    transformation-functions that receive a collection as their second
    parameter and do not have any further parameters. In this case a
    list of parameters passed to the factory function will be converted
    into a collection.
    Main benefit is readability of processing tables.
    Usage:
        @transformation_factory(AbstractSet[str])
        def remove_tokens(context, tokens):
            ...
    or, alternatively:
        @transformation_factory
        def remove_tokens(context, tokens: AbstractSet[str]):
            ...
    Example:
        trans_table = { 'expression': remove_tokens('+', '-') }
    instead of:
        trans_table = { 'expression': partial(remove_tokens, tokens={'+', '-'}) }
    Parameters:
        t: type of the second argument of the transformation function,
            only necessary if the transformation function's parameter list
            does not have type annotations.
    """
    def decorator(f):
        sig = inspect.signature(f)
        params = list(sig.parameters.values())[1:]
        if len(params) == 0:
            return f  # '@transformer' not needed w/o free parameters
        assert t or params[0].annotation != params[0].empty, \
            "No type information on second parameter found! Please, use type " \
            "annotation or provide the type information via transformer-decorator."
        p1type = t or params[0].annotation
        p_types = (p1type,)
        if hasattr(p1type, '_subs_tree'):
            subs_tree = p1type._subs_tree()
            if isinstance(subs_tree, Container) and subs_tree[0] is Union:
                p_types = subs_tree[1:]
        f = singledispatch(f)
        for p1type in p_types:
            if len(params) == 1 and issubclass(p1type, Container) \
                    and not issubclass(p1type, Text) and not issubclass(p1type, ByteString):
                def gen_special(*args):
                    c = set(args) if issubclass(p1type, AbstractSet) else \
                        list(args) if issubclass(p1type, Sequence) else args
                    d = {params[0].name: c}
                    return partial(f, **d)
                f.register(p1type.__args__[0], gen_special)
            def gen_partial(*args, **kwargs):
                print(f.__name__)
                d = {p.name: arg for p, arg in zip(params, args)}
                d.update(kwargs)
                return partial(f, **d)
            f.register(p1type, gen_partial)
        return f
    if isinstance(t, type(lambda: 1)):
        # Provide for the case that transformation_factory has been
        # written as plain decorator and not as a function call that
        # returns the decorator proper.
        func = t
        t = None
        return decorator(func)
    else:
        return decorator
\ No newline at end of file
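For orientation, a minimal usage sketch that is not part of the commit: it presupposes the module-level imports the factory itself relies on (inspect, functools.singledispatch, functools.partial and the typing names Union, Container, Text, ByteString, AbstractSet, Sequence) as well as the older typing semantics (_subs_tree, subclass checks on parameterized generics) that this implementation was written against. The remove_tokens stub merely mirrors the docstring's own example.

from typing import AbstractSet

@transformation_factory(AbstractSet[str])
def remove_tokens(context, tokens):
    ...  # stub standing in for the real transformation in DHParser.transform

# Calling the decorated function with plain strings dispatches to the generated
# factory, which returns a partial with the tokens argument already bound:
trans_table = {'expression': remove_tokens('+', '-')}
# ...equivalent to, but more readable than:
# trans_table = {'expression': partial(remove_tokens, tokens={'+', '-'})}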
@@ -75,12 +75,12 @@ from DHParser import logging, is_filename, load_if_file, \\
Option, NegativeLookbehind, OneOrMore, RegExp, Retrieve, Series, RE, Capture, \\
ZeroOrMore, Forward, NegativeLookahead, mixin_comment, compile_source, \\
last_value, counterpart, accumulate, PreprocessorFunc, \\
Node, TransformationFunc, TransformationDict, TRUE_CONDITION, \\
Node, TransformationFunc, TransformationDict, \\
traverse, remove_children_if, merge_children, is_anonymous, \\
reduce_single_child, replace_by_single_child, replace_or_reduce, remove_whitespace, \\
content_from_child, replace_by_child, replace_or_reduce, remove_whitespace, \\
remove_expendables, remove_empty, remove_tokens, flatten, is_whitespace, \\
is_empty, is_expendable, collapse, replace_content, WHITESPACE_PTYPE, TOKEN_PTYPE, \\
remove_parser, remove_content, remove_brackets, replace_parser, \\
remove_nodes, remove_content, remove_brackets, replace_parser, \\
keep_children, is_one_of, has_content, apply_if, remove_first, remove_last
'''
......
@@ -27,7 +27,7 @@ from DHParser.parser import Grammar, mixin_comment, nil_preprocessor, Forward, R
from DHParser.syntaxtree import Node, TransformationFunc, WHITESPACE_PTYPE, TOKEN_PTYPE
from DHParser.toolkit import load_if_file, escape_re, md5, sane_parser_name, re, typing
from DHParser.transform import traverse, remove_brackets, \
reduce_single_child, replace_by_single_child, remove_expendables, \
content_from_child, replace_by_child, remove_expendables, \
remove_tokens, flatten, forbid, assert_content, remove_infix_operator
from DHParser.versionnumber import __version__
from typing import Callable, Dict, List, Set, Tuple
@@ -198,30 +198,30 @@ EBNF_AST_transformation_table = {
"+":
remove_expendables,
"syntax":
[], # otherwise '"*": replace_by_single_child' would be applied
[], # otherwise '"*": replace_by_child' would be applied
"directive, definition":
remove_tokens('@', '='),
"expression":
[replace_by_single_child, flatten, remove_tokens('|')], # remove_infix_operator],
[replace_by_child, flatten, remove_tokens('|')], # remove_infix_operator],
"term":
[replace_by_single_child, flatten], # supports both idioms: "{ factor }+" and "factor { factor }"
[replace_by_child, flatten], # supports both idioms: "{ factor }+" and "factor { factor }"
"factor, flowmarker, retrieveop":
replace_by_single_child,
replace_by_child,
"group":
[remove_brackets, replace_by_single_child],
[remove_brackets, replace_by_child],
"unordered":
remove_brackets,
"oneormore, repetition, option":
[reduce_single_child, remove_brackets,
[content_from_child, remove_brackets,
forbid('repetition', 'option', 'oneormore'), assert_content(r'(?!§)')],
"symbol, literal, regexp":
reduce_single_child,
content_from_child,
(TOKEN_PTYPE, WHITESPACE_PTYPE):
reduce_single_child,
content_from_child,
"list_":
[flatten, remove_infix_operator],
"*":
replace_by_single_child
replace_by_child
}
@@ -438,10 +438,10 @@ class EBNFCompiler(Compiler):
if rule.startswith('Alternative'):
transformations = '[replace_or_reduce]'
elif rule.startswith('Synonym'):
transformations = '[reduce_single_child]'
transformations = '[content_from_child]'
transtable.append(' "' + name + '": %s,' % transformations)
transtable.append(' ":Token, :RE": reduce_single_child,')
transtable += [' "*": replace_by_single_child', '}', '']
transtable.append(' ":Token, :RE": content_from_child,')
transtable += [' "*": replace_by_child', '}', '']
transtable += [TRANSFORMER_FACTORY.format(NAME=self.grammar_name)]
return '\n'.join(transtable)
......
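To see the effect of the renaming on generated code, the lines assembled above would now emit table entries of roughly the following shape. This is a sketch, not output captured from the commit: the grammar name Arithmetic and the rule names are invented, the table header is inferred from the AST tables shown elsewhere in this commit, and only the quoted entry strings appear literally in the code above.

Arithmetic_AST_transformation_table = {
    "factor": [replace_or_reduce],      # rule defined as an Alternative
    "alias": [content_from_child],      # rule defined as a Synonym
    ":Token, :RE": content_from_child,
    "*": replace_by_child
}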
@@ -1185,7 +1185,7 @@ class RE(Parser):
>>> result.structure()
'(:RE (:RegExp "Haus") (:Whitespace " "))'
>>> parser(' Haus').content()
' <<< Error on " Haus" | Parser did not match! Invalid source file?\\n Most advanced: None\\n Last match: None; >>> '
' <<< Error on " Haus" | Parser did not match! Invalid source file?\n Most advanced: None\n Last match: None; >>> '
EBNF-Notation: `/ ... /~` or `~/ ... /` or `~/ ... /~`
EBNF-Example: `word = /\w+/~`
@@ -1437,7 +1437,7 @@ class OneOrMore(UnaryOperator):
>>> Grammar(sentence)('Wo viel der Weisheit, da auch viel des Grämens.').content()
'Wo viel der Weisheit, da auch viel des Grämens.'
>>> Grammar(sentence)('.').content() # an empty sentence also matches
' <<< Error on "." | Parser did not match! Invalid source file?\\n Most advanced: None\\n Last match: None; >>> '
' <<< Error on "." | Parser did not match! Invalid source file?\n Most advanced: None\n Last match: None; >>> '
EBNF-Notation: `{ ... }+`
EBNF-Example: `sentence = { /\w+,?/ }+`
@@ -1480,7 +1480,7 @@ class Series(NaryOperator):
>>> Grammar(variable_name)('variable_1').content()
'variable_1'
>>> Grammar(variable_name)('1_variable').content()
' <<< Error on "1_variable" | Parser did not match! Invalid source file?\\n Most advanced: None\\n Last match: None; >>> '
' <<< Error on "1_variable" | Parser did not match! Invalid source file?\n Most advanced: None\n Last match: None; >>> '
EBNF-Notation: `... ...` (sequence of parsers separated by a blank or new line)
EBNF-Example: `series = letter letter_or_digit`
......
@@ -36,8 +36,8 @@ __all__ = ('TransformationDict',
'key_tag_name',
'traverse',
'is_named',
'replace_by_single_child',
'reduce_single_child',
'replace_by_child',
'content_from_child',
'replace_or_reduce',
'replace_parser',
'collapse',
@@ -52,8 +52,7 @@ __all__ = ('TransformationDict',
'is_one_of',
'has_content',
'remove_children_if',
'remove_children',
'remove_parser',
'remove_nodes',
'remove_content',
'remove_first',
'remove_last',
@@ -70,8 +69,7 @@ __all__ = ('TransformationDict',
'require',
'assert_content',
'assert_condition',
'assert_has_children',
'TRUE_CONDITION')
'assert_has_children')
TransformationProc = Callable[[List[Node]], None]
@@ -79,8 +77,10 @@ TransformationDict = Dict[str, Sequence[Callable]]
ProcessingTableType = Dict[str, Union[Sequence[Callable], TransformationDict]]
ConditionFunc = Callable # Callable[[List[Node]], bool]
KeyFunc = Callable[[Node], str]
CriteriaType = Union[int, str, Callable]
# TODO: Add more optional type dispatch parameters, e.g. t2=None, t3=None, t4=None
def transformation_factory(t=None):
"""Creates factory functions from transformation-functions that
dispatch on the first parameter after the context parameter.
@@ -99,7 +99,7 @@ def transformation_factory(t=None):
Main benefit is readability of processing tables.
Usage:
@transformation_factory(AbtractSet[str])
@transformation_factory(AbstractSet[str])
def remove_tokens(context, tokens):
...
or, alternatively:
@@ -128,8 +128,9 @@ def transformation_factory(t=None):
"annotation or provide the type information via transformer-decorator."
p1type = t or params[0].annotation
f = singledispatch(f)
if len(params) == 1 and issubclass(p1type, Container) and not issubclass(p1type, Text) \
and not issubclass(p1type, ByteString):
try:
if len(params) == 1 and issubclass(p1type, Container) \
and not issubclass(p1type, Text) and not issubclass(p1type, ByteString):
def gen_special(*args):
c = set(args) if issubclass(p1type, AbstractSet) else \
list(args) if issubclass(p1type, Sequence) else args
@@ -137,6 +138,8 @@ def transformation_factory(t=None):
return partial(f, **d)
f.register(p1type.__args__[0], gen_special)
except AttributeError:
pass # Union Type does not allow subclassing, but is not needed here
def gen_partial(*args, **kwargs):
d = {p.name: arg for p, arg in zip(params, args)}
@@ -196,8 +199,8 @@ def traverse(root_node: Node,
key_func yields node.parser.name.
Example:
table = { "term": [replace_by_single_child, flatten],
"factor, flowmarker, retrieveop": replace_by_single_child }
table = { "term": [replace_by_child, flatten],
"factor, flowmarker, retrieveop": replace_by_child }
traverse(node, table)
"""
# Is this optimization really needed?
@@ -262,13 +265,29 @@ def traverse(root_node: Node,
# ------------------------------------------------
def TRUE_CONDITION(context: List[Node]) -> bool:
return True
def pick_child(context: List[Node], criteria: CriteriaType):
"""Returns the first child that meets the criteria."""
if isinstance(criteria, int):
try:
return context[-1].children[criteria]
except IndexError:
return None
elif isinstance(criteria, str):
for child in context[-1].children:
if child.tag_name == criteria:
return child
return None
else: # assume criteria has type ConditionFunc
for child in context[-1].children:
context.append(child)
evaluation = criteria(context)
context.pop()
if evaluation:
return child
return None
def replace_child(node: Node):
assert len(node.children) == 1
child = node.children[0]
def replace_by(node: Node, child: Node):
if not child.parser.name:
child.parser = MockParser(node.parser.name, child.parser.ptype)
# parser names must not be overwritten, else: child.parser.name = node.parser.name
@@ -277,30 +296,56 @@ def replace_child(node: Node):
node.result = child.result
def reduce_child(node: Node):
assert len(node.children) == 1
node._errors.extend(node.children[0]._errors)
node.result = node.children[0].result
def reduce_child(node: Node, child: Node):
node._errors.extend(child._errors)
node.result = child.result
@transformation_factory(Callable)
def replace_by_single_child(context: List[Node], condition: Callable=TRUE_CONDITION):
# TODO: default value = lambda context: len(context[-1].children) == 1
# @transformation_factory(int, str, Callable)
# def replace_by_child(context: List[Node], criteria: CriteriaType=0):
# """
# Replace a node by the first of its immediate descendants
# that meets the `criteria`. The criteria can either be the
# index of the child (counting from zero), or the tag name or
# a boolean-valued function on the context of the child.
# If no child matching the criteria is found, the node will
# not be replaced.
# """
# child = pick_child(context, criteria)
# if child:
# print(child)
# replace_by(context[-1], child)
# @transformation_factory(int, str, Callable)
# def content_from_child(context: List[Node], criteria: CriteriaType=0):
# """
# Reduce a node, by transferring the result of the first of its
# immediate descendants that meets the `criteria` to this node,
# but keeping this node's parser entry. The criteria can either
# be the index of the child (counting from zero), or the tag
# name or a boolean-valued function on the context of the child.
# If no child matching the criteria is found, the node will
# not be replaced.
# """
# child = pick_child(context, criteria)
# if child:
# reduce_child(context[-1], child)
def replace_by_child(context: List[Node]):
"""
Remove single branch node, replacing it by its immediate descendant
if and only if the condision on the descendant is true.
(In case the descendant's name is empty (i.e. anonymous) the
name of this node's parser is kept.)
if and only if the condition on the descendant is true.
"""
node = context[-1]
if len(node.children) == 1:
context.append(node.children[0])
if condition(context):
replace_child(node)
context.pop()
replace_by(node, node.children[0])
@transformation_factory(Callable)
def reduce_single_child(context: List[Node], condition: Callable=TRUE_CONDITION):
def content_from_child(context: List[Node]):
"""
Reduce a single branch node, by transferring the result of its
immediate descendant to this node, but keeping this node's parser entry.
@@ -309,10 +354,7 @@ def reduce_single_child(context: List[Node], condition: Callable=TRUE_CONDITION)
"""
node = context[-1]
if len(node.children) == 1:
context.append(node.children[0])
if condition(context):
reduce_child(node)
context.pop()
reduce_child(node, node.children[0])
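# Illustration (not part of the commit): with the refactored helpers above, given a
# context whose last node is a single-child wrapper such as
#     (symbol (:RegExp "expression"))
# replace_by_child(context) substitutes the child for the wrapper (an anonymous child
# picks up the wrapper's parser name via MockParser), whereas content_from_child(context)
# keeps the wrapper's own parser entry but adopts the child's result and errors,
# yielding (symbol "expression"). Both transformations only act on nodes that have
# exactly one child.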
def is_named(context: List[Node]) -> bool:
@@ -333,7 +375,7 @@ def replace_or_reduce(context: List[Node], condition: Callable=is_named):
if len(node.children) == 1:
context.append(node.children[0])
if condition(context):
replace_child(node)
replace_by(node)
else:
reduce_child(node)
context.pop()
@@ -497,27 +539,27 @@ def remove_children_if(context: List[Node], condition: Callable): # , section:
node.result = tuple(c for c in node.children if not condition(context + [c]))
@transformation_factory(Callable)
def remove_children(context: List[Node],
condition: Callable = TRUE_CONDITION,
section: slice = slice(None)):
"""Removes all nodes from a slice of the result field if the function
`condition(child_node)` evaluates to `True`."""
node = context[-1]
if node.children:
c = node.children
N = len(c)
rng = range(*section.indices(N))
node.result = tuple(c[i] for i in range(N)
if i not in rng or not condition(context + [c[i]]))
# selection = []
# for i in range(N):
# context.append(c[i])
# if not i in rng or not condition(context):
# selection.append(c[i])
# context.pop()
# if len(selection) != c:
# node.result = tuple(selection)
# @transformation_factory(Callable)
# def remove_children(context: List[Node],
# condition: Callable = TRUE_CONDITION,
# section: slice = slice(None)):
# """Removes all nodes from a slice of the result field if the function
# `condition(child_node)` evaluates to `True`."""
# node = context[-1]
# if node.children:
# c = node.children
# N = len(c)
# rng = range(*section.indices(N))
# node.result = tuple(c[i] for i in range(N)
# if i not in rng or not condition(context + [c[i]]))
# # selection = []
# # for i in range(N):
# # context.append(c[i])
# # if not i in rng or not condition(context):
# # selection.append(c[i])
# # context.pop()
# # if len(selection) != c:
# # node.result = tuple(selection)
remove_whitespace = remove_children_if(is_whitespace) # partial(remove_children_if, condition=is_whitespace)
@@ -539,7 +581,7 @@ def remove_tokens(context: List[Node], tokens: AbstractSet[str] = frozenset()):
@transformation_factory
def remove_parser(context: List[Node], tag_names: AbstractSet[str]):
def remove_nodes(context: List[Node], tag_names: AbstractSet[str]):
"""Removes children by tag name."""
remove_children_if(context, partial(is_one_of, tag_name_set=tag_names))
......
@@ -366,22 +366,22 @@ scroll down to the AST section, you'll see something like this:
Lyrik_AST_transformation_table = {
# AST Transformations for the Lyrik-grammar
"+": remove_empty,
"bibliographisches": [remove_parser('NZ'), remove_tokens],
"bibliographisches": [remove_nodes('NZ'), remove_tokens],
"autor, werk, untertitel, ort": [],
"jahr": [reduce_single_child],
"jahr": [content_from_child],
"wortfolge": [flatten(is_one_of('WORT'), recursive=False), remove_last(is_whitespace), collapse],
"namenfolge": [flatten(is_one_of('NAME'), recursive=False), remove_last(is_whitespace), collapse],
"verknüpfung": [remove_tokens('<', '>'), reduce_single_child],
"ziel": reduce_single_child,
"gedicht, strophe, text": [flatten, remove_parser('LEERZEILE'), remove_parser('NZ')],
"titel, serie": [flatten, remove_parser('LEERZEILE'), remove_parser('NZ'), collapse],
"verknüpfung": [remove_tokens('<', '>'), content_from_child],
"ziel": content_from_child,
"gedicht, strophe, text": [flatten, remove_nodes('LEERZEILE'), remove_nodes('NZ')],
"titel, serie": [flatten, remove_nodes('LEERZEILE'), remove_nodes('NZ'), collapse],
"vers": collapse,
"zeile": [],
"ZEICHENFOLGE, NZ, JAHRESZAHL": reduce_single_child,
"ZEICHENFOLGE, NZ, JAHRESZAHL": content_from_child,
"WORT, NAME, LEERZEILE, ENDE": [],
":Whitespace": replace_content(lambda node : " "),
":Token, :RE": reduce_single_child,
"*": replace_by_single_child
":Token, :RE": content_from_child,
"*": replace_by_child
}
As you can see, AST-transformations are specified declaratively (with the
......
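To make the declarative style concrete, here is a hypothetical sketch of how such a table is applied. Only the traverse() call and the transformation names come from this commit; parse_lyrik and poem_source merely stand in for however the concrete syntax tree is obtained:

syntax_tree = parse_lyrik(poem_source)   # stand-in helper, not an API shown in this commit
traverse(syntax_tree, Lyrik_AST_transformation_table)
# After traversal, remove_nodes('NZ') has pruned the newline nodes wherever the table
# lists it, content_from_child has reduced single-child rules such as "jahr", and the
# catch-all "*": replace_by_child has collapsed any remaining single-child wrappers.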
@@ -25,9 +25,9 @@ from DHParser import logging, is_filename, load_if_file, \
last_value, counterpart, accumulate, PreprocessorFunc, \
Node, TransformationFunc, TransformationDict, TRUE_CONDITION, \
traverse, remove_children_if, merge_children, is_anonymous, \
reduce_single_child, replace_by_single_child, replace_or_reduce, remove_whitespace, \
content_from_child, replace_by_child, replace_or_reduce, remove_whitespace, \
remove_expendables, remove_empty, remove_tokens, flatten, is_whitespace, \
is_empty, is_expendable, collapse, replace_content, remove_parser, remove_content, remove_brackets, replace_parser, \
is_empty, is_expendable, collapse, replace_content, remove_nodes, remove_content, remove_brackets, replace_parser, \
keep_children, is_one_of, has_content, apply_if, remove_first, remove_last, \
WHITESPACE_PTYPE, TOKEN_PTYPE
@@ -159,8 +159,8 @@ BibTeX_AST_transformation_table = {
"content": [replace_or_reduce],
"plain_content": [],
"text": [],
":Token, :RE": reduce_single_child,
"*": replace_by_single_child
":Token, :RE": content_from_child,
"*": replace_by_child
}
......
@@ -22,10 +22,10 @@ from DHParser import logging, is_filename, load_if_file, \
last_value, counterpart, accumulate, PreprocessorFunc, \
Node, TransformationFunc, TransformationDict, TRUE_CONDITION, \
traverse, remove_children_if, merge_children, is_anonymous, \
reduce_single_child, replace_by_single_child, replace_or_reduce, remove_whitespace, \
content_from_child, replace_by_child, replace_or_reduce, remove_whitespace, \
remove_expendables, remove_empty, remove_tokens, flatten, is_whitespace, \
is_empty, is_expendable, collapse, replace_content, WHITESPACE_PTYPE, TOKEN_PTYPE, \
remove_parser, remove_content, remove_brackets, replace_parser, \
remove_nodes, remove_content, remove_brackets, replace_parser, \
keep_children, is_one_of, has_content, apply_if, remove_first, remove_last
@@ -156,8 +156,8 @@ EBNF_AST_transformation_table = {
"regexp": [],
"list_": [],
"EOF": [],
":Token, :RE": reduce_single_child,
"*": replace_by_single_child
":Token, :RE": content_from_child,
"*": replace_by_child
}
......
@@ -21,9 +21,9 @@ from DHParser import logging, is_filename, Grammar, Compiler, Lookbehind, Altern
ZeroOrMore, Forward, NegativeLookahead, mixin_comment, compile_source, \
PreprocessorFunc, TransformationDict, \
Node, TransformationFunc, traverse, remove_children_if, is_anonymous, \
reduce_single_child, replace_by_single_child, remove_whitespace, \
content_from_child, replace_by_child, remove_whitespace, \
flatten, is_empty, collapse, replace_content, remove_brackets, is_one_of, remove_first, \
remove_tokens, remove_parser, TOKEN_PTYPE
remove_tokens, remove_nodes, TOKEN_PTYPE
#######################################################################
@@ -230,7 +230,7 @@ class LaTeXGrammar(Grammar):
paragraph = Forward()
tabular_config = Forward()
text_element = Forward()
source_hash__ = "1ded00ed838b03fcffcc6cd4333d4ae0"
source_hash__ = "a078d3d46ee55a7543f37c62b3fb24a7"
parser_initialization__ = "upon instantiation"
COMMENT__ = r'%.*'
WHITESPACE__ = r'[ \t]*(?:\n(?![ \t]*\n)[ \t]*)?'
@@ -389,52 +389,52 @@ LaTeX_AST_transformation_table = {
"latexdoc": [],
"preamble": [],
"document": [flatten_structure],
"frontpages": reduce_single_child,
"frontpages": content_from_child,
"Chapters, Sections, SubSections, SubSubSections, Paragraphs, SubParagraphs": [],
"Chapter, Section, SubSection, SubSubSection, Paragraph, SubParagraph": [],
"heading": reduce_single_child,
"heading": content_from_child,
"Bibliography": [],
"Index": [],
"block_environment": replace_by_single_child,
"known_environment": replace_by_single_child,
"block_environment": replace_by_child,
"known_environment": replace_by_child,
"generic_block": [],
"begin_generic_block, end_generic_block": [remove_parser('NEW_LINE'), replace_by_single_child],
"begin_generic_block, end_generic_block": [remove_nodes('NEW_LINE'), replace_by_child],
"itemize, enumerate": [remove_brackets, flatten],
"item": [],
"figure": [],
"quotation": [reduce_single_child, remove_brackets],
"quotation": [content_from_child, remove_brackets],
"verbatim": [],
"tabular": [],
"tabular_config, block_of_paragraphs": [remove_brackets, reduce_single_child],
"tabular_config, block_of_paragraphs": [remove_brackets, content_from_child],
"tabular_row": [flatten, remove_tokens('&', '\\')],
"tabular_cell": [flatten, remove_whitespace],
"multicolumn": [remove_tokens('{', '}')],
"hline": [remove_whitespace, reduce_single_child],
"hline": [remove_whitespace, content_from_child],
"sequence": [flatten],
"paragraph": [flatten],
"text_element": replace_by_single_child,
"line_element": replace_by_single_child,
"inline_environment": replace_by_single_child,
"known_inline_env": replace_by_single_child,
"text_element": replace_by_child,
"line_element": replace_by_child,
"inline_environment": replace_by_child,
"known_inline_env": replace_by_child,
"generic_inline_env": [],
"begin_inline_env, end_inline_env": [replace_by_single_child],
"begin_environment, end_environment": [remove_brackets, reduce_single_child],
"inline_math": [remove_brackets, reduce_single_child],
"command": replace_by_single_child,
"known_command": replace_by_single_child,
"begin_inline_env, end_inline_env": [replace_by_child],
"begin_environment, end_environment": [remove_brackets, content_from_child],
"inline_math": [remove_brackets, content_from_child],
"command": replace_by_child,
"known_command": replace_by_child,
"text_command": [],
"generic_command": [flatten],
"footnote": [],
"includegraphics": [],
"caption": [],
"config": [remove_brackets, reduce_single_child],
"block": [remove_brackets, flatten, replace_by_single_child],
"config": [remove_brackets, content_from_child],
"block": [remove_brackets, flatten, replace_by_child],