Commit f277041a authored by di68kap's avatar di68kap
Browse files

- Test Case for Bug: Problem with greedy regex in ebnf.EBNFGrammar

parent 77bad89f
......@@ -21,3 +21,4 @@ test/tmp*
build/
dist/
MANIFEST
playground/*
......@@ -119,7 +119,8 @@ if __name__ == "__main__":
class GrammarError(Exception):
"""Raised when (already) the grammar of a domain specific language (DSL)
"""
Raised when (already) the grammar of a domain specific language (DSL)
contains errors.
"""
......@@ -129,7 +130,8 @@ class GrammarError(Exception):
class CompilationError(Exception):
"""Raised when a string or file in a domain specific language (DSL)
"""
Raised when a string or file in a domain specific language (DSL)
contains errors.
"""
def __init__(self, error_messages, dsl_text, dsl_grammar, AST, result):
......@@ -144,7 +146,8 @@ class CompilationError(Exception):
def grammar_instance(grammar_representation) -> Tuple[Grammar, str]:
"""Returns a grammar object and the source code of the grammar, from
"""
Returns a grammar object and the source code of the grammar, from
the given `grammar`-data which can be either a file name, ebnf-code,
python-code, a Grammar-derived grammar class or an instance of
such a class (i.e. a grammar object already).
......@@ -177,7 +180,8 @@ def compileDSL(text_or_file: str,
dsl_grammar: Union[str, Grammar],
ast_transformation: TransformationFunc,
compiler: Compiler) -> Any:
"""Compiles a text in a domain specific language (DSL) with an
"""
Compiles a text in a domain specific language (DSL) with an
EBNF-specified grammar. Returns the compiled text or raises a
compilation error.
......@@ -197,7 +201,8 @@ def compileDSL(text_or_file: str,
def raw_compileEBNF(ebnf_src: str, branding="DSL") -> EBNFCompiler:
"""Compiles an EBNF grammar file and returns the compiler object
"""
Compiles an EBNF grammar file and returns the compiler object
that was used and which can now be queried for the result as well
as skeleton code for scanner, transformer and compiler objects.
......@@ -218,7 +223,8 @@ def raw_compileEBNF(ebnf_src: str, branding="DSL") -> EBNFCompiler:
def compileEBNF(ebnf_src: str, branding="DSL") -> str:
"""Compiles an EBNF source file and returns the source code of a
"""
Compiles an EBNF source file and returns the source code of a
compiler suite with skeletons for scanner, transformer and
compiler.
......@@ -244,7 +250,8 @@ def compileEBNF(ebnf_src: str, branding="DSL") -> str:
def parser_factory(ebnf_src: str, branding="DSL") -> Grammar:
"""Compiles an EBNF grammar and returns a grammar-parser factory
"""
Compiles an EBNF grammar and returns a grammar-parser factory
function for that grammar.
Args:
......@@ -264,7 +271,8 @@ def parser_factory(ebnf_src: str, branding="DSL") -> Grammar:
def load_compiler_suite(compiler_suite: str) -> \
Tuple[ScannerFactoryFunc, ParserFactoryFunc, TransformerFactoryFunc, CompilerFactoryFunc]:
"""Extracts a compiler suite from file or string ``compiler suite``
"""
Extracts a compiler suite from file or string ``compiler suite``
and returns it as a tuple (scanner, parser, ast, compiler).
Returns:
......@@ -300,7 +308,8 @@ def load_compiler_suite(compiler_suite: str) -> \
def is_outdated(compiler_suite: str, grammar_source: str) -> bool:
"""Returns ``True`` if the ``compile_suite`` needs to be updated.
"""
Returns ``True`` if the ``compile_suite`` needs to be updated.
An update is needed, if either the grammar in the compiler suite
does not reflect the latest changes of ``grammar_source`` or if
......@@ -348,7 +357,8 @@ def run_compiler(text_or_file: str, compiler_suite: str) -> Any:
def compile_on_disk(source_file: str, compiler_suite="", extension=".xml"):
"""Compiles the a source file with a given compiler and writes the
"""
Compiles a source file with a given compiler and writes the
result to a file.
If no ``compiler_suite`` is given it is assumed that the source
......
......@@ -73,6 +73,7 @@ from DHParser.syntaxtree import WHITESPACE_PTYPE, TOKEN_PTYPE, ZOMBIE_PARSER, Pa
Node, TransformationFunc
from DHParser.toolkit import load_if_file, error_messages
__all__ = ['ScannerFunc',
'HistoryRecord',
'Parser',
......@@ -112,6 +113,7 @@ __all__ = ['ScannerFunc',
'compile_source']
########################################################################
#
# Grammar and parsing infrastructure
......@@ -128,6 +130,7 @@ LEFT_RECURSION_DEPTH = 20 if platform.python_implementation() == "PyPy" \
MAX_DROPOUTS = 25 # type: int
# stop trying to recover parsing after so many errors
class HistoryRecord:
"""
Stores debugging information about one completed step in the
......@@ -292,7 +295,8 @@ class Parser(ParserBase, metaclass=ParserMetaClass):
pass
def apply(self, func: ApplyFunc):
"""Applies function `func(parser)` recursively to this parser and all
"""
Applies function `func(parser)` recursively to this parser and all
descendants of the tree of parsers. The same function can never
be applied twice between calls of the ``reset()``-method!
"""
......@@ -305,7 +309,8 @@ class Parser(ParserBase, metaclass=ParserMetaClass):
def mixin_comment(whitespace: str, comment: str) -> str:
"""Returns a regular expression that merges comment and whitespace
"""
Returns a regular expression that merges comment and whitespace
regexps. Thus comments can occur wherever whitespace is allowed
and will be skipped just as implicit whitespace.
......@@ -327,9 +332,11 @@ class Grammar:
wspL__ = ''
wspR__ = WSP__
@classmethod
def _assign_parser_names(cls):
"""Initializes the `parser.name` fields of those
"""
Initializes the `parser.name` fields of those
Parser objects that are directly assigned to a class field with
the field's name, e.g.
class Grammar(Grammar):
......@@ -361,6 +368,7 @@ class Grammar:
parser.parser.name = entry
cls.parser_initialization__ = "done"
def __init__(self, root: Parser=None) -> None:
# if not hasattr(self.__class__, 'parser_initialization__'):
# self.__class__.parser_initialization__ = "pending"
......@@ -372,13 +380,16 @@ class Grammar:
self.dirty_flag__ = False
self.history_tracking__ = False
self._reset__()
# prepare parsers in the class, first
self._assign_parser_names()
# then deep-copy the parser tree from class to instance;
# parsers not connected to the root object will be copied later
# on demand (see Grammar.__getitem__()). Usually, the need to
# do so only arises during testing.
self.root__ = root if root else copy.deepcopy(self.__class__.root__)
if self.wspL__:
self.wsp_left_parser__ = Whitespace(self.wspL__) # type: ParserBase
self.wsp_left_parser__.grammar = self
......@@ -393,6 +404,7 @@ class Grammar:
self.wsp_right_parser__ = ZOMBIE_PARSER
self.root__.apply(self._add_parser__)
def __getitem__(self, key):
try:
return self.__dict__[key]
......@@ -406,6 +418,7 @@ class Grammar:
return self[key]
raise KeyError('Unknown parser "%s" !' % key)
def _reset__(self):
self.document__ = "" # type: str
# variables stored and recalled by Capture and Retrieve parsers
......@@ -422,8 +435,10 @@ class Grammar:
self.moving_forward__ = True # type: bool
self.left_recursion_encountered__ = False # type: bool
def _add_parser__(self, parser: Parser) -> None:
"""Adds the particular copy of the parser object to this
"""
Adds the particular copy of the parser object to this
particular instance of Grammar.
"""
if parser.name:
......@@ -436,8 +451,10 @@ class Grammar:
self.all_parsers__.add(parser)
parser.grammar = self
def __call__(self, document: str, start_parser="root__") -> Node:
"""Parses a document with with parser-combinators.
"""
Parses a document with parser-combinators.
Args:
document (str): The source text to be parsed.
......@@ -504,8 +521,10 @@ class Grammar:
result.pos = 0 # calculate all positions
return result
def push_rollback__(self, location, func):
"""Adds a rollback function that either removes or re-adds
"""
Adds a rollback function that either removes or re-adds
values on the variable stack (`self.variables`) that have been
added (or removed) by Capture or Pop Parsers, the results of
which have been dismissed.
......@@ -513,8 +532,10 @@ class Grammar:
self.rollback__.append((location, func))
self.last_rb__loc__ = location
def rollback_to__(self, location):
"""Rolls back the variable stacks (`self.variables`) to its
"""
Rolls back the variable stacks (`self.variables`) to their
state at an earlier location in the parsed document.
"""
while self.rollback__ and self.rollback__[-1][0] <= location:
......@@ -524,8 +545,10 @@ class Grammar:
self.last_rb__loc__ == self.rollback__[-1][0] if self.rollback__ \
else (len(self.document__) + 1)
def log_parsing_history__(self, log_file_name: str = '') -> None:
"""Writes a log of the parsing history of the most recently parsed
"""
Writes a log of the parsing history of the most recently parsed
document.
"""
def prepare_line(record):
......@@ -558,7 +581,8 @@ class Grammar:
def dsl_error_msg(parser: Parser, error_str: str) -> str:
"""Returns an error message for errors in the parser configuration,
"""
Returns an error message for errors in the parser configuration,
e.g. errors that result in infinite loops.
Args:
......@@ -578,6 +602,7 @@ def dsl_error_msg(parser: Parser, error_str: str) -> str:
return " ".join(msg)
########################################################################
#
# Token and Regular Expression parser classes (i.e. leaf classes)
......@@ -591,7 +616,8 @@ END_SCANNER_TOKEN = '\x1c'
def make_token(token: str, argument: str = '') -> str:
"""Turns the ``token`` and ``argument`` into a special token that
"""
Turns the ``token`` and ``argument`` into a special token that
will be caught by the `ScannerToken`-parser.
This function is a support function that should be used by scanners
......@@ -766,7 +792,8 @@ class RE(Parser):
class Token(RE):
"""Class Token parses simple strings. Any regular regular
"""
Class Token parses simple strings. Any regular
expression commands will be interpreted as a simple sequence of
characters.
......@@ -827,7 +854,8 @@ class NaryOperator(Parser):
class Synonym(UnaryOperator):
"""Simply calls another parser and encapsulates the result in
"""
Simply calls another parser and encapsulates the result in
another node if that parser matches.
This parser is needed to support synonyms in EBNF, e.g.
......@@ -954,7 +982,8 @@ class Series(NaryOperator):
class Alternative(NaryOperator):
"""Matches if at least one of several alternatives matches. Returns
"""
Matches if at least one of several alternatives matches. Returns
the first match.
This parser represents the EBNF-operator "|" with the qualification
......@@ -1010,6 +1039,7 @@ class Alternative(NaryOperator):
return self
########################################################################
#
# Flow control operators
......@@ -1259,6 +1289,7 @@ class Forward(Parser):
class Compiler:
def __init__(self, grammar_name="", grammar_source=""):
self.dirty_flag = False
self.set_grammar_name(grammar_name, grammar_source)
......
......@@ -34,7 +34,8 @@ from DHParser.dsl import compile_on_disk
def mock_syntax_tree(sexpr):
"""Generates a tree of nodes from an S-expression.
"""
Generates a tree of nodes from an S-expression.
Example:
>>> mock_syntax_tree("(a (b c))").as_sxpr()
......@@ -84,7 +85,8 @@ def mock_syntax_tree(sexpr):
def recompile_grammar(ebnf_filename, force=False) -> bool:
"""Recompiles an ebnf-grammar if necessary, that is if either no
"""
Recompiles an ebnf-grammar if necessary, that is if either no
corresponding 'XXXXCompiler.py'-file exists or if that file is
outdated.
......@@ -128,7 +130,8 @@ UNIT_STAGES = {'match', 'fail', 'ast', 'cst', '__ast__', '__cst__'}
def unit_from_configfile(config_filename):
"""Reads a grammar unit test from a config file.
"""
Reads a grammar unit test from a config file.
"""
cfg = configparser.ConfigParser(interpolation=None)
cfg.read(config_filename)
......@@ -152,7 +155,8 @@ def unit_from_configfile(config_filename):
def unit_from_json(json_filename):
"""Reads a grammar unit test from a json file.
"""
Reads a grammar unit test from a json file.
"""
with open(json_filename, 'r') as f:
unit = json.load(f)
......@@ -166,7 +170,8 @@ def unit_from_json(json_filename):
def unit_from_file(filename):
"""Reads a grammar unit test from a file. The format of the file is
"""
Reads a grammar unit test from a file. The format of the file is
determined by the ending of its name.
"""
if filename.endswith(".json"):
......@@ -178,7 +183,8 @@ def unit_from_file(filename):
def get_report(test_unit):
"""Returns a text-report of the results of a grammar unit test.
"""
Returns a text-report of the results of a grammar unit test.
"""
report = []
for parser_name, tests in test_unit.items():
......@@ -205,7 +211,8 @@ def get_report(test_unit):
def grammar_unit(test_unit, parser_factory, transformer_factory, report=True, verbose=False):
"""Unit tests for a grammar-parser and ast transformations.
"""
Unit tests for a grammar-parser and ast transformations.
"""
if isinstance(test_unit, str):
unit_dir, unit_name = os.path.split(os.path.splitext(test_unit)[0])
......@@ -288,7 +295,8 @@ def grammar_unit(test_unit, parser_factory, transformer_factory, report=True, ve
def grammar_suite(directory, parser_factory, transformer_factory, ignore_unknown_filetypes=False,
report=True, verbose=False):
"""Runs all grammar unit tests in a directory. A file is considered a test
"""
Runs all grammar unit tests in a directory. A file is considered a test
unit, if it has the word "test" in its name.
"""
all_errors = collections.OrderedDict()
......@@ -318,7 +326,8 @@ def grammar_suite(directory, parser_factory, transformer_factory, ignore_unknown
def runner(tests, namespace):
""" Runs all or some selected Python unit tests found in the
"""
Runs all or some selected Python unit tests found in the
namespace. To run all tests in a module, call
``runner("", globals())`` from within that module.
......
......@@ -113,7 +113,7 @@ text = { cfgtext | (BRACKETS //~) }+
cfgtext = { word_sequence | (ESCAPED //~) }+
word_sequence = { TEXTCHUNK //~ }+
blockcmd = "\" ("begin{" ("enumerate" | "itemize" | "figure" | "quote"
blockcmd = /\/ ("begin{" ("enumerate" | "itemize" | "figure" | "quote"
| "quotation" | "tabular") "}"
| "subsection" | "section" | "chapter" | "subsubsection"
| "paragraph" | "subparagraph" | "item")
......
......@@ -163,7 +163,7 @@ class LaTeXGrammar(Grammar):
cfgtext = { word_sequence | (ESCAPED //~) }+
word_sequence = { TEXTCHUNK //~ }+
blockcmd = "\" ("begin{" ("enumerate" | "itemize" | "figure" | "quote"
blockcmd = /A/ ("begin{" ("enumerate" | "itemize" | "figure" | "quote"
| "quotation" | "tabular") "}"
| "subsection" | "section" | "chapter" | "subsubsection"
| "paragraph" | "subparagraph" | "item")
......@@ -192,7 +192,7 @@ class LaTeXGrammar(Grammar):
block_enrivonment = Forward()
block_of_paragraphs = Forward()
text_elements = Forward()
source_hash__ = "519dd615a108d58ae0577825f2dddd39"
source_hash__ = "7ef00020ebbb2b82e36d38460de56370"
parser_initialization__ = "upon instantiation"
COMMENT__ = r'%.*(?:\n|$)'
WSP__ = mixin_comment(whitespace=r'[ \t]*(?:\n(?![ \t]*\n)[ \t]*)?', comment=r'%.*(?:\n|$)')
......@@ -208,7 +208,7 @@ class LaTeXGrammar(Grammar):
MATH = RE('[\\w_^{}[\\]]*')
NAME = Capture(RE('\\w+'))
CMDNAME = RE('\\\\(?:(?!_)\\w)+')
blockcmd = Series(Token("\\"), Alternative(Series(Token("begin{"), Alternative(Token("enumerate"), Token("itemize"), Token("figure"), Token("quote"), Token("quotation"), Token("tabular")), Token("}")), Token("subsection"), Token("section"), Token("chapter"), Token("subsubsection"), Token("paragraph"), Token("subparagraph"), Token("item")))
blockcmd = Series(RE('A', wR=''), Alternative(Series(Token("begin{"), Alternative(Token("enumerate"), Token("itemize"), Token("figure"), Token("quote"), Token("quotation"), Token("tabular")), Token("}")), Token("subsection"), Token("section"), Token("chapter"), Token("subsubsection"), Token("paragraph"), Token("subparagraph"), Token("item")))
word_sequence = OneOrMore(Series(TEXTCHUNK, RE('')))
cfgtext = OneOrMore(Alternative(word_sequence, Series(ESCAPED, RE(''))))
text = OneOrMore(Alternative(cfgtext, Series(BRACKETS, RE(''))))
......
......@@ -23,7 +23,7 @@ import sys
sys.path.extend(['../../', '../', './'])
from DHParser import testing
if not testing.recompile_grammar('LaTeX.ebnf'): # recompiles Grammar only if it has changed
if not testing.recompile_grammar('LaTeX.ebnf', force=True): # recompiles Grammar only if it has changed
with open('LaTeX_ebnf_ERRORS.txt') as f:
print(f.read())
sys.exit(1)
......
<?xml version="1.0" encoding="UTF-8"?>
<!-- DTD für MLW Erfassung neuer Artikel -->
<!-- erstellt von Ursula Welsch, BADW -->
<!-- Stand: 6.7.2017 -->
<!-- Parameter-Entities -->
<!ENTITY % textauszeichnungen "kursiv | gesperrt | gerade" >
<!-- Lexikon -->
<!ELEMENT MLW-test (artikel)+ >
<!-- Artikel -->
<!ELEMENT artikel (lemma-position, artikelkopf?, bedeutung-position, verweis-position?, artikel-verfasser) >
<!ATTLIST artikel xml:id ID #REQUIRED >
<!-- =========================-->
<!-- Lemma-Ansatz -->
<!-- =========================-->
<!ELEMENT lemma-position (((lemma, lemma-varianten?, grammatik-position) | (lemma-position | zusatz)+), etymologie-position?) >
<!-- Lemma -->
<!ELEMENT lemma (#PCDATA) >
<!ATTLIST lemma
klassisch (ja | nein) "ja"
gesichert (ja | nein) "ja"
>
<!-- Lemma-Varianten -->
<!ELEMENT lemma-varianten (lemma-variante+, zusatz?) >
<!ELEMENT lemma-variante (#PCDATA) >
<!ATTLIST lemma-variante kurz CDATA #IMPLIED >
<!-- Grammatik-Position -->
<!ELEMENT grammatik-position (grammatik, grammatik-varianten?) >
<!-- Grammatikangaben -->
<!ELEMENT grammatik (#PCDATA) >
<!ATTLIST grammatik
wortart (nomen | adjektiv | verb) #REQUIRED
klasse (us-i | a-ae | um-i | x-cis) #IMPLIED
genus (m | f | n) #IMPLIED
>
<!-- Grammatik-Varianten -->
<!ELEMENT grammatik-varianten (grammatik-variante+) >
<!ELEMENT grammatik-variante (grammatik, beleg) >
<!-- Etymologie-Position -->
<!ELEMENT etymologie-position (etymologie-variante+) >
<!ELEMENT etymologie-variante (etymologie-besonderheit?, etymologie?, beleg) >
<!ELEMENT etymologie (#PCDATA) >
<!ATTLIST etymologie-variante sprache (griech | lat) #IMPLIED >
<!ELEMENT etymologie-besonderheit (#PCDATA) >
<!-- =========================-->
<!-- Artikelkopf -->
<!-- =========================-->
<!ELEMENT artikelkopf ((schreibweisen-position, struktur-position?, gebrauch-position?, metrik-position?, verwechslung-position?) |
(struktur-position, gebrauch-position?, metrik-position?, verwechslung-position?) |
(gebrauch-position, metrik-position?, verwechslung-position?) |
(metrik-position, verwechslung-position?) |
verwechslung-position) >
<!-- Schreibweisen-Position -->
<!ELEMENT schreibweisen-position (schreibweisen-variante)+ >
<!ELEMENT schreibweisen-variante (schreibweise-besonderheit?, schreibweise, (zusatz, schreibweise)*, (beleg, (zusatz | beleg))*) >
<!ELEMENT schreibweise-besonderheit (#PCDATA) >
<!ELEMENT schreibweise (#PCDATA) >
<!ATTLIST schreibweise-besonderheit typ (script | form | script-form) #IMPLIED >
<!-- Position für Strukturelle/Grammatische Besonderheiten -->
<!ELEMENT struktur-position (struktur-variante+) >
<!ELEMENT struktur-variante (struktur-besonderheit?, struktur?, beleg) >
<!ELEMENT struktur-besonderheit (#PCDATA) >
<!ELEMENT struktur (#PCDATA) >
<!ATTLIST struktur-besonderheit typ (pendet | struct | struct-nota) #IMPLIED >
<!-- Position zu Gebrauchs-Besonderheiten -->
<!ELEMENT gebrauch-position (gebrauch-variante+) >
<!ELEMENT gebrauch-variante (gebrauch-besonderheit?, gebrauch?, beleg) >
<!ELEMENT gebrauch-besonderheit (#PCDATA) >
<!ELEMENT gebrauch (#PCDATA) >
<!ATTLIST gebrauch-besonderheit typ (usu | partic) #IMPLIED >
<!-- Position zu Metrisch / Rhythmischen Besonderheiten -->
<!ELEMENT metrik-position (metrik-variante+) >
<!ELEMENT metrik-variante (metrik-besonderheit?, metrik?, beleg) >
<!ELEMENT metrik-besonderheit (#PCDATA) >
<!ELEMENT metrik (#PCDATA) >
<!ATTLIST metrik-besonderheit typ (metr | rhythm) #IMPLIED >
<!-- Position zu Verwechselungsgefahren -->
<!ELEMENT verwechslung-position (verwechslung-variante+) >
<!ELEMENT verwechslung-variante (verwechslung-besonderheit?, verwechslung?, beleg) >
<!ELEMENT verwechslung-besonderheit (#PCDATA) >
<!ELEMENT verwechslung (#PCDATA) >
<!ATTLIST verwechslung-besonderheit typ (confunditur) "confunditur" >
<!-- =========================-->
<!-- Hauptteil: Bedeutungsposition -->
<!-- =========================-->
<!ELEMENT bedeutung-position (bedeutung+) >
<!ELEMENT bedeutung (((klassifikation, interpretament-zusatz?) | (interpretament-zusatz) | (interpretament, interpretament-deutsch, interpretament-zusatz*)), (bedeutung | beleg-position)+) >
<!ELEMENT klassifikation (#PCDATA) >
<!ATTLIST bedeutung nr CDATA #REQUIRED >
<!-- Interpretament -->
<!ELEMENT interpretament (#PCDATA) >
<!ELEMENT interpretament-deutsch (#PCDATA| verweis)* >
<!ELEMENT interpretament-zusatz (#PCDATA | verweis)* >
<!ATTLIST interpretament typ (lat | griech | botan) "lat" >
<!-- Beleg-Position -->
<!ELEMENT beleg-position (beleg+, zusatz?) >
<!-- Artikelverfasser -->
<!ELEMENT artikel-verfasser (#PCDATA) >
<!-- =========================-->
<!--Elemente an verschiedenen Stellen der Struktur -->
<!-- =========================-->
<!-- Zusätze an verschiedenen Stellen der Struktur -->
<!ELEMENT zusatz (#PCDATA) >
<!ATTLIST zusatz typ (al | sim | saepe | vel | vel-rarius | OFFEN) #REQUIRED >
<!-- Verweise an verschiedenen Stellen der Struktur -->
<!ELEMENT verweis EMPTY >
<!ATTLIST verweis
typ (beleg | artikel | literatur) #REQUIRED
ziel CDATA #REQUIRED
>
<!-- Belege an verschiedenen Stellen der Struktur -->
<!--<!ELEMENT beleg ((beleg-quelle, beleg-text) | (verweis+, zusatz?)) >-->
<!ELEMENT beleg ((beleg-quelle, beleg-text) | verweis) >
<!-- Belegquelle -->
<!ELEMENT beleg-quelle (autor, werk, stelle, datierung?) >
<!ELEMENT autor (#PCDATA) >
<!ELEMENT werk (#PCDATA) >
<!ELEMENT stelle (#PCDATA | hoch)* >
<!ELEMENT datierung (#PCDATA) >
<!ATTLIST beleg
id ID #REQUIRED
>
<!-- Belegtext -->
<!ELEMENT beleg-text (#PCDATA | lemma-beleg | redaktion-ergaenzung | lesart | hervorhebung | sigle | %textauszeichnungen;)* >
<!ELEMENT redaktion-ergaenzung (#PCDATA | %textauszeichnungen;)* > <!-- redaktionelle Ergänzungen des Artikelverfassers; die runden Klammern werden generiert -->
<!ELEMENT lemma-beleg (#PCDATA) > <!-- das Vorkommen des Lemmas im Belegtext, erfasst wird die Langform; die Kurzform kommt in das Attribut kurzform -->
<!ELEMENT lesart (#PCDATA | redaktion-ergaenzung)* > <!-- verschiedene Lesarten/Lemmavarianten, die hier mit aufgeführt werden -->
<!ELEMENT hervorhebung (#PCDATA) > <!-- kleine Eckchen vor einer runden Klammer, die als Lesart bezeichnet wird -->
<!ELEMENT sigle (#PCDATA) > <!-- Sigle einer Literaturstelle -->
<!ATTLIST lemma-beleg kurzform CDATA #IMPLIED >
<!-- Textauszeichnung an verschiedenen Stellen -->
<!ELEMENT kursiv (#PCDATA | gerade)* >
<!ELEMENT gesperrt (#PCDATA) >
<!ELEMENT gerade (#PCDATA | kursiv)* >
<!ELEMENT hoch (#PCDATA) >
imperium
(inp-), -i n.
script. : hym- : p. 1404, 28. em- :
Chron. Fred. 2,35sqq. capit. p. 43. 2,36 p. 60,10. ym- :
Chart. Sangall. A 194. impir- : p. 1404, 39. form. sing. : gen. :
-ri : l. 57. adde Annal. Plac. a. 1266 p. 516,21. -iae :
Chron. Fred. 2,33. p. 56,22. 2,35. abl. -um :
Chron. Fred. 2,15. 2,35sqq. capit. p. 43. confunditur c. imperitus :
p. 1404, 61.
I iussum, praeceptum, mandatum - Befehl, Anweisung, Auftrag :
A proprie :
1 in univ. : Leg. Burgund. Rom. 38,1 si quis ... nullo metu aut
imperio ad pacta venire conpellitur, sed libera voluntate pactum
inisse cognuscitur eqs. Ionas Bob. Columb. 1,17 p. 185,9 quae
(sc. ferae) ad imperium eius statim veniebant. Lex Baiuv. 1,10 hoc
(sc. lex) per inperium regis vel iudicis fiat. Arbeo Emm. 16 p. 49,17