Skip to content
GitLab
Projects
Groups
Snippets
/
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Sign in
Toggle navigation
Menu
9.2.2023: Due to updates GitLab will be unavailable for some minutes between 9:00 and 11:00.
Open sidebar
badw-it
DHParser
Commits
b1784f9f
Commit
b1784f9f
authored
Sep 18, 2017
by
Eckhart Arnold
Browse files
Merge branch 'master' of gitlab.lrz.de:badw-it/DHParser
parents
d06d7ead
468e2931
Changes
23
Hide whitespace changes
Inline
Side-by-side
.gitignore
View file @
b1784f9f
...
...
@@ -22,3 +22,4 @@ build/
dist/
MANIFEST
playground/*
DevScripts/DHParser.py
DHParser/__init__.py
View file @
b1784f9f
...
...
@@ -18,6 +18,7 @@ implied. See the License for the specific language governing
permissions and limitations under the License.
"""
from
.base
import
*
from
.dsl
import
*
from
.ebnf
import
*
from
.parser
import
*
...
...
@@ -30,4 +31,4 @@ from .versionnumber import __version__
__author__
=
"Eckhart Arnold <arnold@badw.de>"
__copyright__
=
"http://www.apache.org/licenses/LICENSE-2.0"
# __all__ = ['toolkit', 'syntaxtree', 'parser', 'transform', 'ebnf', 'dsl', 'testing', 'versionnumber'] # flat namespace
# __all__ = ['toolkit',
'base',
'syntaxtree', 'parser', 'transform', 'ebnf', 'dsl', 'testing', 'versionnumber'] # flat namespace
DHParser/base.py
0 → 100644
View file @
b1784f9f
"""base.py - various base classes that are used across several other
the DHParser-modules.
Copyright 2016 by Eckhart Arnold (arnold@badw.de)
Bavarian Academy of Sciences an Humanities (badw.de)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing
permissions and limitations under the License.
"""
import
collections
from
typing
import
Hashable
,
Iterable
,
Iterator
,
Optional
,
Tuple
# Explicit export list: the public API of this module.
__all__ = ('ParserBase',
           'WHITESPACE_PTYPE',
           'TOKEN_PTYPE',
           'MockParser',
           'ZombieParser',
           'ZOMBIE_PARSER',
           'Error',
           'is_error',
           'is_warning',
           'has_errors',
           'only_errors',
           'StringView',
           'EMPTY_STRING_VIEW')
#######################################################################
#
# parser base and mock parsers
#
#######################################################################
class ParserBase:
    """
    Common ancestor of all real and mock parser classes.

    It is defined here (rather than in parser.py) because Node objects
    require a parser object for instantiation.
    """

    def __init__(self, name=''):  # , pbases=frozenset()):
        self.name = name                             # type: str
        self._ptype = ':' + self.__class__.__name__  # type: str

    def __repr__(self):
        return self.name + self.ptype

    def __str__(self):
        # named parsers render as "name = <repr>", anonymous ones as bare repr
        if self.name:
            return self.name + ' = ' + repr(self)
        return repr(self)

    @property
    def ptype(self) -> str:
        """The parser's type tag, e.g. ':ParserBase'."""
        return self._ptype

    @property
    def repr(self) -> str:
        """The parser's name if it has one, otherwise its repr()."""
        return self.name if self.name else repr(self)
# Parser-type tags for anonymous whitespace- and token-parsers.
# Like all ptype values, they start with ':' (see ParserBase.__init__).
WHITESPACE_PTYPE = ':Whitespace'
TOKEN_PTYPE = ':Token'
class MockParser(ParserBase):
    """
    MockParser objects can be used to reconstruct syntax trees from a
    serialized form like S-expressions or XML. Mock objects can mimic
    different parser types by assigning them a ptype on initialization.

    Mock objects should not be used for anything other than
    syntax tree (re-)construction. In all other cases where a parser
    object substitute is needed, chose the singleton ZOMBIE_PARSER.
    """

    def __init__(self, name='', ptype=''):  # , pbases=frozenset()):
        # a mimicked ptype must look like a real one, i.e. start with ':'
        assert not ptype or ptype[0] == ':'
        super().__init__(name)
        # NOTE: super().__init__() already assigned self.name, so the
        # redundant re-assignment of the original version was dropped;
        # only the parser type needs to be overridden here.
        self._ptype = ptype or ':' + self.__class__.__name__
class ZombieParser(MockParser):
    """
    Serves as a substitute for a Parser instance.

    ``ZombieParser`` is the class of the singleton object
    ``ZOMBIE_PARSER``. The ``ZOMBIE_PARSER`` has a name and can be
    called, but it never matches. It serves as a substitute where only
    these (or one of these properties) is needed, but no real Parser-
    object is instantiated.
    """
    # class-level flag that enforces the singleton property
    alive = False

    def __init__(self):
        super(ZombieParser, self).__init__("__ZOMBIE__")
        # there may be exactly one instance, and no subclasses
        assert not self.__class__.alive, "There can be only one!"
        assert self.__class__ == ZombieParser, "No derivatives, please!"
        self.__class__.alive = True

    def __copy__(self):
        # copying the singleton yields the singleton itself
        return self

    def __deepcopy__(self, memo):
        # deep-copying the singleton also yields the singleton itself
        return self

    def __call__(self, text):
        """Better call Saul ;-)"""
        # never matches: no node, and the text is left untouched
        return None, text


# the singleton instance of ZombieParser
ZOMBIE_PARSER = ZombieParser()
#######################################################################
#
# error reporting
#
#######################################################################
class Error:
    """An error or warning message, tagged with a severity level,
    an optional error code and a source position."""

    __slots__ = ['message', 'level', 'code', 'pos', 'line', 'column']

    # severity levels: everything below ERROR counts as a warning
    WARNING = 1
    ERROR = 1000
    HIGHEST = ERROR

    def __init__(self, message: str, level: int = ERROR, code: Hashable = 0):
        assert level >= 0
        self.message = message
        # a level of 0 is promoted to a full error
        self.level = level or Error.ERROR
        self.code = code
        # position information; -1 means "not (yet) known"
        self.pos = -1
        self.line = -1
        self.column = -1

    def __str__(self):
        location = ("line: %3i, column: %2i, " % (self.line, self.column)
                    if self.line > 0 else '')
        return location + "%s: %s" % (self.level_str, self.message)

    @property
    def level_str(self):
        """A human-readable tag for the severity level."""
        if is_warning(self.level):
            return "Warning"
        return "Error"
def is_warning(level: int) -> bool:
    """Return True if `level` ranks below the error threshold."""
    return level < Error.ERROR
def is_error(level: int) -> bool:
    """Return True if `level` reaches the error threshold."""
    return level >= Error.ERROR
def has_errors(messages: Iterable[Error], level: int = Error.ERROR) -> bool:
    """
    Returns True, if at least one entry in `messages` has at
    least the given error `level`.
    """
    # any() short-circuits on the first match, just like the
    # original explicit loop did
    return any(msg.level >= level for msg in messages)
def only_errors(messages: Iterable[Error], level: int = Error.ERROR) -> Iterator[Error]:
    """
    Returns an Iterator that yields only those messages that have
    at least the given error level.
    """
    return (msg for msg in messages if msg.level >= level)
#######################################################################
#
# string view
#
#######################################################################
class StringView(collections.abc.Sized):
    """A rudimentary StringView class, just enough for the use cases
    in parser.py.

    Slicing Python-strings always yields copies of a segment of the original
    string. See: https://mail.python.org/pipermail/python-dev/2008-May/079699.html
    However, this becomes costly (in terms of space and as a consequence also
    time) when parsing longer documents. Unfortunately, Python's `memoryview`
    does not work for unicode strings. Hence, the StringView class.
    """
    __slots__ = ['text', 'begin', 'end', 'len', 'fullstring_flag']

    def __init__(self, text: str,
                 begin: Optional[int] = 0,
                 end: Optional[int] = None) -> None:
        self.text = text  # type: str
        # clamp begin/end to valid, non-negative indices within the text
        self.begin, self.end = StringView.real_indices(begin, end, len(text))
        self.len = max(self.end - self.begin, 0)  # type: int
        # remember whether the view covers the whole string, so that
        # slicing/copying can be skipped in that common case
        self.fullstring_flag = (self.begin == 0 and self.len == len(self.text))

    @staticmethod
    def real_indices(begin, end, len):
        """Resolves `begin` and `end` (which may be None or negative,
        slice-style) into concrete indices within [0, len]."""
        def pack(index, len):
            # negative indices count from the end; then clamp to [0, len]
            index = index if index >= 0 else index + len
            return 0 if index < 0 else len if index > len else index
        if begin is None:
            begin = 0
        if end is None:
            end = len
        return pack(begin, len), pack(end, len)

    def __bool__(self):
        return bool(self.text) and self.end > self.begin

    def __len__(self):
        return self.len

    def __str__(self):
        if self.fullstring_flag:  # optimization: avoid slicing/copying
            return self.text
        return self.text[self.begin:self.end]

    def __getitem__(self, index):
        # NOTE: as of now, StringView only supports slicing with an
        # (implicit) step size of 1; integer indices are not supported.
        start, stop = StringView.real_indices(index.start, index.stop, self.len)
        return StringView(self.text, self.begin + start, self.begin + stop)

    def __eq__(self, other):
        # PERFORMANCE WARNING: This creates copies of the strings
        return str(self) == str(other)

    def __hash__(self):
        # BUGFIX: defining __eq__ alone would set __hash__ to None and
        # make StringView unhashable; since equality is by string value,
        # hashing the string value keeps hash consistent with __eq__.
        return hash(str(self))

    def count(self, sub, start=None, end=None) -> int:
        """Returns the number of non-overlapping occurrences of `sub`
        within the view (or within the view-relative range [start:end])."""
        if self.fullstring_flag:
            return self.text.count(sub, start, end)
        elif start is None and end is None:
            return self.text.count(sub, self.begin, self.end)
        else:
            start, end = StringView.real_indices(start, end, self.len)
            return self.text.count(sub, self.begin + start, self.begin + end)

    def find(self, sub, start=None, end=None) -> int:
        """Returns the lowest view-relative index where `sub` is found,
        or -1 if `sub` does not occur (same contract as str.find)."""
        if self.fullstring_flag:
            return self.text.find(sub, start, end)
        elif start is None and end is None:
            pos = self.text.find(sub, self.begin, self.end)
            # BUGFIX: the original returned -1 - self.begin when not
            # found; translate only *found* positions to view-relative.
            return pos - self.begin if pos >= 0 else -1
        else:
            start, end = StringView.real_indices(start, end, self.len)
            pos = self.text.find(sub, self.begin + start, self.begin + end)
            return pos - self.begin if pos >= 0 else -1

    def rfind(self, sub, start=None, end=None) -> int:
        """Returns the highest view-relative index where `sub` is found,
        or -1 if `sub` does not occur (same contract as str.rfind)."""
        if self.fullstring_flag:
            return self.text.rfind(sub, start, end)
        if start is None and end is None:
            pos = self.text.rfind(sub, self.begin, self.end)
            # BUGFIX: see find() — only translate found positions
            return pos - self.begin if pos >= 0 else -1
        else:
            start, end = StringView.real_indices(start, end, self.len)
            pos = self.text.rfind(sub, self.begin + start, self.begin + end)
            return pos - self.begin if pos >= 0 else -1

    def startswith(self, prefix: str, start: int = 0,
                   end: Optional[int] = None) -> bool:
        """Tests whether the view (or its [start:end] sub-range, given in
        view-relative indices) starts with `prefix`."""
        start += self.begin
        end = self.end if end is None else self.begin + end
        return self.text.startswith(prefix, start, end)

    def match(self, regex):
        """Executes `regex.match` on the viewed section of the text.
        NOTE: the returned match object carries indices that refer to
        the underlying string; use index()/indices() to translate them."""
        return regex.match(self.text, pos=self.begin, endpos=self.end)

    def index(self, absolute_index: int) -> int:
        """
        Converts an index for a string watched by a StringView object
        to an index relative to the string view object, e.g.:

        >>> import re
        >>> sv = StringView('xxIxx')[2:3]
        >>> match = sv.match(re.compile('I'))
        >>> match.end()
        3
        >>> sv.index(match.end())
        1
        """
        return absolute_index - self.begin

    def indices(self, absolute_indices: Iterable[int]) -> Tuple[int, ...]:
        """Converts indices for a string watched by a StringView object
        to indices relative to the string view object. See also: `index()`
        """
        return tuple(index - self.begin for index in absolute_indices)

    def search(self, regex):
        """Executes `regex.search` on the viewed section of the text.
        NOTE: like match(), the result carries absolute indices."""
        return regex.search(self.text, pos=self.begin, endpos=self.end)


# shared empty view, to avoid constructing one over and over
EMPTY_STRING_VIEW = StringView('')
\ No newline at end of file
DHParser/dsl.py
View file @
b1784f9f
...
...
@@ -15,7 +15,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing
permissions and limitations under the License.
Module ``
DSLsupport
`` contains various functions to support the
Module ``
dsl
`` contains various functions to support the
compilation of domain specific languages based on an EBNF-grammar.
"""
...
...
@@ -26,9 +26,9 @@ try:
except
ImportError
:
import
re
try
:
from
typing
import
Any
,
cast
,
Tuple
,
Union
,
Iterable
from
typing
import
Any
,
cast
,
Tuple
,
Union
,
Iterator
,
Iterable
except
ImportError
:
from
.typing34
import
Any
,
cast
,
Tuple
,
Union
,
Iterable
from
.typing34
import
Any
,
cast
,
Tuple
,
Union
,
Iterator
,
Iterable
from
DHParser.ebnf
import
EBNFCompiler
,
grammar_changed
,
\
get_ebnf_preprocessor
,
get_ebnf_grammar
,
get_ebnf_transformer
,
get_ebnf_compiler
,
\
...
...
@@ -36,6 +36,7 @@ from DHParser.ebnf import EBNFCompiler, grammar_changed, \
from
DHParser.toolkit
import
logging
,
load_if_file
,
is_python_code
,
compile_python_object
from
DHParser.parser
import
Grammar
,
Compiler
,
compile_source
,
nil_preprocessor
,
PreprocessorFunc
from
DHParser.syntaxtree
import
Node
,
TransformationFunc
from
DHParser.base
import
Error
,
is_error
,
has_errors
,
only_errors
__all__
=
(
'GrammarError'
,
'CompilationError'
,
...
...
@@ -120,31 +121,48 @@ if __name__ == "__main__":
'''
class
GrammarError
(
Exception
):
class
DSLException
(
Exception
):
"""
Base class for DSL-exceptions.
"""
def
__init__
(
self
,
errors
):
assert
isinstance
(
errors
,
Iterator
)
or
isinstance
(
errors
,
list
)
\
or
isinstance
(
errors
,
tuple
)
self
.
errors
=
errors
def
__str__
(
self
):
return
'
\n
'
.
join
(
str
(
err
)
for
err
in
self
.
errors
)
class
GrammarError
(
DSLException
):
"""
Raised when (already) the grammar of a domain specific language (DSL)
contains errors.
"""
def
__init__
(
self
,
error_messages
,
grammar_src
):
self
.
error_messages
=
error_messages
def
__init__
(
self
,
errors
,
grammar_src
):
super
().
__init__
(
errors
)
self
.
grammar_src
=
grammar_src
class
CompilationError
(
Exception
):
class
CompilationError
(
DSL
Exception
):
"""
Raised when a string or file in a domain specific language (DSL)
contains errors.
"""
def
__init__
(
self
,
error
_message
s
,
dsl_text
,
dsl_grammar
,
AST
,
result
):
s
elf
.
error_messages
=
error_messages
def
__init__
(
self
,
errors
,
dsl_text
,
dsl_grammar
,
AST
,
result
):
s
uper
().
__init__
(
errors
)
self
.
dsl_text
=
dsl_text
self
.
dsl_grammar
=
dsl_grammar
self
.
AST
=
AST
self
.
result
=
result
def
__str__
(
self
):
return
'
\n
'
.
join
(
self
.
error_messages
)
def
error_str
(
messages
:
Iterable
[
Error
])
->
str
:
"""
Returns all true errors (i.e. not just warnings) from the
`messages` as a concatenated multiline string.
"""
return
'
\n\n
'
.
join
(
str
(
m
)
for
m
in
messages
if
is_error
(
m
.
level
))
def
grammar_instance
(
grammar_representation
)
->
Tuple
[
Grammar
,
str
]:
...
...
@@ -158,13 +176,13 @@ def grammar_instance(grammar_representation) -> Tuple[Grammar, str]:
# read grammar
grammar_src
=
load_if_file
(
grammar_representation
)
if
is_python_code
(
grammar_src
):
parser_py
,
error
s
,
AST
=
grammar_src
,
''
,
None
parser_py
,
message
s
,
AST
=
grammar_src
,
[]
,
None
else
:
with
logging
(
False
):
parser_py
,
error
s
,
AST
=
compile_source
(
grammar_src
,
None
,
parser_py
,
message
s
,
AST
=
compile_source
(
grammar_src
,
None
,
get_ebnf_grammar
(),
get_ebnf_transformer
(),
get_ebnf_compiler
())
if
errors
:
raise
GrammarError
(
'
\n\n
'
.
join
(
error
s
),
grammar_src
)
if
has_
errors
(
messages
)
:
raise
GrammarError
(
only_errors
(
message
s
),
grammar_src
)
parser_root
=
compile_python_object
(
DHPARSER_IMPORTS
+
parser_py
,
'\w+Grammar$'
)()
else
:
# assume that dsl_grammar is a ParserHQ-object or Grammar class
...
...
@@ -194,11 +212,11 @@ def compileDSL(text_or_file: str,
assert
isinstance
(
compiler
,
Compiler
)
parser
,
grammar_src
=
grammar_instance
(
dsl_grammar
)
result
,
error
s
,
AST
=
compile_source
(
text_or_file
,
preprocessor
,
parser
,
result
,
message
s
,
AST
=
compile_source
(
text_or_file
,
preprocessor
,
parser
,
ast_transformation
,
compiler
)
if
errors
:
if
has_
errors
(
messages
)
:
src
=
load_if_file
(
text_or_file
)
raise
CompilationError
(
errors
,
src
,
grammar_src
,
AST
,
result
)
raise
CompilationError
(
only_
errors
(
messages
)
,
src
,
grammar_src
,
AST
,
result
)
return
result
...
...
@@ -298,10 +316,10 @@ def load_compiler_suite(compiler_suite: str) -> \
else
:
# assume source is an ebnf grammar. Is there really any reasonable application case for this?
with
logging
(
False
):
compile_py
,
error
s
,
AST
=
compile_source
(
source
,
None
,
compile_py
,
message
s
,
AST
=
compile_source
(
source
,
None
,
get_ebnf_grammar
(),
get_ebnf_transformer
(),
get_ebnf_compiler
())
if
errors
:
raise
GrammarError
(
'
\n\n
'
.
join
(
error
s
),
source
)
if
has_
errors
(
messages
)
:
raise
GrammarError
(
only_errors
(
message
s
),
source
)
preprocessor
=
get_ebnf_preprocessor
parser
=
get_ebnf_grammar
ast
=
get_ebnf_transformer
...
...
@@ -388,8 +406,7 @@ def compile_on_disk(source_file: str, compiler_suite="", extension=".xml"):
extension.
Returns:
A list of error messages or an empty list if there were no
errors.
A (potentially empty) list of error or warning messages.
"""
filepath
=
os
.
path
.
normpath
(
source_file
)
# with open(source_file, encoding="utf-8") as f:
...
...
@@ -405,9 +422,9 @@ def compile_on_disk(source_file: str, compiler_suite="", extension=".xml"):
cfactory
=
get_ebnf_compiler
compiler1
=
cfactory
()
compiler1
.
set_grammar_name
(
compiler_name
,
source_file
)
result
,
error
s
,
ast
=
compile_source
(
source_file
,
sfactory
(),
pfactory
(),
tfactory
(),
compiler1
)
if
errors
:
return
error
s
result
,
message
s
,
ast
=
compile_source
(
source_file
,
sfactory
(),
pfactory
(),
tfactory
(),
compiler1
)
if
has_
errors
(
messages
)
:
return
message
s
elif
cfactory
==
get_ebnf_compiler
:
# trans == get_ebnf_transformer or trans == EBNFTransformer: # either an EBNF- or no compiler suite given
ebnf_compiler
=
cast
(
EBNFCompiler
,
compiler1
)
...
...
@@ -484,7 +501,7 @@ def compile_on_disk(source_file: str, compiler_suite="", extension=".xml"):
finally
:
if
f
:
f
.
close
()
return
[]
return
messages
def
recompile_grammar
(
ebnf_filename
,
force
=
False
)
->
bool
:
...
...
@@ -511,19 +528,20 @@ def recompile_grammar(ebnf_filename, force=False) -> bool:
base
,
ext
=
os
.
path
.
splitext
(
ebnf_filename
)
compiler_name
=
base
+
'Compiler.py'
error_file_name
=
base
+
'_ebnf_ERRORS.txt'
error
s
=
[]
# type: Iterable[str]
message
s
=
[]
# type: Iterable[str]
if
(
not
os
.
path
.
exists
(
compiler_name
)
or
force
or
grammar_changed
(
compiler_name
,
ebnf_filename
)):
# print("recompiling parser for: " + ebnf_filename)
error
s
=
compile_on_disk
(
ebnf_filename
)
if
error
s
:
message
s
=
compile_on_disk
(
ebnf_filename
)
if
message
s
:
# print("Errors while compiling: " + ebnf_filename + '!')
with
open
(
error_file_name
,
'w'
)
as
f
:
for
e
in
error
s
:
f
.
write
(
e
)
with
open
(
error_file_name
,
'w'
,
encoding
=
"UTF-8"
)
as
f
:
for
e
in
message
s
:
f
.
write
(
str
(
e
)
)
f
.
write
(
'
\n
'
)
return
False
if
has_errors
(
messages
):
return
False
if
not
error
s
and
os
.
path
.
exists
(
error_file_name
):
if
not
message
s
and
os
.
path
.
exists
(
error_file_name
):
os
.
remove
(
error_file_name
)
return
True
DHParser/ebnf.py
View file @
b1784f9f
...
...
@@ -33,7 +33,8 @@ from DHParser.toolkit import load_if_file, escape_re, md5, sane_parser_name
from
DHParser.parser
import
Grammar
,
mixin_comment
,
nil_preprocessor
,
Forward
,
RE
,
NegativeLookahead
,
\
Alternative
,
Series
,
Option
,
Required
,
OneOrMore
,
ZeroOrMore
,
Token
,
Compiler
,
\
PreprocessorFunc
from
DHParser.syntaxtree
import
WHITESPACE_PTYPE
,
TOKEN_PTYPE
,
Node
,
TransformationFunc
from
DHParser.syntaxtree
import
Node
,
TransformationFunc
from
DHParser.base
import
WHITESPACE_PTYPE
,
TOKEN_PTYPE
,
Error
from
DHParser.transform
import
TransformationDict
,
traverse
,
remove_brackets
,
\
reduce_single_child
,
replace_by_single_child
,
remove_expendables
,
\
remove_tokens
,
flatten
,
forbid
,
assert_content
,
remove_infix_operator
...
...
@@ -397,8 +398,7 @@ class EBNFCompiler(Compiler):
'literalws'
:
[
'right'
],
'tokens'
:
set
(),
# alt. 'preprocessor_tokens'
'filter'
:
dict
(),
# alt. 'filter'
'ignorecase'
:
False
,
'testing'
:
False
}
'ignorecase'
:
False
}
@
property
def
result
(
self
)
->
str
:
...
...
@@ -544,22 +544,18 @@ class EBNFCompiler(Compiler):
# check for unconnected rules
if
not
self
.
directives
[
'testing'
]:
defined_symbols
.
difference_update
(
self
.
RESERVED_SYMBOLS
)
def
remove_connections
(
symbol
):
if
symbol
in
defined_symbols
:
defined_symbols
.
remove
(
symbol
)
for
related
in
self
.
rules
[
symbol
][
1
:]:
remove_connections
(
str
(
related
))
remove_connections
(
self
.
root_symbol
)
for
leftover
in
defined_symbols
:
self
.
rules
[
leftover
][
0
].
add_error
((
'Rule "%s" is not connected to parser '
'root "%s" !'
)
%
(
leftover
,
self
.
root_symbol
)
+
' (Use directive "@testing=True" '
'to supress this error message.)'
)
# root_node.error_flag = True
defined_symbols
.
difference_update
(
self
.
RESERVED_SYMBOLS
)
def
remove_connections
(
symbol
):
if
symbol
in
defined_symbols
:
defined_symbols
.
remove
(
symbol
)
for
related
in
self
.
rules
[
symbol
][
1
:]:
remove_connections
(
str
(
related
))
remove_connections
(
self
.
root_symbol
)
for
leftover
in
defined_symbols
:
self
.
rules
[
leftover
][
0
].
add_error
((
'Rule "%s" is not connected to '
'parser root "%s" !'
)
%
(
leftover
,
self
.
root_symbol
),
Error
.
WARNING
)
# set root_symbol parser and assemble python grammar definition
...
...
@@ -587,7 +583,7 @@ class EBNFCompiler(Compiler):
else
:
assert
nd
.
parser
.
name
==
"directive"
,
nd
.
as_sxpr
()
self
.
compile
(
nd
)
node
.
error_flag
=
node
.
error_flag
or
nd
.
error_flag
node
.
error_flag
=
max
(
node
.
error_flag
,
nd
.
error_flag
)
self
.
definitions
.
update
(
definitions
)
return
self
.
assemble_parser
(
definitions
,
node
)
...
...
@@ -679,9 +675,9 @@ class EBNFCompiler(Compiler):
if
value
:
self
.
re_flags
.
add
(
'i'
)
elif
key
==
'testing'
:
value
=
str
(
node
.
children
[
1
])
self
.
directives
[
'testing'
]
=
value
.
lower
()
not
in
{
"off"
,
"false"
,
"no"
}
#
elif key == 'testing':
#
value = str(node.children[1])
#
self.directives['testing'] = value.lower() not in {"off", "false", "no"}
elif
key
==
'literalws'
:
value
=
{
item
.
lower
()
for
item
in
self
.
compile
(
node
.
children
[
1
])}
...
...
DHParser/parser.py
View file @
b1784f9f
...
...
@@ -75,10 +75,10 @@ except ImportError:
from
.typing34
import
Any
,
Callable
,
cast
,
Dict
,
Iterator
,
List
,
Set
,
Tuple
,
Union
,
Optional
from
DHParser.toolkit
import
is_logging
,
log_dir
,
logfile_basename
,
escape_re
,
sane_parser_name
from
DHParser.syntaxtree
import
WHITESPACE_PTYPE
,
TOKEN_PTYPE
,
ZOMBIE_PARSER
,
ParserBase
,
\
Node
,
TransformationFunc
from
DHParser.toolkit
import
StringView
,
EMPTY_STRING_VIEW
,
sv_match
,
sv_index
,
sv_search
,
\
load_if_file
,
error_messages
,
line_col
from
DHParser.syntaxtree
import
Node
,
TransformationFunc
from
DHParser.base
import
ParserBase
,
WHITESPACE_PTYPE
,
TOKEN_PTYPE
,
ZOMBIE_PARSER
,
Error
,
is_error
,
has_errors
,
\
StringView
,
EMPTY_STRING_VIEW
from
DHParser.toolkit
import
load_if_file
,
error_messages
,
line_col
__all__
=
(
'PreprocessorFunc'
,
'HistoryRecord'
,
...
...
@@ -161,15 +161,17 @@ class HistoryRecord:
self
.
call_stack
=
[
p
for
p
in
call_stack
if
p
.
ptype
!=
":Forward"
]
# type: List['Parser']
self
.
node
=
node
# type: Node
self
.
remaining
=
remaining
# type: int
document
=
call_stack
[
-
1
].
grammar
.
document__
.
text
if
call_stack
else
''
self
.
line_col
=
line_col
(
document
,
len
(
document
)
-
remaining
)
# type: Tuple[int, int]
self
.
line_col
=
(
1
,
1
)
# type: Tuple[int, int]
if
call_stack
:
document
=
call_stack
[
-
1
].
grammar
.
document__
.
text