Notice to GitKraken users: A vulnerability has been found in the SSH key generation of GitKraken versions 7.6.0 to 8.0.0 (https://www.gitkraken.com/blog/weak-ssh-key-fix). If you use GitKraken and have generated an SSH key using one of these versions, please remove it both from your local workstation and from your LRZ GitLab profile.

21.10.2021, 9:00 - 11:00: Due to updates, GitLab may be unavailable for a few minutes between 09:00 and 11:00.

Commit 603b99bb authored by Eckhart Arnold's avatar Eckhart Arnold
Browse files

drop empty anonymous nodes while parsing already

parent d004ab8c
......@@ -38,7 +38,7 @@ import copy
import re
from DHParser.preprocess import strip_tokens, with_source_mapping, PreprocessorFunc
from DHParser.syntaxtree import Node, RootNode, ZOMBIE_ROOTNODE, StrictResultType
from DHParser.syntaxtree import Node, RootNode, ZOMBIE_TAG, StrictResultType
from DHParser.transform import TransformationFunc
from DHParser.parse import Grammar
from DHParser.error import adjust_error_locations, is_error, Error
......@@ -71,6 +71,9 @@ def visitor_name(node_name: str) -> str:
return 'on_' + node_name
ROOTNODE_PLACEHOLDER = RootNode()
class Compiler:
"""
Class Compiler is the abstract base class for compilers. Compiler
......@@ -104,7 +107,7 @@ class Compiler:
self._reset()
def _reset(self):
self.tree = ZOMBIE_ROOTNODE # type: RootNode
self.tree = ROOTNODE_PLACEHOLDER # type: RootNode
self.context = [] # type: List[Node]
self._dirty_flag = False
......@@ -116,6 +119,7 @@ class Compiler:
(This very much depends on the kind and purpose of the
implemented compiler.)
"""
assert root.tag_name != ZOMBIE_TAG
if self._dirty_flag:
self._reset()
self._dirty_flag = True
......
......@@ -37,8 +37,8 @@ from DHParser.error import Error, linebreaks, line_col
from DHParser.log import is_logging, HistoryRecord
from DHParser.preprocess import BEGIN_TOKEN, END_TOKEN, RX_TOKEN_NAME
from DHParser.stringview import StringView, EMPTY_STRING_VIEW
from DHParser.syntaxtree import Node, RootNode, WHITESPACE_PTYPE, \
TOKEN_PTYPE, ZOMBIE, ResultType
from DHParser.syntaxtree import Node, FrozenNode, RootNode, WHITESPACE_PTYPE, \
TOKEN_PTYPE, ZOMBIE_TAG, ResultType
from DHParser.toolkit import sane_parser_name, escape_control_characters, re, typing, cython
from typing import Callable, cast, List, Tuple, Set, Dict, DefaultDict, Union, Optional, Any
......@@ -78,6 +78,10 @@ __all__ = ('Parser',
'Forward')
EMPTY_NODE = FrozenNode(':EMPTY__', '')
########################################################################
#
# Parser base class
......@@ -140,7 +144,7 @@ def reentry_point(rest: StringView, rules: ResumeList) -> int:
if i == upper_limit:
i = -1
return i
# return Node(None, rest[:i]), rest[i:]
# return Node(ZOMBIE_TAG, rest[:i]), rest[i:]
ApplyFunc = Callable[['Parser'], None]
......@@ -201,7 +205,7 @@ class Parser:
(recursively) a second time, if it has already been
applied to this parser.
grammar: A reference to the Grammar object to which the parser
_grammar: A reference to the Grammar object to which the parser
is attached.
"""
......@@ -209,8 +213,10 @@ class Parser:
# assert isinstance(name, str), str(name)
self.pname = '' # type: str
self.tag_name = self.ptype # type: str
if not isinstance(self, ZombieParser):
self._grammar = ZOMBIE_GRAMMAR # type: Grammar
try:
self._grammar = GRAMMAR_PLACEHOLDER # type: Grammar
except NameError:
pass
self.reset()
def __deepcopy__(self, memo):
......@@ -256,7 +262,7 @@ class Parser:
the business intelligence that is common to all parsers. The actual parsing is
done in the overridden method `_parse()`.
"""
grammar = self._grammar # read protected member instead property to avoid function call
grammar = self._grammar
location = grammar.document_length__ - len(text)
try:
......@@ -294,7 +300,7 @@ class Parser:
# apply reentry-rule or catch error at root-parser
if i < 0:
i = 1
nd = Node(None, rest[:i])
nd = Node(ZOMBIE_TAG, rest[:i])
rest = rest[i:]
assert error.node.children
if error.first_throw:
......@@ -303,11 +309,11 @@ class Parser:
else:
# TODO: possibly append an error message stating where parsing resumes;
# however, it should probably be attached to the next(!) node instead (how?)
node = Node(self.tag_name, (Node(None, text[:gap]), error.node, nd))
node = Node(self.tag_name, (Node(ZOMBIE_TAG, text[:gap]), error.node, nd))
elif error.first_throw:
raise ParserError(error.node, error.rest, first_throw=False)
else:
result = (Node(None, text[:gap]), error.node) if gap else error.node # type: ResultType
result = (Node(ZOMBIE_TAG, text[:gap]), error.node) if gap else error.node # type: ResultType
raise ParserError(Node(self.tag_name, result), text, first_throw=False)
if grammar.left_recursion_handling__:
......@@ -326,9 +332,10 @@ class Parser:
# otherwise also cache None-results
self.visited[location] = (None, rest)
else:
assert node._pos < 0
assert node._pos < 0 or node == EMPTY_NODE
node._pos = location
assert node._pos >= 0, str("%i < %i" % (grammar.document_length__, location))
assert node._pos >= 0 or node == EMPTY_NODE, \
str("%i < %i" % (grammar.document_length__, location))
if (grammar.last_rb__loc__ < location
and (grammar.memoization__ or location in grammar.recursion_locations__)):
# - variable manipulating parsers will not be entered into the cache,
......@@ -352,7 +359,7 @@ class Parser:
grammar.call_stack__.pop()
except RecursionError:
node = Node(None, str(text[:min(10, max(1, text.find("\n")))]) + " ...")
node = Node(ZOMBIE_TAG, str(text[:min(10, max(1, text.find("\n")))]) + " ...")
node._pos = location
grammar.tree__.new_error(node, "maximum recursion depth of parser reached; "
"potentially due to too many errors!")
......@@ -376,28 +383,34 @@ class Parser:
the results or None as well as the text at the position right behind
the matching string."""
raise NotImplementedError
# return None, text # default behaviour: don't match
@property
def grammar(self) -> 'Grammar':
if self._grammar != ZOMBIE_GRAMMAR:
try:
grammar = self._grammar
if self._grammar != GRAMMAR_PLACEHOLDER:
return self._grammar
else:
raise AssertionError('Grammar has not yet been set!')
except AttributeError:
raise AssertionError('Parser placeholder does not have a grammar!')
@grammar.setter
def grammar(self, grammar: 'Grammar'):
if self._grammar == ZOMBIE_GRAMMAR:
try:
if self._grammar == GRAMMAR_PLACEHOLDER:
self._grammar = grammar
self._grammar_assigned_notifier()
# self._grammar_assigned_notifier()
elif self._grammar != grammar:
raise AssertionError("Parser has already been assigned"
"to a different Grammar object!")
except AttributeError:
pass # ignore setting of grammar attribute for placeholder parser
def _grammar_assigned_notifier(self):
"""A function that notifies the parser object that it has been
assigned to a grammar."""
pass
# def _grammar_assigned_notifier(self):
# """A function that notifies the parser object that it has been
# assigned to a grammar."""
# pass
def _apply(self, func: ApplyFunc, flip: FlagFunc) -> bool:
"""
......@@ -449,50 +462,7 @@ class Parser:
self._apply(func, positive_flip)
class ZombieParser(Parser):
    """
    Serves as a substitute for a Parser instance.

    ``ZombieParser`` is the class of the singleton object
    ``ZOMBIE_PARSER``. The ``ZOMBIE_PARSER`` has a name and can be
    called, but it never matches. It serves as a substitute where only
    these (or one of these properties) is needed, but no real Parser-
    object is instantiated.
    """
    # cython compatibility: cython forbids writing to class attributes,
    # so the singleton flag is kept in a mutable one-element list.
    alive = [False]

    def __init__(self):
        super().__init__()
        # Both the parser name and the tag name are set to the zombie
        # marker so nodes produced on error paths are recognizable.
        self.pname = ZOMBIE
        self.tag_name = ZOMBIE
        # NOTE(review): super().__init__() IS called above, although this
        # comment in the original claims otherwise — confirm intent.
        assert not ZombieParser.alive[0], "There can be only one!"
        assert self.__class__ == ZombieParser, "No derivatives, please!"
        ZombieParser.alive[0] = True
        self.reset()

    def __copy__(self):
        # Singleton: copying yields the very same object.
        return self

    def __deepcopy__(self, memo):
        # Singleton: deep copies also yield the very same object.
        return self

    def __call__(self, text):
        # The zombie parser must never actually be invoked for parsing.
        raise AssertionError("Better call Saul ;-)")

    @property
    def grammar(self) -> 'Grammar':
        # A zombie parser is not attached to any grammar.
        raise AssertionError("Zombie parser doesn't have a grammar!")

    @grammar.setter
    def grammar(self, grammar: 'Grammar'):
        raise AssertionError('Cannot assign a grammar a zombie parser or vice versa!')

    def apply(self, func: ApplyFunc):
        # Never applies anything; the return value merely signals the call.
        return "Eaten alive..."
ZOMBIE_PARSER = ZombieParser()
PARSER_PLACEHOLDER = Parser()
########################################################################
......@@ -702,7 +672,7 @@ class Grammar:
recursion.
"""
python_src__ = '' # type: str
root__ = ZOMBIE_PARSER # type: Parser
root__ = PARSER_PLACEHOLDER # type: Parser
# root__ must be overwritten with the root-parser by grammar subclass
parser_initialization__ = ["pending"] # type: list[str]
resume_rules__ = dict() # type: Dict[str, ResumeList]
......@@ -885,7 +855,7 @@ class Grammar:
if not rest:
result, _ = parser(rest)
if result is None:
result = Node(None, '').init_pos(0)
result = Node(ZOMBIE_TAG, '').init_pos(0)
self.tree__.new_error(result,
'Parser "%s" did not match empty document.' % str(parser),
Error.PARSER_DID_NOT_MATCH)
......@@ -924,7 +894,7 @@ class Grammar:
if len(stitches) < MAX_DROPOUTS
else " too often! Terminating parser.")
error_code = Error.PARSER_STOPPED_BEFORE_END
stitches.append(Node(None, skip).init_pos(tail_pos(stitches)))
stitches.append(Node(ZOMBIE_TAG, skip).init_pos(tail_pos(stitches)))
self.tree__.new_error(stitches[-1], error_msg, error_code)
if self.history_tracking__:
# # some parsers may have matched and left history records with nodes != None.
......@@ -941,12 +911,12 @@ class Grammar:
self.history_tracking__ = False
if stitches:
if rest:
stitches.append(Node(None, rest))
stitches.append(Node(ZOMBIE_TAG, rest))
#try:
result = Node(None, tuple(stitches)).init_pos(0)
result = Node(ZOMBIE_TAG, tuple(stitches)).init_pos(0)
# except AssertionError as error:
# # some debugging output
# print(Node(None, tuple(stitches)).as_sxpr())
# print(Node(ZOMBIE_TAG, tuple(stitches)).as_sxpr())
# raise error
if any(self.variables__.values()):
error_msg = "Capture-retrieve-stack not empty after end of parsing: " \
......@@ -957,7 +927,7 @@ class Grammar:
# add another child node at the end to ensure that the position
# of the error will be the end of the text. Otherwise, the error
# message above ("...after end of parsing") would appear illogical.
error_node = Node(ZOMBIE, '').init_pos(tail_pos(result.children))
error_node = Node(ZOMBIE_TAG, '').init_pos(tail_pos(result.children))
self.tree__.new_error(error_node, error_msg, error_code)
result.result = result.children + (error_node,)
else:
......@@ -1028,7 +998,7 @@ def dsl_error_msg(parser: Parser, error_str: str) -> str:
return " ".join(msg)
ZOMBIE_GRAMMAR = Grammar()
GRAMMAR_PLACEHOLDER = Grammar()
########################################################################
......@@ -1163,21 +1133,6 @@ class RegExp(Parser):
if match:
capture = match.group(0)
end = text.index(match.end())
# regular expression must never match preprocessor-tokens!
# Should never happen, anyway, as long as text characters do not
# fall into the range below 0x20
# # Find a better solution here? e.g. static checking/re-mangling at compile time
# # Needs testing!!!
# i = capture.find(BEGIN_TOKEN)
# if i >= 0:
# capture = capture[:i]
# end = i
# m = capture[:end].match(self.regexp)
# if m:
# capture = m.group(0)
# end = text.index(m.end())
# else:
# return None, text
return Node(self.tag_name, capture, True), text[end:]
return None, text
......@@ -1217,13 +1172,17 @@ class Whitespace(RegExp):
is a RegExp-parser for whitespace."""
assert WHITESPACE_PTYPE == ":Whitespace"
# def _parse(self, text: StringView) -> Tuple[Optional[Node], StringView]:
# match = text.match(self.regexp)
# if match:
# capture = match.group(0)
# end = text.index(match.end())
# return Node(self.tag_name, capture, True), text[end:]
# return None, text
def _parse(self, text: StringView) -> Tuple[Optional[Node], StringView]:
    """Match whitespace at the start of `text`.

    Returns the shared ``EMPTY_NODE`` for anonymous zero-length matches
    so that no fresh node object has to be allocated for them.
    """
    match = text.match(self.regexp)
    if not match:
        return None, text
    capture = match.group(0)
    if not (capture or self.pname):
        # anonymous empty match: reuse the shared empty placeholder node
        return EMPTY_NODE, text
    end = text.index(match.end())
    return Node(self.tag_name, capture, True), text[end:]
def __repr__(self):
return '~'
......@@ -1318,7 +1277,7 @@ class Option(UnaryOperator):
>>> Grammar(number)('3.14159').content
'3.14159'
>>> Grammar(number)('3.14159').structure
'(:Series (:Option) (:RegExp "3") (:Option (:RegExp ".14159")))'
'(:Series (:RegExp "3") (:Option (:RegExp ".14159")))'
>>> Grammar(number)('-1').content
'-1'
......@@ -1335,9 +1294,13 @@ class Option(UnaryOperator):
def _parse(self, text: StringView) -> Tuple[Optional[Node], StringView]:
node, text = self.parser(text)
if node:
if node and (node._result or self.parser.pname):
return Node(self.tag_name, node), text
if self.pname:
return Node(self.tag_name, ()), text
else:
# avoid creation of a node object for empty nodes
return EMPTY_NODE, text
def __repr__(self):
return '[' + (self.parser.repr[1:-1] if isinstance(self.parser, Alternative)
......@@ -1451,7 +1414,7 @@ def mandatory_violation(grammar: Grammar,
reloc: int) -> Tuple[Error, Node, StringView]:
i = reloc if reloc >= 0 else 0
location = grammar.document_length__ - len(text_)
err_node = Node(None, text_[:i]).init_pos(location)
err_node = Node(ZOMBIE_TAG, text_[:i]).init_pos(location)
found = text_[:10].replace('\n', '\\n ')
for search, message in err_msgs:
rxs = not isinstance(search, str)
......@@ -1557,9 +1520,10 @@ class Series(NaryOperator):
else:
results += (node,)
break
if node._result or parser.pname: # optimization: drop anonymous empty nodes
results += (node,)
# assert len(results) <= len(self.parsers) \
# or len(self.parsers) >= len([p for p in results if p.tag_name != ZOMBIE])
# or len(self.parsers) >= len([p for p in results if p.tag_name != ZOMBIE_TAG])
node = Node(self.tag_name, results)
if error:
raise ParserError(node, text, first_throw=True)
......@@ -1754,6 +1718,7 @@ class AllOf(NaryOperator):
for i, parser in enumerate(parsers):
node, text__ = parser(text_)
if node:
if node._result or parser.pname:
results += (node,)
text_ = text__
del parsers[i]
......@@ -1770,7 +1735,7 @@ class AllOf(NaryOperator):
if reloc < 0:
parsers = []
assert len(results) <= len(self.parsers) \
or len(self.parsers) >= len([p for p in results if p.tag_name != ZOMBIE])
or len(self.parsers) >= len([p for p in results if p.tag_name != ZOMBIE_TAG])
node = Node(self.tag_name, results)
if error:
raise ParserError(node, text, first_throw=True)
......@@ -1818,6 +1783,7 @@ class SomeOf(NaryOperator):
for i, parser in enumerate(parsers):
node, text__ = parser(text_)
if node:
if node._result or parser.pname:
results += (node,)
text_ = text__
del parsers[i]
......
......@@ -34,14 +34,14 @@ from typing import Callable, cast, Iterator, List, AbstractSet, Set, Union, Tupl
__all__ = ('WHITESPACE_PTYPE',
'TOKEN_PTYPE',
'ZOMBIE',
'ZOMBIE_NODE',
'ZOMBIE_TAG',
'PLACEHOLDER',
'ResultType',
'StrictResultType',
'ChildrenType',
'Node',
'FrozenNode',
'RootNode',
'ZOMBIE_ROOTNODE',
'parse_sxpr',
'parse_xml',
'flatten_sxpr',
......@@ -58,7 +58,7 @@ __all__ = ('WHITESPACE_PTYPE',
WHITESPACE_PTYPE = ':Whitespace'
TOKEN_PTYPE = ':Token'
ZOMBIE = "__ZOMBIE__"
ZOMBIE_TAG = "__ZOMBIE__"
#######################################################################
#
......@@ -162,7 +162,7 @@ class Node: # (collections.abc.Sized): Base class omitted for cython-compatibil
__slots__ = '_result', 'children', '_len', '_pos', 'tag_name', 'errors', '_xml_attr', '_content'
def __init__(self, tag_name: Optional[str], result: ResultType, leafhint: bool = False) -> None:
def __init__(self, tag_name: str, result: ResultType, leafhint: bool = False) -> None:
"""
Initializes the ``Node``-object with the ``Parser``-Instance
that generated the node and the parser's result.
......@@ -178,12 +178,8 @@ class Node: # (collections.abc.Sized): Base class omitted for cython-compatibil
self._len = -1 # type: int # lazy evaluation
else:
self.result = result
assert tag_name is None or isinstance(tag_name, str) # TODO: Delete this line
self.tag_name = tag_name if tag_name else ZOMBIE
# if parser is None:
# self._tag_name = ZOMBIE
# else:
# self._tag_name = parser.name or parser.ptype
# assert tag_name is not None
self.tag_name = tag_name # type: str
def __deepcopy__(self, memo):
if self.children:
......@@ -693,7 +689,29 @@ class Node: # (collections.abc.Sized): Base class omitted for cython-compatibil
return sum(child.tree_size() for child in self.children) + 1
ZOMBIE_NODE = Node(ZOMBIE, '')
class FrozenNode(Node):
    """An immutable, position-less leaf node.

    FrozenNode is meant for shared placeholder nodes (such as the
    module-level ``EMPTY_NODE``) that may be handed out by many parser
    calls at once. Because one object is shared, neither its result nor
    its position may ever be (re-)assigned.
    """

    def __init__(self, tag_name: str, result: ResultType) -> None:
        # Only leaf nodes (string results) can be frozen; normalize
        # StringView results to plain strings once, up front.
        if isinstance(result, (str, StringView)):
            result = str(result)
        else:
            raise TypeError('FrozenNode only accepts string as results. '
                            '(Only leaf-nodes can be frozen nodes.)')
        super(FrozenNode, self).__init__(tag_name, result, True)

    @property
    def result(self) -> StrictResultType:
        return self._result

    @result.setter
    def result(self, result: ResultType):
        raise TypeError('FrozenNode does not allow re-assignment of results.')

    def init_pos(self, pos: int) -> 'Node':
        # Deliberately ignore the position: a frozen node is shared and
        # cannot carry one. Fix: return self (instead of falling through
        # and returning None) so the annotated return type is honored and
        # fluent chains like `node.init_pos(0)` keep working.
        return self
PLACEHOLDER = Node('__PLACEHOLDER__', '')
class RootNode(Node):
......@@ -708,7 +726,7 @@ class RootNode(Node):
"""
def __init__(self, node: Optional[Node] = None):
super().__init__(ZOMBIE, '')
super().__init__(ZOMBIE_TAG, '')
self.all_errors = [] # type: List[Error]
self.error_flag = 0
if node is not None:
......@@ -804,8 +822,6 @@ class RootNode(Node):
empty_tags=self.empty_tags)
ZOMBIE_ROOTNODE = RootNode()
#######################################################################
#
# S-expression- and XML-parsers
......
......@@ -40,7 +40,7 @@ import sys
from DHParser.error import Error, is_error, adjust_error_locations
from DHParser.log import is_logging, clear_logs, log_parsing_history
from DHParser.parse import UnknownParserError, Parser, Lookahead
from DHParser.syntaxtree import Node, RootNode, parse_sxpr, flatten_sxpr, ZOMBIE
from DHParser.syntaxtree import Node, RootNode, parse_sxpr, flatten_sxpr, ZOMBIE_TAG
from DHParser.toolkit import re, typing
from typing import Tuple
......@@ -401,7 +401,7 @@ def grammar_unit(test_unit, parser_factory, transformer_factory, report=True, ve
cst = parser(test_code, parser_name, track_history=has_lookahead(parser_name))
except UnknownParserError as upe:
cst = RootNode()
cst = cst.new_error(Node(ZOMBIE, "").init_pos(0), str(upe))
cst = cst.new_error(Node(ZOMBIE_TAG, "").init_pos(0), str(upe))
clean_test_name = str(test_name).replace('*', '')
# log_ST(cst, "match_%s_%s.cst" % (parser_name, clean_test_name))
tests.setdefault('__cst__', {})[test_name] = cst
......@@ -450,7 +450,7 @@ def grammar_unit(test_unit, parser_factory, transformer_factory, report=True, ve
try:
cst = parser(test_code, parser_name, track_history=has_lookahead(parser_name))
except UnknownParserError as upe:
node = Node(ZOMBIE, "").init_pos(0)
node = Node(ZOMBIE_TAG, "").init_pos(0)
cst = RootNode(node).new_error(node, str(upe))
errata.append('Unknown parser "{}" in fail test "{}"!'.format(parser_name, test_name))
tests.setdefault('__err__', {})[test_name] = errata[-1]
......
......@@ -32,7 +32,7 @@ import inspect
from functools import partial, singledispatch
from DHParser.error import Error, ErrorCode
from DHParser.syntaxtree import Node, WHITESPACE_PTYPE, TOKEN_PTYPE, ZOMBIE_NODE, RootNode, parse_sxpr, flatten_sxpr
from DHParser.syntaxtree import Node, WHITESPACE_PTYPE, TOKEN_PTYPE, PLACEHOLDER, RootNode, parse_sxpr, flatten_sxpr
from DHParser.toolkit import issubtype, isgenerictype, expand_table, smart_list, re, typing
from typing import AbstractSet, Any, ByteString, Callable, cast, Container, Dict, \
Tuple, List, Sequence, Union, Text, Generic
......@@ -309,7 +309,7 @@ def traverse(root_node: Node,
nonlocal cache
node = context[-1]
if node.children:
context.append(ZOMBIE_NODE)
context.append(PLACEHOLDER)
for child in node.children:
context[-1] = child
traverse_recursive(context) # depth first
......@@ -624,7 +624,7 @@ def flatten(context: List[Node], condition: Callable = is_anonymous, recursive:
node = context[-1]
if node.children:
new_result = [] # type: List[Node]
context.append(ZOMBIE_NODE)
context.append(PLACEHOLDER)
for child in node.children:
context[-1] = child
if child.children and condition(context):
......
......@@ -55,6 +55,13 @@ def fail_on_error(src, result):
sys.exit(1)
def count_nodes(tree, condition=lambda n: True):
    """Return the number of nodes in `tree` satisfying `condition`.

    The root node itself is included in the traversal and counted if it
    matches. `condition` defaults to counting every node.
    """
    # sum over a generator instead of a hand-rolled counter loop
    return sum(1 for _ in tree.select(condition, include_root=True))
def tst_func():
with DHParser.log.logging(LOGGING):
if not os.path.exists('REPORT'):
......@@ -68,6 +75,9 @@ def tst_func():
print('\n\nParsing document: "%s"' % file)
result = parser(doc)
print("Number of CST-nodes: " + str(count_nodes(result)))
# print("Number of empty nodes: " + str(count_nodes(result,
# lambda n: not bool(n.result))))
if DHParser.log.is_logging():
print('Saving CST')
with open('REPORT/' + file[:-4] + '.cst', 'w', encoding='utf-8') as f:
......@@ -79,6 +89,7 @@ def tst_func():
fail_on_error(doc, result)
transformer(result)
fail_on_error(doc, result)
print("Number of AST-nodes: " + str(count_nodes(result)))
if DHParser.log.is_logging():
print('Saving AST')
with open('LOGS/' + file[:-4] + '.ast', 'w', encoding='utf-8') as f:
......
......@@ -32,7 +32,7 @@ from DHParser import logging, is_filename, load_if_file, \
is_empty, is_expendable, collapse, replace_content, WHITESPACE_PTYPE, TOKEN_PTYPE, \
remove_nodes, remove_content, remove_brackets, replace_parser, remove_anonymous_tokens, \
keep_children, is_one_of, has_content, apply_if, remove_first, remove_last, \
remove_anonymous_empty, keep_nodes, traverse_locally, strip, lstrip, rstrip, ZOMBIE_NODE
remove_anonymous_empty, keep_nodes, traverse_locally, strip, lstrip, rstrip, PLACEHOLDER
#######################################################################
......@@ -574,7 +574,7 @@ class XMLCompiler(Compiler):
node.attr.update(attributes)
preserve_whitespace |= attributes.get('xml:space', '') == 'preserve'
node.tag_name = tag_name
content = self.compile_children(node.get('content', ZOMBIE_NODE))
content = self.compile_children(node.get('content', PLACEHOLDER))
if len(content) == 1:
if content[0].tag_name == "CharData":
# reduce single CharData children
......
......@@ -7,11 +7,13 @@ import doctest
import multiprocessing
import os
import platform
#import subprocess
#import sys
import sys
import time
import threading
scriptdir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(scriptdir, '../'))
lock = threading.Lock()
......@@ -33,8 +35,6 @@ def run_doctests(module):