Commit 813bebe5 authored by Eckhart Arnold's avatar Eckhart Arnold

- bugfixes

parent 821cb67c
...@@ -81,7 +81,7 @@ from DHParser import logging, is_filename, load_if_file, \\ ...@@ -81,7 +81,7 @@ from DHParser import logging, is_filename, load_if_file, \\
ZeroOrMore, Forward, NegativeLookahead, mixin_comment, compile_source, \\ ZeroOrMore, Forward, NegativeLookahead, mixin_comment, compile_source, \\
last_value, counterpart, accumulate, PreprocessorFunc, \\ last_value, counterpart, accumulate, PreprocessorFunc, \\
Node, TransformationFunc, \\ Node, TransformationFunc, \\
traverse, remove_children_if, \\ traverse, remove_children_if, join, \\
reduce_single_child, replace_by_single_child, remove_whitespace, \\ reduce_single_child, replace_by_single_child, remove_whitespace, \\
remove_expendables, remove_empty, remove_tokens, flatten, is_whitespace, \\ remove_expendables, remove_empty, remove_tokens, flatten, is_whitespace, \\
is_empty, is_expendable, collapse, replace_content, WHITESPACE_PTYPE, TOKEN_PTYPE, \\ is_empty, is_expendable, collapse, replace_content, WHITESPACE_PTYPE, TOKEN_PTYPE, \\
......
...@@ -132,7 +132,7 @@ PreprocessorFunc = Union[Callable[[str], str], partial] ...@@ -132,7 +132,7 @@ PreprocessorFunc = Union[Callable[[str], str], partial]
LEFT_RECURSION_DEPTH = 8 # type: int LEFT_RECURSION_DEPTH = 8 # type: int
# because of python's recursion depth limit, this value ought not to be # because of python's recursion depth limit, this value ought not to be
# set too high. PyPy allows higher values than CPython # set too high. PyPy allows higher values than CPython
MAX_DROPOUTS = 5 # type: int MAX_DROPOUTS = 3 # type: int
# stop trying to recover parsing after so many errors # stop trying to recover parsing after so many errors
...@@ -231,7 +231,8 @@ def add_parser_guard(parser_func): ...@@ -231,7 +231,8 @@ def add_parser_guard(parser_func):
# in case of left recursion, the first recursive step that # in case of left recursion, the first recursive step that
# matches will store its result in the cache # matches will store its result in the cache
parser.visited[location] = (node, rest) parser.visited[location] = (node, rest)
grammar.last_node__ = node # store last node for Lookbehind parser # store last non-empty node for Lookbehind parser
if len(rest) < location: grammar.last_node__ = node
parser.recursion_counter[location] -= 1 parser.recursion_counter[location] -= 1
...@@ -293,6 +294,15 @@ class Parser(ParserBase, metaclass=ParserMetaClass): ...@@ -293,6 +294,15 @@ class Parser(ParserBase, metaclass=ParserMetaClass):
2. *Anonymous parsers* where the name-field just contains the empty 2. *Anonymous parsers* where the name-field just contains the empty
string. AST-transformation of Anonymous parsers can be hooked string. AST-transformation of Anonymous parsers can be hooked
only to their class name, and not to the individual parser. only to their class name, and not to the individual parser.
Parser objects are callable and parsing is done by calling a parser
object with the text to parse. If the parser matches it returns
a tuple consisting of a node representing the root of the concrete
syntax tree resulting from the match as well as the substring
`text[i:]` where i is the length of matched text (which can be
zero in the case of parsers like `ZeroOrMore` or `Optional`).
If `i > 0` then the parser has "moved forward". If the parser does
not match it returns `(None, text)`.
""" """
ApplyFunc = Callable[['Parser'], None] ApplyFunc = Callable[['Parser'], None]
...@@ -304,15 +314,27 @@ class Parser(ParserBase, metaclass=ParserMetaClass): ...@@ -304,15 +314,27 @@ class Parser(ParserBase, metaclass=ParserMetaClass):
self.reset() self.reset()
def __deepcopy__(self, memo): def __deepcopy__(self, memo):
"""Deepcopy method of the parser. Upon instantiation of a Grammar-
object, parsers will be deep-copied to the Grammar object. If a
derived parser-class changes the signature of the constructor,
`__deepcopy__`-method must be replaced (i.e. overridden without
calling the same method from the superclass) by the derived class.
"""
return self.__class__(self.name) return self.__class__(self.name)
def reset(self): def reset(self):
"""Initializes or resets any parser variables. If overwritten,
the `reset()`-method of the parent class must be called from the
`reset()`-method of the derived class."""
self.visited = dict() # type: Dict[int, Tuple[Node, str]] self.visited = dict() # type: Dict[int, Tuple[Node, str]]
self.recursion_counter = dict() # type: Dict[int, int] self.recursion_counter = dict() # type: Dict[int, int]
self.cycle_detection = set() # type: Set[Callable] self.cycle_detection = set() # type: Set[Callable]
return self return self
def __call__(self, text: str) -> Tuple[Node, str]: def __call__(self, text: str) -> Tuple[Node, str]:
"""Applies the parser to the given `text` and returns a node with
the results or None as well as the text at the position right behind
the matching string."""
return None, text # default behaviour: don't match return None, text # default behaviour: don't match
def __add__(self, other: 'Parser') -> 'Series': def __add__(self, other: 'Parser') -> 'Series':
...@@ -332,10 +354,12 @@ class Parser(ParserBase, metaclass=ParserMetaClass): ...@@ -332,10 +354,12 @@ class Parser(ParserBase, metaclass=ParserMetaClass):
@grammar.setter @grammar.setter
def grammar(self, grammar: 'Grammar'): def grammar(self, grammar: 'Grammar'):
assert self._grammar is None or self._grammar == grammar, \ if self._grammar is None:
"Parser has already been assigned to a Grammar object!" self._grammar = grammar
self._grammar = grammar self._grammar_assigned_notifier()
self._grammar_assigned_notifier() else:
assert self._grammar == grammar, \
"Parser has already been assigned to a different Grammar object!"
def _grammar_assigned_notifier(self): def _grammar_assigned_notifier(self):
"""A function that notifies the parser object that it has been """A function that notifies the parser object that it has been
...@@ -345,7 +369,7 @@ class Parser(ParserBase, metaclass=ParserMetaClass): ...@@ -345,7 +369,7 @@ class Parser(ParserBase, metaclass=ParserMetaClass):
def apply(self, func: ApplyFunc): def apply(self, func: ApplyFunc):
""" """
Applies function `func(parser)` recursively to this parser and all Applies function `func(parser)` recursively to this parser and all
descendants of the tree of parsers. The same function can never descendant parsers if any exist. The same function can never
be applied twice between calls of the ``reset()``-method! be applied twice between calls of the ``reset()``-method!
""" """
if func in self.cycle_detection: if func in self.cycle_detection:
...@@ -387,7 +411,7 @@ class Grammar: ...@@ -387,7 +411,7 @@ class Grammar:
>>> number = RE('\d+') + RE('\.') + RE('\d+') | RE('\d+') >>> number = RE('\d+') + RE('\.') + RE('\d+') | RE('\d+')
>>> number_parser = Grammar(number) >>> number_parser = Grammar(number)
>>> number_parser("3.1416").show() >>> number_parser("3.1416").content()
'3.1416' '3.1416'
Collecting the parsers that define a grammar in a descendant class of Collecting the parsers that define a grammar in a descendant class of
...@@ -518,7 +542,7 @@ class Grammar: ...@@ -518,7 +542,7 @@ class Grammar:
# parsers not connected to the root object will be copied later # parsers not connected to the root object will be copied later
# on demand (see Grammar.__getitem__()). Usually, the need to # on demand (see Grammar.__getitem__()). Usually, the need to
# do so only arises during testing. # do so only arises during testing.
self.root__ = root if root else copy.deepcopy(self.__class__.root__) self.root__ = copy.deepcopy(root) if root else copy.deepcopy(self.__class__.root__)
if self.wspL__: if self.wspL__:
self.wsp_left_parser__ = Whitespace(self.wspL__) # type: ParserBase self.wsp_left_parser__ = Whitespace(self.wspL__) # type: ParserBase
...@@ -556,7 +580,7 @@ class Grammar: ...@@ -556,7 +580,7 @@ class Grammar:
self.rollback__ = [] # type: List[Tuple[int, Callable]] self.rollback__ = [] # type: List[Tuple[int, Callable]]
self.last_rb__loc__ = -1 # type: int self.last_rb__loc__ = -1 # type: int
# previously parsed node, needed by Lookbehind parser # previously parsed node, needed by Lookbehind parser
self.last_node__ = None # type: Node self.last_node__ = Node(ZOMBIE_PARSER, '') # type: Node
# support for call stack tracing # support for call stack tracing
self.call_stack__ = [] # type: List[Parser] self.call_stack__ = [] # type: List[Parser]
# snapshots of call stacks # snapshots of call stacks
...@@ -807,13 +831,20 @@ class PreprocessorToken(Parser): ...@@ -807,13 +831,20 @@ class PreprocessorToken(Parser):
class RegExp(Parser): class RegExp(Parser):
""" """Regular expression parser.
Regular expression parser.
The RegExp-parser parses text that matches a regular expression. The RegExp-parser parses text that matches a regular expression.
RegExp can also be considered as the "atomic parser", because all RegExp can also be considered as the "atomic parser", because all
other parsers delegate part of the parsing job to other parsers, other parsers delegate part of the parsing job to other parsers,
but do not match text directly. but do not match text directly.
Example:
>>> word = RegExp(r'\w+')
>>> Grammar(word)("Haus").content()
'Haus'
EBNF-Notation: `/ ... /`
EBNF-Example: `word = /\w+/`
""" """
def __init__(self, regexp, name: str = '') -> None: def __init__(self, regexp, name: str = '') -> None:
...@@ -856,6 +887,21 @@ class RE(Parser): ...@@ -856,6 +887,21 @@ class RE(Parser):
string, e.g. use r'\s*' or r'[\t ]+', but not r'\s+'. If the string, e.g. use r'\s*' or r'[\t ]+', but not r'\s+'. If the
respective parameters in the constructor are set to ``None`` the respective parameters in the constructor are set to ``None`` the
default whitespace expression from the Grammar object will be used. default whitespace expression from the Grammar object will be used.
Example (allowing whitespace on the right hand side, but not on
the left hand side of a regular expression):
>>> word = RE(r'\w+', wR=r'\s*')
>>> parser = Grammar(word)
>>> result = parser('Haus ')
>>> result.content()
'Haus '
>>> result.structure()
'(:RE (:RegExp "Haus") (:Whitespace " "))'
>>> parser(' Haus').content()
' <<< Error on " Haus" | Parser did not match! Invalid source file? >>> '
EBNF-Notation: `/ ... /~` or `~/ ... /` or `~/ ... /~`
EBNF-Example: `word = /\w+/~`
""" """
def __init__(self, regexp, wL=None, wR=None, name=''): def __init__(self, regexp, wL=None, wR=None, name=''):
"""Constructor for class RE. """Constructor for class RE.
...@@ -1004,6 +1050,30 @@ class NaryOperator(Parser): ...@@ -1004,6 +1050,30 @@ class NaryOperator(Parser):
class Optional(UnaryOperator): class Optional(UnaryOperator):
"""
Parser `Optional` always matches, even if its child-parser
did not match.
If the child-parser did not match `Optional` returns a node
with no content and does not move forward in the text.
If the child-parser did match, `Optional` returns a node
with the node returned by the child-parser as its single
child and the text at the position where the child-parser
left it.
Examples:
>>> number = Optional(Token('-')) + RegExp(r'\d+') + Optional(RegExp(r'\.\d+'))
>>> Grammar(number)('3.14159').content()
'3.14159'
>>> Grammar(number)('3.14159').structure()
'(:Series (:Optional) (:RegExp "3") (:Optional (:RegExp ".14159")))'
>>> Grammar(number)('-1').content()
'-1'
EBNF-Notation: `[ ... ]`
EBNF-Example: `number = ["-"] /\d+/ [ /\.\d+/ ]`
"""
def __init__(self, parser: Parser, name: str = '') -> None: def __init__(self, parser: Parser, name: str = '') -> None:
super(Optional, self).__init__(parser, name) super(Optional, self).__init__(parser, name)
# assert isinstance(parser, Parser) # assert isinstance(parser, Parser)
...@@ -1024,6 +1094,7 @@ class Optional(UnaryOperator): ...@@ -1024,6 +1094,7 @@ class Optional(UnaryOperator):
return '[' + (self.parser.repr[1:-1] if isinstance(self.parser, Alternative) return '[' + (self.parser.repr[1:-1] if isinstance(self.parser, Alternative)
and not self.parser.name else self.parser.repr) + ']' and not self.parser.name else self.parser.repr) + ']'
class ZeroOrMore(Optional): class ZeroOrMore(Optional):
def __call__(self, text: str) -> Tuple[Node, str]: def __call__(self, text: str) -> Tuple[Node, str]:
results = () # type: Tuple[Node, ...] results = () # type: Tuple[Node, ...]
...@@ -1120,12 +1191,12 @@ class Alternative(NaryOperator): ...@@ -1120,12 +1191,12 @@ class Alternative(NaryOperator):
# the order of the sub-expression matters! # the order of the sub-expression matters!
>>> number = RE('\d+') | RE('\d+') + RE('\.') + RE('\d+') >>> number = RE('\d+') | RE('\d+') + RE('\.') + RE('\d+')
>>> Grammar(number)("3.1416").show() >>> Grammar(number)("3.1416").content()
'3 <<< Error on ".1416" | Parser stopped before end! trying to recover... >>> ' '3 <<< Error on ".1416" | Parser stopped before end! trying to recover... >>> '
# the most selective expression should be put first: # the most selective expression should be put first:
>>> number = RE('\d+') + RE('\.') + RE('\d+') | RE('\d+') >>> number = RE('\d+') + RE('\.') + RE('\d+') | RE('\d+')
>>> Grammar(number)("3.1416").show() >>> Grammar(number)("3.1416").content()
'3.1416' '3.1416'
""" """
...@@ -1246,7 +1317,6 @@ class Lookbehind(FlowOperator): ...@@ -1246,7 +1317,6 @@ class Lookbehind(FlowOperator):
assert isinstance(p, RegExp), str(type(p)) assert isinstance(p, RegExp), str(type(p))
self.regexp = p.main.regexp if isinstance(p, RE) else p.regexp self.regexp = p.main.regexp if isinstance(p, RE) else p.regexp
super(Lookbehind, self).__init__(parser, name) super(Lookbehind, self).__init__(parser, name)
print("WARNING: Lookbehind Operator is experimental!")
def __call__(self, text: str) -> Tuple[Node, str]: def __call__(self, text: str) -> Tuple[Node, str]:
if self.sign(self.condition()): if self.sign(self.condition()):
...@@ -1262,7 +1332,10 @@ class Lookbehind(FlowOperator): ...@@ -1262,7 +1332,10 @@ class Lookbehind(FlowOperator):
def condition(self): def condition(self):
node = self.grammar.last_node__ node = self.grammar.last_node__
return node and self.regexp.match(str(node)) assert node is not None # can be removed
s = str(node)
assert s or node.parser.name == '__ZOMBIE__', str(node.parser)
return self.regexp.match(s)
class NegativeLookbehind(Lookbehind): class NegativeLookbehind(Lookbehind):
......
...@@ -132,6 +132,17 @@ StrictResultType = Union[ChildrenType, str] ...@@ -132,6 +132,17 @@ StrictResultType = Union[ChildrenType, str]
ResultType = Union[ChildrenType, 'Node', str, None] ResultType = Union[ChildrenType, 'Node', str, None]
def oneliner_sxpr(sxpr: str) -> str:
"""Returns S-expression `sxpr` as a one liner without unnecessary
whitespace.
Example:
>>> oneliner_sxpr('(a\\n (b\\n c\\n )\\n)\\n')
'(a (b c))'
"""
return re.sub('\s(?=\))', '', re.sub('\s+', ' ', sxpr)).strip()
class Node: class Node:
""" """
Represents a node in the concrete or abstract syntax tree. Represents a node in the concrete or abstract syntax tree.
...@@ -259,13 +270,34 @@ class Node: ...@@ -259,13 +270,34 @@ class Node:
def errors(self) -> List[Error]: def errors(self) -> List[Error]:
return [Error(self.pos, err) for err in self._errors] return [Error(self.pos, err) for err in self._errors]
def show(self) -> str: def add_error(self, error_str) -> 'Node':
"""Returns content as string, inserting error messages where self._errors.append(error_str)
errors occurred. self.error_flag = True
return self
def propagate_error_flags(self) -> None:
"""Recursively propagates error flags set on child nodes to its
parents. This can be used if errors are added to descendant
nodes after syntaxtree construction, i.e. in the compile phase.
""" """
s = "".join(child.show() for child in self.children) if self.children \ for child in self.children:
else str(self.result) child.propagate_error_flags()
return (' <<< Error on "%s" | %s >>> ' % (s, '; '.join(self._errors))) if self._errors else s self.error_flag = self.error_flag or child.error_flag
def collect_errors(self, clear_errors=False) -> List[Error]:
"""
Returns all errors of this node or any child node in the form
of a set of tuples (position, error_message), where position
is always relative to this node.
"""
errors = self.errors
if clear_errors:
self._errors = []
self.error_flag = False
if self.children:
for child in self.children:
errors.extend(child.collect_errors(clear_errors))
return errors
def _tree_repr(self, tab, openF, closeF, dataF=identity, density=0) -> str: def _tree_repr(self, tab, openF, closeF, dataF=identity, density=0) -> str:
""" """
...@@ -363,39 +395,20 @@ class Node: ...@@ -363,39 +395,20 @@ class Node:
return self._tree_repr(' ', opening, closing, density=1) return self._tree_repr(' ', opening, closing, density=1)
def add_error(self, error_str) -> 'Node': def structure(self) -> str:
self._errors.append(error_str) """Return structure (and content) as S-expression on a single line
self.error_flag = True without any line breaks."""
return self return oneliner_sxpr(self.as_sxpr())
def propagate_error_flags(self) -> None:
"""Recursively propagates error flags set on child nodes to its
parents. This can be used if errors are added to descendant
nodes after syntaxtree construction, i.e. in the compile phase.
"""
for child in self.children:
child.propagate_error_flags()
self.error_flag = self.error_flag or child.error_flag
def collect_errors(self, clear_errors=False) -> List[Error]: def content(self) -> str:
""" """
Returns all errors of this node or any child node in the form Returns content as string, inserting error messages where
of a set of tuples (position, error_message), where position errors occurred.
is always relative to this node.
""" """
errors = self.errors s = "".join(child.content() for child in self.children) if self.children \
if clear_errors: else str(self.result)
self._errors = [] return (
self.error_flag = False ' <<< Error on "%s" | %s >>> ' % (s, '; '.join(self._errors))) if self._errors else s
if self.children:
for child in self.children:
errors.extend(child.collect_errors(clear_errors))
return errors
def log(self, log_file_name):
st_file_name = log_file_name
with open(os.path.join(log_dir(), st_file_name), "w", encoding="utf-8") as f:
f.write(self.as_sxpr())
def find(self, match_function: Callable) -> Iterator['Node']: def find(self, match_function: Callable) -> Iterator['Node']:
"""Finds nodes in the tree that match a specific criterion. """Finds nodes in the tree that match a specific criterion.
...@@ -458,6 +471,11 @@ class Node: ...@@ -458,6 +471,11 @@ class Node:
# return self.result, # return self.result,
# return nav(path.split('/')) # return nav(path.split('/'))
def log(self, log_file_name):
st_file_name = log_file_name
with open(os.path.join(log_dir(), st_file_name), "w", encoding="utf-8") as f:
f.write(self.as_sxpr())
def mock_syntax_tree(sxpr): def mock_syntax_tree(sxpr):
""" """
...@@ -511,17 +529,6 @@ def mock_syntax_tree(sxpr): ...@@ -511,17 +529,6 @@ def mock_syntax_tree(sxpr):
return Node(MockParser(name, ':' + class_name), result) return Node(MockParser(name, ':' + class_name), result)
def compact_sxpr(s) -> str:
"""Returns S-expression ``s`` as a one liner without unnecessary
whitespace.
Example:
>>> compact_sxpr('(a\\n (b\\n c\\n )\\n)\\n')
'(a (b c))'
"""
return re.sub('\s(?=\))', '', re.sub('\s+', ' ', s)).strip()
TransformationFunc = Union[Callable[[Node], Any], partial] TransformationFunc = Union[Callable[[Node], Any], partial]
......
...@@ -28,7 +28,7 @@ except ImportError: ...@@ -28,7 +28,7 @@ except ImportError:
from DHParser import error_messages from DHParser import error_messages
from DHParser.toolkit import is_logging from DHParser.toolkit import is_logging
from DHParser.syntaxtree import mock_syntax_tree, compact_sxpr from DHParser.syntaxtree import mock_syntax_tree, oneliner_sxpr
__all__ = ('unit_from_configfile', __all__ = ('unit_from_configfile',
'unit_from_json', 'unit_from_json',
...@@ -171,8 +171,8 @@ def grammar_unit(test_unit, parser_factory, transformer_factory, report=True, ve ...@@ -171,8 +171,8 @@ def grammar_unit(test_unit, parser_factory, transformer_factory, report=True, ve
errata.append('Abstract syntax tree test "%s" for parser "%s" failed:' errata.append('Abstract syntax tree test "%s" for parser "%s" failed:'
'\n\tExpr.: %s\n\tExpected: %s\n\tReceived: %s' '\n\tExpr.: %s\n\tExpected: %s\n\tReceived: %s'
% (test_name, parser_name, '\n\t'.join(test_code.split('\n')), % (test_name, parser_name, '\n\t'.join(test_code.split('\n')),
compact_sxpr(compare.as_sxpr()), oneliner_sxpr(compare.as_sxpr()),
compact_sxpr(ast.as_sxpr()))) oneliner_sxpr(ast.as_sxpr())))
tests.setdefault('__err__', {})[test_name] = errata[-1] tests.setdefault('__err__', {})[test_name] = errata[-1]
if verbose: if verbose:
print(infostr + ("OK" if len(errata) == errflag else "FAIL")) print(infostr + ("OK" if len(errata) == errflag else "FAIL"))
......
...@@ -57,8 +57,8 @@ block_environment = known_environment | generic_block ...@@ -57,8 +57,8 @@ block_environment = known_environment | generic_block
known_environment = itemize | enumerate | figure | table | quotation known_environment = itemize | enumerate | figure | table | quotation
| verbatim | verbatim
generic_block = begin_generic_block sequence §end_generic_block generic_block = begin_generic_block sequence §end_generic_block
begin_generic_block = -&SUCC_LB begin_environment &PRED_LB begin_generic_block = -&SUCC_LB begin_environment -&SUCC_LB
end_generic_block = -&SUCC_LB end_environment &PRED_LB end_generic_block = -&SUCC_LB end_environment -&SUCC_LB
itemize = "\begin{itemize}" [PARSEP] { item } §"\end{itemize}" itemize = "\begin{itemize}" [PARSEP] { item } §"\end{itemize}"
enumerate = "\begin{enumerate}" [PARSEP] {item } §"\end{enumerate}" enumerate = "\begin{enumerate}" [PARSEP] {item } §"\end{enumerate}"
...@@ -86,8 +86,8 @@ text_elements = command | text | block | inline_environment ...@@ -86,8 +86,8 @@ text_elements = command | text | block | inline_environment
inline_environment = known_inline_env | generic_inline_env inline_environment = known_inline_env | generic_inline_env
known_inline_env = inline_math known_inline_env = inline_math
generic_inline_env = begin_inline_env { text_elements }+ §end_inline_env generic_inline_env = begin_inline_env { text_elements }+ §end_inline_env
begin_inline_env = (-!SUCC_LB begin_environment) | (begin_environment !PRED_LB) begin_inline_env = (-!SUCC_LB begin_environment) | (begin_environment -!SUCC_LB)
end_inline_env = (-!SUCC_LB end_environment) | (end_environment !PRED_LB) end_inline_env = (-!SUCC_LB end_environment) | (end_environment -!SUCC_LB)
begin_environment = "\begin{" §NAME §"}" begin_environment = "\begin{" §NAME §"}"
end_environment = "\end{" §::NAME §"}" end_environment = "\end{" §::NAME §"}"
...@@ -144,7 +144,7 @@ WSPC = /[ \t]+/ # (horizontal) whitespace ...@@ -144,7 +144,7 @@ WSPC = /[ \t]+/ # (horizontal) whitespace
LF = !PARSEP /[ \t]*\n[ \t]*/ # linefeed but not an empty line LF = !PARSEP /[ \t]*\n[ \t]*/ # linefeed but not an empty line
PARSEP = /[ \t]*(?:\n[ \t]*)+\n[ \t]*/ # at least one empty line, i.e. PARSEP = /[ \t]*(?:\n[ \t]*)+\n[ \t]*/ # at least one empty line, i.e.
# [whitespace] linefeed [whitespace] linefeed # [whitespace] linefeed [whitespace] linefeed
EOF = !/./ EOF = /(?!.)/
SUCC_LB = /(?:.*\n)+\s*$/ # linebreak succeeding an arbitrary chunk of text SUCC_LB = /(?!.)|(?:.*\n)+\s*$/ # linebreak succeeding an arbitrary chunk of text
PRED_LB = /\s*?\n/ # linebreak preceding any text # PRED_LB = /\s*(?!.)|\s*?\n/ # linebreak preceding any text
This diff is collapsed.
...@@ -62,22 +62,22 @@ newpath ...@@ -62,22 +62,22 @@ newpath
36 62 lineto 36 62 lineto
stroke stroke
400 28 moveto 400 28 moveto
(Generations) show (Generations) content
190 46 moveto 190 46 moveto
(10) show (10) content
351 46 moveto 351 46 moveto
(20) show (20) content
512 46 moveto 512 46 moveto
(30) show (30) content
673 46 moveto 673 46 moveto
(40) show (40) content
17 274 moveto 17 274 moveto
90.000000 rotate 90.000000 rotate
(Population) show (Population) content
-90.000000 rotate -90.000000 rotate
32 537 moveto 32 537 moveto
90.000000 rotate 90.000000 rotate
(1.0) show (1.0) content
-90.000000 rotate -90.000000 rotate
0.000000 0.000000 0.500000 setrgbcolor 0.000000 0.000000 0.500000 setrgbcolor
2.000000 setlinewidth 2.000000 setlinewidth
...@@ -362,7 +362,7 @@ newpath ...@@ -362,7 +362,7 @@ newpath
stroke stroke
0.000000 0.000000 0.000000 setrgbcolor 0.000000 0.000000 0.000000 setrgbcolor
165 9 moveto 165 9 moveto
(Dove) show (Dove) content
0.330000 1.000000 1.000000 setrgbcolor 0.330000 1.000000 1.000000 setrgbcolor
newpath newpath
257 15 moveto 257 15 moveto
...@@ -370,7 +370,7 @@ newpath ...@@ -370,7 +370,7 @@ newpath
stroke stroke
0.000000 0.000000 0.000000 setrgbcolor 0.000000 0.000000 0.000000 setrgbcolor
277 9 moveto 277 9 moveto
(Hawk) show (Hawk) content
1.000000 0.000000 1.000000 setrgbcolor 1.000000 0.000000 1.000000 setrgbcolor
newpath newpath
369 15 moveto 369 15 moveto
...@@ -378,7 +378,7 @@ newpath ...@@ -378,7 +378,7 @@ newpath
stroke stroke
0.000000 0.000000 0.000000 setrgbcolor 0.000000 0.000000 0.000000 setrgbcolor
389 9 moveto 389 9 moveto
(Pavlov) show (Pavlov) content
0.000000 1.000000 0.000000 setrgbcolor 0.000000 1.000000 0.000000 setrgbcolor
newpath newpath
481 15 moveto 481 15 moveto
...@@ -386,7 +386,7 @@ newpath ...@@ -386,7 +386,7 @@ newpath
stroke