Commit 9a7f06aa authored by di68kap's avatar di68kap
Browse files

- DHParser/parser.py: support for "anonymizing" named parsers, so that they do not emit named nodes

parent 67b14eb4
......@@ -931,7 +931,7 @@ class EBNFCompiler(Compiler):
definitions = [] # type: List[Tuple[str, str]]
# drop the wrapping sequence node
if len(node.children) == 1 and node.children[0].is_anonymous():
if len(node.children) == 1 and node.children[0].anonymous:
node = node.children[0]
# compile definitions and directives and collect definitions
......
......@@ -32,7 +32,7 @@ for an example.
from collections import defaultdict
import copy
from typing import Callable, cast, List, Tuple, Set, Iterator, Dict, \
from typing import Callable, cast, List, Tuple, Set, Container, Dict, \
DefaultDict, Union, Optional, Any
from DHParser.configuration import get_config_value
......@@ -195,6 +195,13 @@ ApplyFunc = Callable[['Parser'], None]
FlagFunc = Callable[[ApplyFunc, Set[ApplyFunc]], bool]
def copy_parser_attrs(src: 'Parser', duplicate: 'Parser'):
    """Copies the essential parser attributes (`pname`, `anonymous`,
    `tag_name`) from parser `src` to parser `duplicate`.

    Intended as a shared helper for the `__deepcopy__` methods of the
    various Parser subclasses, so the attribute list is maintained in
    one place only.
    """
    duplicate.pname = src.pname
    duplicate.anonymous = src.anonymous
    duplicate.tag_name = src.tag_name
class Parser:
"""
(Abstract) Base class for Parser combinator parsers. Any parser
......@@ -234,8 +241,13 @@ class Parser:
contained parser is repeated zero times.
Attributes and Properties:
pname: The parser name or the empty string in case the parser
remains anonymous.
pname: The parser's name or a (possibly empty) alias name in case
of an anonymous parser.
anonymous: A property indicating that the parser remains anonymous
        with respect to the nodes it returns. For performance
reasons this is implemented as an object variable rather
than a property. This property must always be equal to
`self.tag_name[0] == ":"`.
tag_name: The tag_name for the nodes that are created by
the parser. If the parser is named, this is the same as
`pname`, otherwise it is the name of the parser's type.
......@@ -261,6 +273,7 @@ class Parser:
def __init__(self) -> None:
# assert isinstance(name, str), str(name)
self.pname = '' # type: str
self.anonymous = True # type: bool
self.tag_name = self.ptype # type: str
self.cycle_detection = set() # type: Set[ApplyFunc]
try:
......@@ -277,8 +290,7 @@ class Parser:
calling the same method from the superclass) by the derived class.
"""
duplicate = self.__class__()
duplicate.pname = self.pname
duplicate.tag_name = self.tag_name
copy_parser_attrs(self, duplicate)
return duplicate
def __repr__(self):
......@@ -349,7 +361,7 @@ class Parser:
if history_tracking__:
grammar.call_stack__.append(
((self.repr if self.tag_name in (':RegExp', ':Token', ':DropToken')
else self.tag_name), location))
else (self.pname or self.tag_name)), location))
grammar.moving_forward__ = True
error = None
......@@ -652,14 +664,14 @@ class Grammar:
Upon instantiation the parser objects are deep-copied to the
Grammar object and assigned to object variables of the same name.
Any parser that is directly assigned to a class variable is a
'named' parser and its field `parser.pname` contains the variable
name after instantiation of the Grammar class. All other parsers,
i.e. parsers that are defined within a `named` parser, remain
"anonymous parsers" where `parser.pname` is the empty string.
For any parser that is directly assigned to a class variable the
field `parser.pname` contains the variable name after instantiation
of the Grammar class. The parser will nevertheless remain anonymous
with respect to the tag names of the nodes it generates, if its name
is matched by the `anonymous__` regular expression.
If one and the same parser is assigned to several class variables
such as, for example, the parser `expression` in the example above,
the first name sticks.
which is also assigned to `root__`, the first name sticks.
Grammar objects are callable. Calling a grammar object with a UTF-8
encoded document, initiates the parsing of the document with the
......@@ -682,11 +694,11 @@ class Grammar:
that act as rules to find the reentry point if a ParserError was
thrown during the execution of the parser with the respective name.
anonymous__: Either a regular expression or a set of strings that
identify names of parsers that shall be treated as anonymous parsers,
even though they are assigned to a class field (see
`:func:_assign_parser_names()`). The default is to treat all parsers
starting with an underscore as anonymous in addition to those
anonymous__: A regular expression to identify names of parsers that are
assigned to class fields but shall nevertheless yield anonymous
nodes (i.e. nodes the tag name of which starts with a colon ":"
followed by the parser's class name). The default is to treat all
parsers starting with an underscore as anonymous in addition to those
parsers that are not directly assigned to a class field.
parser_initialization__: Before the grammar class (!) has been initialized,
......@@ -811,7 +823,7 @@ class Grammar:
# root__ must be overwritten with the root-parser by grammar subclass
parser_initialization__ = ["pending"] # type: List[str]
resume_rules__ = dict() # type: Dict[str, ResumeList]
anonymous__ = re.compile(r'_\w+') # type: Union[RxPatternType, Set[str]]
anonymous__ = re.compile(r'_') # type: RxPatternType
# some default values
# COMMENT__ = r'' # type: str # r'#.*(?:\n|$)'
# WSP_RE__ = mixin_comment(whitespace=r'[\t ]*', comment=COMMENT__) # type: str
......@@ -846,11 +858,14 @@ class Grammar:
cdict = cls.__dict__
for entry, parser in cdict.items():
if isinstance(parser, Parser) and sane_parser_name(entry):
anonymous = True if cls.anonymous__.match(entry) else False
if isinstance(parser, Forward):
if not cast(Forward, parser).parser.pname:
cast(Forward, parser).parser.pname = entry
cast(Forward, parser).parser.anonymous = anonymous
else: # if not parser.pname:
parser.pname = entry
parser.anonymous = anonymous
cls.parser_initialization__[0] = "done"
......@@ -958,7 +973,7 @@ class Grammar:
'already exists in grammar object: %s!'
% (parser.pname, str(self.__dict__[parser.pname])))
setattr(self, parser.pname, parser)
parser.tag_name = parser.pname or parser.ptype
parser.tag_name = parser.ptype if parser.anonymous else parser.pname
self.all_parsers__.add(parser)
parser.grammar = self
......@@ -1239,10 +1254,13 @@ class PreprocessorToken(Parser):
assert RX_TOKEN_NAME.match(token)
super(PreprocessorToken, self).__init__()
self.pname = token
if token:
self.anonymous = False
def __deepcopy__(self, memo):
duplicate = self.__class__(self.pname)
# duplicate.pname = self.pname # will be written by the constructor, anyway
# duplicate.pname = self.pname
duplicate.anonymous = self.anonymous
duplicate.tag_name = self.tag_name
return duplicate
......@@ -1295,13 +1313,12 @@ class Token(Parser):
def __deepcopy__(self, memo):
duplicate = self.__class__(self.text)
duplicate.pname = self.pname
duplicate.tag_name = self.tag_name
copy_parser_attrs(self, duplicate)
return duplicate
def _parse(self, text: StringView) -> Tuple[Optional[Node], StringView]:
if text.startswith(self.text):
if self.text or self.pname:
if self.text or not self.anonymous:
return Node(self.tag_name, self.text, True), text[self.len:]
return EMPTY_NODE, text[0:]
return None, text
......@@ -1316,7 +1333,7 @@ class DropToken(Token):
string on a match. Violates the invariant: str(parse(text)) == text !
"""
def _parse(self, text: StringView) -> Tuple[Optional[Node], StringView]:
assert not self.pname, "DropToken must not be used for named parsers!"
assert self.anonymous, "DropToken must not be used for named parsers!"
if text.startswith(self.text):
return EMPTY_NODE, text[self.len:]
# return Node(self.tag_name, self.text, True), text[self.len:]
......@@ -1354,15 +1371,14 @@ class RegExp(Parser):
except TypeError:
regexp = self.regexp.pattern
duplicate = self.__class__(regexp)
duplicate.pname = self.pname
duplicate.tag_name = self.tag_name
copy_parser_attrs(self, duplicate)
return duplicate
def _parse(self, text: StringView) -> Tuple[Optional[Node], StringView]:
match = text.match(self.regexp)
if match:
capture = match.group(0)
if capture or self.pname:
if capture or not self.anonymous:
end = text.index(match.end())
return Node(self.tag_name, capture, True), text[end:]
assert text.index(match.end()) == 0
......@@ -1414,7 +1430,7 @@ class Whitespace(RegExp):
match = text.match(self.regexp)
if match:
capture = match.group(0)
if capture or self.pname:
if capture or not self.anonymous:
end = text.index(match.end())
return Node(self.tag_name, capture, True), text[end:]
else:
......@@ -1433,7 +1449,7 @@ class DropWhitespace(Whitespace):
"""
def _parse(self, text: StringView) -> Tuple[Optional[Node], StringView]:
assert not self.pname, "DropWhitespace must not be used for named parsers!"
assert self.anonymous, "DropWhitespace must not be used for named parsers!"
match = text.match(self.regexp)
if match:
# capture = match.group(0)
......@@ -1476,14 +1492,14 @@ class MetaParser(Parser):
assert node is None or isinstance(node, Node)
if self._grammar.flatten_tree__:
if node:
if self.pname:
if self.anonymous:
return node
if node.tag_name[0] == ':': # faster than node.is_anonymous()
return Node(self.tag_name, node._result)
return Node(self.tag_name, node)
return node
elif self.pname:
return Node(self.tag_name, ())
elif self.anonymous:
return EMPTY_NODE # avoid creation of a node object for anonymous empty nodes
return Node(self.tag_name, ())
return Node(self.tag_name, node or ()) # unoptimized code
@cython.locals(N=cython.int)
......@@ -1510,9 +1526,9 @@ class MetaParser(Parser):
elif N == 1:
return self._return_value(results[0])
elif self._grammar.flatten_tree__:
if self.pname:
return Node(self.tag_name, ())
if self.anonymous:
return EMPTY_NODE # avoid creation of a node object for anonymous empty nodes
return Node(self.tag_name, ())
return Node(self.tag_name, results) # unoptimized code
......@@ -1535,8 +1551,7 @@ class UnaryParser(MetaParser):
def __deepcopy__(self, memo):
parser = copy.deepcopy(self.parser, memo)
duplicate = self.__class__(parser)
duplicate.pname = self.pname
duplicate.tag_name = self.tag_name
copy_parser_attrs(self, duplicate)
return duplicate
def _apply(self, func: ApplyFunc, flip: FlagFunc) -> bool:
......@@ -1566,8 +1581,7 @@ class NaryParser(MetaParser):
def __deepcopy__(self, memo):
parsers = copy.deepcopy(self.parsers, memo)
duplicate = self.__class__(*parsers)
duplicate.pname = self.pname
duplicate.tag_name = self.tag_name
copy_parser_attrs(self, duplicate)
return duplicate
def _apply(self, func: ApplyFunc, flip: FlagFunc) -> bool:
......@@ -1840,8 +1854,7 @@ class Series(NaryParser):
parsers = copy.deepcopy(self.parsers, memo)
duplicate = self.__class__(*parsers, mandatory=self.mandatory,
err_msgs=self.err_msgs, skip=self.skip)
duplicate.pname = self.pname
duplicate.tag_name = self.tag_name
copy_parser_attrs(self, duplicate)
return duplicate
@cython.locals(pos=cython.int, reloc=cython.int)
......@@ -2058,8 +2071,7 @@ class AllOf(NaryParser):
duplicate = self.__class__(*parsers, mandatory=self.mandatory,
err_msgs=self.err_msgs, skip=self.skip)
duplicate.pname = self.pname
duplicate.tag_name = self.tag_name
duplicate.num_parsers = self.num_parsers
copy_parser_attrs(self, duplicate)
return duplicate
def _parse(self, text: StringView) -> Tuple[Optional[Node], StringView]:
......@@ -2219,7 +2231,7 @@ class Lookahead(FlowParser):
if self.sign(node is not None):
# static analysis requires lookahead to be disabled at document end
# or (self.grammar.static_analysis_pending__ and not text)):
return Node(self.tag_name, '') if self.pname else EMPTY_NODE, text
return (EMPTY_NODE if self.anonymous else Node(self.tag_name, '')), text
else:
return None, text
......@@ -2364,8 +2376,7 @@ class Retrieve(Parser):
def __deepcopy__(self, memo):
duplicate = self.__class__(self.symbol, self.filter)
duplicate.pname = self.pname
duplicate.tag_name = self.tag_name
copy_parser_attrs(self, duplicate)
return duplicate
def _parse(self, text: StringView) -> Tuple[Optional[Node], StringView]:
......@@ -2417,8 +2428,7 @@ class Pop(Retrieve):
def __deepcopy__(self, memo):
duplicate = self.__class__(self.symbol, self.filter)
duplicate.pname = self.pname
duplicate.tag_name = self.tag_name
copy_parser_attrs(self, duplicate)
duplicate.values = self.values[:]
return duplicate
......@@ -2498,6 +2508,7 @@ class Forward(Parser):
def __deepcopy__(self, memo):
duplicate = self.__class__()
# duplicate.pname = self.pname # Forward-Parsers should never have a name!
duplicate.anonymous = self.anonymous
duplicate.tag_name = self.tag_name
memo[id(self)] = duplicate
parser = copy.deepcopy(self.parser, memo)
......
......@@ -347,7 +347,8 @@ class Node: # (collections.abc.Sized): Base class omitted for cython-compatibil
except KeyError:
return surrogate
def is_anonymous(self) -> bool:
@property
def anonymous(self) -> bool:
"""Returns True, if the Node is an "anonymous" Node, i.e. a node that
has not been created by a named parser.
......
......@@ -403,12 +403,12 @@ def is_single_child(context: List[Node]) -> bool:
def is_named(context: List[Node]) -> bool:
"""Returns ``True`` if the current node's parser is a named parser."""
return not context[-1].is_anonymous()
return not context[-1].anonymous
def is_anonymous(context: List[Node]) -> bool:
"""Returns ``True`` if the current node's parser is an anonymous parser."""
return context[-1].is_anonymous()
return context[-1].anonymous
def is_insignificant_whitespace(context: List[Node]) -> bool:
......@@ -563,7 +563,7 @@ def _replace_by(node: Node, child: Node):
"""
Replaces node's contents by child's content including the tag name.
"""
if node.is_anonymous() or not child.is_anonymous():
if node.anonymous or not child.anonymous:
node.tag_name = child.tag_name
# name, ptype = (node.tag_name.split(':') + [''])[:2]
# child.parser = MockParser(name, ptype)
......
[match:json]
M1*: """
M1*: """{
"object":
{
"one": 1,
"two": 2,
"three": ["3"],
"fraction": 1.5,
"unicode": "Text with \uc4a3(unicode)"
},
"array": ["one", 2, 3],
"string": " string example ",
"true": true,
"false": false,
"null": null
}"""
M2: """
{
"leading and trailing whitespace": true
......@@ -11,7 +28,7 @@ M1*: """
[ast:json]
[fail:json]
M2: """
F1: """
{
"leading and trailing whitespace": True,
......@@ -20,6 +37,21 @@ M2: """
"""
F2: """{
"object":
{
"one": 1,
"two": 2,
"three": ["3"]
"fraction": 1.5,
"unicode": "\xc4a3"
},
"array": ["one", 2, 3],
"string": " string example ",
"true": true,
"false": false,
"null": null
}"""
[match:element]
......
......@@ -21,21 +21,25 @@
@ object_resume = /\}\s*/
@ member_error = /\w+/, 'Possible non-numerical and non-string values are `true`, `false` or `null` (always written with small letters and without quotation marks).'
@ member_error = /["\'`´]/, 'String values must be enclosed by double-quotation marks: "..."!'
@ member_error = /\\/, 'Possible escaped values are /, \\, b, n, r, t, or u.'
@ member_error = /\d/, '{1} does not represent a valid number or other value.'
@ member_resume = /(?=,|\})/
@ _members_resume = /(?="[^"\n]+":)/
@ string_error = /\\/, 'Possible escaped values are \\/, \\\\, \\b, \\n, \\r, \\t, or \\u, but not {1}'
@ string_error = '', 'Illegal character "{1}" in string.'
@ string_skip = /(?=")/
json = ~ element EOF
element = object | array | string | number | bool | null
object = "{" [member { "," §member }] "}"
member = string §":" element
array = "[" [element { "," element }] "]"
string = `"` §CHARACTERS `"` ~
json = ~ _element _EOF
_element = object | array | string | number | bool | null
object = "{" {_members} §"}"
# this construct allows better error tolerance, see 02_test_JDON_elements [fail:json] F2
_members = member { "," §member &(","|"}") }
member = string §":" _element
array = "[" [_element { "," _element }] "]"
string = `"` §_CHARACTERS `"` ~
number = INT FRAC EXP ~
bool = /true/~ | /false/~ # use regexes so values are not dropped as tokens
null = "null"
......@@ -46,13 +50,17 @@ null = "null"
#
#######################################################################
CHARACTERS = { /[^"\\]+/ | ESCAPE }
ESCAPE = /\\[\/bnrt\\]/ | /\\u/ HEX HEX HEX HEX
HEX = /[0-9a-fA-F]/
_CHARACTERS = { PLAIN | ESCAPE }
PLAIN = /[^"\\]+/
ESCAPE = /\\[\/bnrt\\]/ | UNICODE
UNICODE = "\u" HEX HEX
HEX = /[0-9a-fA-F][0-9a-fA-F]/
INT = [`-`] /[0-9]/ | /[1-9][0-9]+/
FRAC = [ `.` /[0-9]+/ ]
INT = [NEG] /[0-9]/ | /[1-9][0-9]+/
NEG = `-`
FRAC = [ DOT /[0-9]+/ ]
DOT = `.`
EXP = [ (`E`|`e`) [`+`|`-`] /[0-9]+/ ]
EOF = !/./ # no more characters ahead, end of file reached
_EOF = !/./ # no more characters ahead, end of file reached
......@@ -62,35 +62,40 @@ def get_preprocessor() -> PreprocessorFunc:
class jsonGrammar(Grammar):
r"""Parser for a json source file.
"""
element = Forward()
source_hash__ = "98f4e07c153dbfc9c2c13f2798e8122b"
_element = Forward()
source_hash__ = "f8b915f36ed3e2555e285af7e8483f11"
static_analysis_pending__ = [True]
parser_initialization__ = ["upon instantiation"]
string_skip__ = [re.compile(r'(?=")')]
string_err_msg__ = [(re.compile(r'(?=)'), 'Illegal character "{1}" in string.')]
member_err_msg__ = [(re.compile(r'\w+'), 'Possible non-numerical and non-string values are `true`, `false` or `null` (always written with small letters and without quotation marks).'), (re.compile(r'["\'`´]'), 'String values must be enclosed by double-quotation marks: "..."!'), (re.compile(r'\\'), 'Possible escaped values are /, \\, b, n, r, t, or u.'), (re.compile(r'\d'), '{1} does not represent a valid number or other value.')]
resume_rules__ = {'object': [re.compile(r'\}\s*')], 'member': [re.compile(r'(?=,|\})')]}
string_err_msg__ = [(re.compile(r'\\'), 'Possible escaped values are \\/, \\\\, \\b, \\n, \\r, \\t, or \\u, but not {1}'), (re.compile(r'(?=)'), 'Illegal character "{1}" in string.')]
member_err_msg__ = [(re.compile(r'["\'`´]'), 'String values must be enclosed by double-quotation marks: "..."!')]
resume_rules__ = {'object': [re.compile(r'\}\s*')], 'member': [re.compile(r'(?=,|\})')], '_members': [re.compile(r'(?="[^"\n]+":)')]}
COMMENT__ = r'(?:\/\/|#).*'
comment_rx__ = re.compile(COMMENT__)
WHITESPACE__ = r'\s*'
WSP_RE__ = mixin_comment(whitespace=WHITESPACE__, comment=COMMENT__)
dwsp__ = DropWhitespace(WSP_RE__)
EOF = NegativeLookahead(RegExp('.'))
_EOF = NegativeLookahead(RegExp('.'))
EXP = Option(Series(Alternative(DropToken("E"), DropToken("e")), Option(Alternative(DropToken("+"), DropToken("-"))), RegExp('[0-9]+')))
FRAC = Option(Series(DropToken("."), RegExp('[0-9]+')))
INT = Alternative(Series(Option(DropToken("-")), RegExp('[0-9]')), RegExp('[1-9][0-9]+'))
HEX = RegExp('[0-9a-fA-F]')
ESCAPE = Alternative(RegExp('\\\\[/bnrt\\\\]'), Series(RegExp('\\\\u'), HEX, HEX, HEX, HEX))
CHARACTERS = ZeroOrMore(Alternative(RegExp('[^"\\\\]+'), ESCAPE))
DOT = Token(".")
FRAC = Option(Series(DOT, RegExp('[0-9]+')))
NEG = Token("-")
INT = Alternative(Series(Option(NEG), RegExp('[0-9]')), RegExp('[1-9][0-9]+'))
HEX = RegExp('[0-9a-fA-F][0-9a-fA-F]')
UNICODE = Series(Series(DropToken("\\u"), dwsp__), HEX, HEX)
ESCAPE = Alternative(RegExp('\\\\[/bnrt\\\\]'), UNICODE)
PLAIN = RegExp('[^"\\\\]+')
_CHARACTERS = ZeroOrMore(Alternative(PLAIN, ESCAPE))
null = Series(Token("null"), dwsp__)
bool = Alternative(Series(RegExp('true'), dwsp__), Series(RegExp('false'), dwsp__))
number = Series(INT, FRAC, EXP, dwsp__)
string = Series(DropToken('"'), CHARACTERS, DropToken('"'), dwsp__, mandatory=1, err_msgs=string_err_msg__, skip=string_skip__)
array = Series(Series(DropToken("["), dwsp__), Option(Series(element, ZeroOrMore(Series(Series(DropToken(","), dwsp__), element)))), Series(DropToken("]"), dwsp__))
member = Series(string, Series(DropToken(":"), dwsp__), element, mandatory=1, err_msgs=member_err_msg__)
object = Series(Series(DropToken("{"), dwsp__), Option(Series(member, ZeroOrMore(Series(Series(DropToken(","), dwsp__), member, mandatory=1)))), Series(DropToken("}"), dwsp__))
element.set(Alternative(object, array, string, number, bool, null))
json = Series(dwsp__, element, EOF)
string = Series(DropToken('"'), _CHARACTERS, DropToken('"'), dwsp__, mandatory=1, err_msgs=string_err_msg__, skip=string_skip__)
array = Series(Series(DropToken("["), dwsp__), Option(Series(_element, ZeroOrMore(Series(Series(DropToken(","), dwsp__), _element)))), Series(DropToken("]"), dwsp__))
member = Series(string, Series(DropToken(":"), dwsp__), _element, mandatory=1, err_msgs=member_err_msg__)
_members = Series(member, ZeroOrMore(Series(Series(DropToken(","), dwsp__), member, Lookahead(Alternative(Series(DropToken(","), dwsp__), Series(DropToken("}"), dwsp__))), mandatory=1)))
object = Series(Series(DropToken("{"), dwsp__), ZeroOrMore(_members), Series(DropToken("}"), dwsp__), mandatory=2)
_element.set(Alternative(object, array, string, number, bool, null))
json = Series(dwsp__, _element, _EOF)
root__ = json
def get_grammar() -> jsonGrammar:
......@@ -114,24 +119,9 @@ def get_grammar() -> jsonGrammar:
json_AST_transformation_table = {
# AST Transformations for the json-grammar
# "<": flatten,
"json": [remove_nodes('EOF'), replace_by_single_child],
"element": [replace_by_single_child],
"object": [],
"member": [],
"array": [],
"string": [collapse],
"json": [replace_by_single_child],
"number": [collapse],
"bool": [],
"null": [],
"CHARACTERS": [],
"ESCAPE": [],
"HEX": [],
"INT": [],
"FRAC": [],
"EXP": [],
"EOF": [],
# "*": replace_by_single_child
"string": [reduce_single_child],
}
......
......@@ -49,7 +49,7 @@ def run_grammar_tests(glob_pattern, get_grammar, get_transformer):
if __name__ == '__main__':
argv = sys.argv[:]
if len(argv) > 1 and sys.argv[1] == "--debug":
LOGGING = True
LOGGING = 'LOGS'
del argv[1]
if (len(argv) >= 2 and (argv[1].endswith('.ebnf') or
os.path.splitext(argv[1])[1].lower() in testing.TEST_READERS.keys())):
......
......@@ -930,6 +930,7 @@ class TestMetaParser:
self.mp = MetaParser()
self.mp.grammar = Grammar() # override placeholder warning
self.mp.pname = "named"
self.mp.anonymous = False
self.mp.tag_name = self.mp.pname
def test_return_value(self):
......@@ -956,6 +957,7 @@ class TestMetaParser:
nd = self.mp._return_value(EMPTY_NODE)
assert nd.tag_name == 'named' and not nd.children, nd.as_sxpr()
self.mp.pname = ''
self.mp.anonymous = True
self.mp.tag_name = ':unnamed'
nd = self.mp._return_value(Node('tagged', 'non-empty'))
assert nd.tag_name == 'tagged', nd.as_sxpr()
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment