Commit a21decfb authored by di68kap
Browse files

- check_examples.py - script added: just quickly check if all(!) examples are still working

parent 447927f6
......@@ -59,7 +59,7 @@ class ArithmeticGrammar(Grammar):
digit = Forward()
expression = Forward()
variable = Forward()
source_hash__ = "c4e6e090ef9673b972ba18ef39fe7c8e"
source_hash__ = "120070baa84f5a2bd1bbb900627078fc"
parser_initialization__ = "upon instantiation"
resume_rules__ = {}
COMMENT__ = r''
......@@ -76,10 +76,13 @@ class ArithmeticGrammar(Grammar):
root__ = expression
def get_grammar() -> ArithmeticGrammar:
    """Return the ArithmeticGrammar singleton cached on GLOBALS,
    creating it on first use.

    NOTE: the former ``global GLOBALS`` declaration was dropped — GLOBALS
    is never rebound here, only its attributes are set, so the declaration
    had no effect.
    """
    try:
        grammar = GLOBALS.Arithmetic_1_grammar_singleton
    except AttributeError:
        # First access: instantiate the grammar and cache it on GLOBALS.
        GLOBALS.Arithmetic_1_grammar_singleton = ArithmeticGrammar()
        if hasattr(get_grammar, 'python_src__'):
            # Forward the generated Python source if the compiler attached it.
            GLOBALS.Arithmetic_1_grammar_singleton.python_src__ = get_grammar.python_src__
        grammar = GLOBALS.Arithmetic_1_grammar_singleton
    return grammar
......
......@@ -57,7 +57,7 @@ class BibTeXGrammar(Grammar):
r"""Parser for a BibTeX source file.
"""
text = Forward()
source_hash__ = "569bee4a051ea4d9f625ad9bbd46a7a2"
source_hash__ = "e402951b290cb0fce63ba0cbca3f23e9"
parser_initialization__ = "upon instantiation"
resume_rules__ = {}
COMMENT__ = r'(?i)%.*(?:\n|$)'
......@@ -83,10 +83,13 @@ class BibTeXGrammar(Grammar):
root__ = bibliography
def get_grammar() -> BibTeXGrammar:
    """Fetch the cached BibTeXGrammar instance, instantiating it lazily."""
    global GLOBALS
    if not hasattr(GLOBALS, 'BibTeX_1_grammar_singleton'):
        # First request: build the grammar and publish it on GLOBALS.
        singleton = BibTeXGrammar()
        if hasattr(get_grammar, 'python_src__'):
            singleton.python_src__ = get_grammar.python_src__
        GLOBALS.BibTeX_1_grammar_singleton = singleton
    return GLOBALS.BibTeX_1_grammar_singleton
......
......@@ -57,7 +57,7 @@ class EBNFGrammar(Grammar):
r"""Parser for an EBNF source file.
"""
expression = Forward()
source_hash__ = "82a7c668f86b83f86515078e6c9093ed"
source_hash__ = "5e9e65a057bec7da29989dba47f40394"
parser_initialization__ = "upon instantiation"
resume_rules__ = {}
COMMENT__ = r'#.*(?:\n|$)'
......@@ -86,10 +86,13 @@ class EBNFGrammar(Grammar):
root__ = syntax
def get_grammar() -> EBNFGrammar:
    """Lazily create and return the EBNFGrammar singleton stored on GLOBALS."""
    global GLOBALS
    try:
        return GLOBALS.EBNF_1_grammar_singleton
    except AttributeError:
        # Not yet created for this GLOBALS object: build and cache it.
        grammar = EBNFGrammar()
        GLOBALS.EBNF_1_grammar_singleton = grammar
        if hasattr(get_grammar, 'python_src__'):
            grammar.python_src__ = get_grammar.python_src__
        return grammar
......
......@@ -153,10 +153,13 @@ class LaTeXGrammar(Grammar):
root__ = latexdoc
def get_grammar() -> LaTeXGrammar:
    """Return the LaTeXGrammar singleton stored on GLOBALS, creating it
    on first access."""
    global GLOBALS
    try:
        grammar = GLOBALS.LaTeX_1_grammar_singleton
    except AttributeError:
        # Not yet created: instantiate the grammar and cache it on GLOBALS.
        GLOBALS.LaTeX_1_grammar_singleton = LaTeXGrammar()
        if hasattr(get_grammar, 'python_src__'):
            # Attach the generated Python source, if the caller provided it.
            GLOBALS.LaTeX_1_grammar_singleton.python_src__ = get_grammar.python_src__
        grammar = GLOBALS.LaTeX_1_grammar_singleton
    return grammar
......
......@@ -26,7 +26,7 @@ from DHParser import logging, is_filename, load_if_file, \
ZeroOrMore, Forward, NegativeLookahead, Required, mixin_comment, compile_source, \
grammar_changed, last_value, counterpart, accumulate, PreprocessorFunc, \
Node, TransformationFunc, TransformationDict, Token, \
traverse, remove_children_if, is_anonymous, \
traverse, remove_children_if, is_anonymous, GLOBALS, \
reduce_single_child, replace_by_single_child, replace_or_reduce, remove_whitespace, \
remove_expendables, remove_empty, remove_tokens, flatten, is_whitespace, \
is_empty, is_expendable, collapse, replace_content, WHITESPACE_PTYPE, TOKEN_PTYPE, \
......@@ -56,216 +56,7 @@ def get_preprocessor() -> PreprocessorFunc:
#######################################################################
class XMLGrammar(Grammar):
r"""Parser for a XML source file, with this grammar:
# XML-grammar, see https://www.w3.org/TR/REC-xml/
#######################################################################
#
# EBNF-Directives
#
#######################################################################
@ whitespace = /\s*/ # insignificant whitespace, signified by ~
@ literalws = none # literals have no implicit whitespace
@ comment = // # no implicit comments
@ ignorecase = False # literals and regular expressions are case-sensitive
#######################################################################
#
# Document Frame and Prolog
#
#######################################################################
document = prolog element [Misc] EOF
prolog = [ ~ XMLDecl ] [Misc] [doctypedecl [Misc]]
XMLDecl = '<?xml' VersionInfo [EncodingDecl] [SDDecl] ~ '?>'
VersionInfo = ~ 'version' ~ '=' ~ ("'" VersionNum "'" | '"' VersionNum '"')
VersionNum = /[0-9]+\.[0-9]+/
EncodingDecl = ~ 'encoding' ~ '=' ~ ("'" EncName "'" | '"' EncName '"')
EncName = /[A-Za-z][A-Za-z0-9._\-]*/
SDDecl = ~ 'standalone' ~ '=' ~ (("'" Yes | No "'") | ('"' Yes | No '"'))
Yes = 'yes'
No = 'no'
#######################################################################
#
# Document Type Definition
#
#######################################################################
doctypedecl = '<!DOCTYPE' ~ Name [~ ExternalID] ~ ['[' intSubset ']' ~] '>'
intSubset = { markupdecl | DeclSep }
DeclSep = PEReference | S
markupdecl = elementdecl | AttlistDecl | EntityDecl | NotationDecl | PI | Comment
extSubset = [TextDecl] extSubsetDecl
extSubsetDecl = { markupdecl | conditionalSect | DeclSep }
conditionalSect = includeSect | ignoreSect
includeSect = '<![' ~ 'INCLUDE' ~ '[' extSubsetDecl ']]>'
ignoreSect = '<![' ~ 'IGNORE' ~ '[' ignoreSectContents ']]>'
ignoreSectContents = IgnoreChars {'<![' ignoreSectContents ']]>' IgnoreChars}
extParsedEnt = [TextDecl] content
TextDecl = '<?xml' [VersionInfo] EncodingDecl ~ '?>'
elementdecl = '<!ELEMENT' §S Name ~ contentspec ~ '>'
contentspec = EMPTY | ANY | Mixed | children
EMPTY = 'EMPTY'
ANY = 'ANY'
Mixed = '(' ~ '#PCDATA' { ~ '|' ~ Name } ~ ')*'
| '(' ~ '#PCDATA' ~ ')'
children = (choice | seq) ['?' | '*' | '+']
choice = '(' ~ { ~ '|' ~ cp }+ ~ ')'
cp = (Name | choice | seq) ['?' | '*' | '+']
seq = '(' ~ cp { ~ ',' ~ cp } ~ ')'
AttlistDecl = '<!ATTLIST' §S Name { ~ AttDef } ~ '>'
AttDef = Name ~ §AttType S DefaultDecl
AttType = StringType | TokenizedType | EnumeratedType
StringType = 'CDATA'
TokenizedType = ID | IDREF | IDREFS | ENTITY | ENTITIES | NMTOKEN | NMTOKENS
ID = 'ID'
IDREF = 'IDREF'
IDREFS = 'IDREFS'
ENTITY = 'ENTITY'
ENTITIES = 'ENTITIES'
NMTOKEN = 'NMTOKEN'
NMTOKENS = 'NMTOKENS'
EnumeratedType = NotationType | Enumeration
NotationType = 'NOTATION' S '(' ~ Name { ~ '|' ~ Name } ~ ')'
Enumeration = '(' ~ Nmtoken { ~ '|' ~ Nmtoken } ~ ')'
DefaultDecl = REQUIRED | IMPLIED | FIXED
REQUIRED = '#REQUIRED'
IMPLIED = '#IMPLIED'
FIXED = ['#FIXED' S] AttValue
EntityDecl = GEDecl | PEDecl
GEDecl = '<!ENTITY' S Name §S EntityDef ~ '>'
PEDecl = '<!ENTITY' S '%' §S Name S PEDef ~ '>'
EntityDef = EntityValue | ExternalID [NDataDecl]
PEDef = EntityValue | ExternalID
NotationDecl = '<!NOTATION' §S Name ~ (ExternalID | PublicID) ~ '>'
ExternalID = 'SYSTEM' §S SystemLiteral
PublicID = 'PUBLIC' §S PubidLiteral
NDataDecl = 'NData' §S Name
#######################################################################
#
# Logical Structures
#
#######################################################################
element = emptyElement | STag §content ETag
STag = '<' TagName { ~ Attribute } ~ '>'
ETag = '</' §::TagName ~ '>'
emptyElement = '<' Name { ~ Attribute } ~ '/>'
TagName = Name
Attribute = Name ~ §'=' ~ AttValue
content = [ CharData ]
{ (element | Reference | CDSect | PI | Comment)
[CharData] }
#######################################################################
#
# Literals
#
#######################################################################
EntityValue = '"' { /[^%&"]+/ | PEReference | Reference } '"'
| "'" { /[^%&']+/ | PEReference | Reference } "'"
AttValue = '"' { /[^<&"]+/ | Reference } '"'
| "'" { /[^<&']+/ | Reference } "'"
SystemLiteral = '"' /[^"]*/ '"' | "'" /[^']*/ "'"
PubidLiteral = '"' [PubidChars] '"'
| "'" [PubidCharsSingleQuoted] "'"
#######################################################################
#
# References
#
#######################################################################
Reference = EntityRef | CharRef
EntityRef = '&' Name ';'
PEReference = '%' Name ';'
#######################################################################
#
# Names and Tokens
#
#######################################################################
Nmtokens = Nmtoken { / / Nmtoken }
Nmtoken = NameChars
Names = Name { / / Name }
Name = NameStartChar [NameChars]
NameStartChar = /_|:|[A-Z]|[a-z]
|[\u00C0-\u00D6]|[\u00D8-\u00F6]|[\u00F8-\u02FF]
|[\u0370-\u037D]|[\u037F-\u1FFF]|[\u200C-\u200D]
|[\u2070-\u218F]|[\u2C00-\u2FEF]|[\u3001-\uD7FF]
|[\uF900-\uFDCF]|[\uFDF0-\uFFFD]
|[\U00010000-\U000EFFFF]/
NameChars = /(?:_|:|-|\.|[A-Z]|[a-z]|[0-9]
|\u00B7|[\u0300-\u036F]|[\u203F-\u2040]
|[\u00C0-\u00D6]|[\u00D8-\u00F6]|[\u00F8-\u02FF]
|[\u0370-\u037D]|[\u037F-\u1FFF]|[\u200C-\u200D]
|[\u2070-\u218F]|[\u2C00-\u2FEF]|[\u3001-\uD7FF]
|[\uF900-\uFDCF]|[\uFDF0-\uFFFD]
|[\U00010000-\U000EFFFF])+/
#######################################################################
#
# Comments, Processing Instructions and CDATA sections
#
#######################################################################
Misc = { Comment | PI | S }+
Comment = '<!--' { CommentChars | /-(?!-)/ } '-->'
PI = '<?' PITarget [~ PIChars] '?>'
PITarget = !/X|xM|mL|l/ Name
CDSect = '<![CDATA[' CData ']]>'
#######################################################################
#
# Characters, Explicit Whitespace and End of File
#
#######################################################################
PubidCharsSingleQuoted = /(?:\x20|\x0D|\x0A|[a-zA-Z0-9]|[-()+,.\/:=?;!*#@$_%])+/
PubidChars = /(?:\x20|\x0D|\x0A|[a-zA-Z0-9]|[-'()+,.\/:=?;!*#@$_%])+/
CharData = /(?:(?!\]\]>)[^<&])+/
CData = /(?:(?!\]\]>)(?:\x09|\x0A|\x0D|[\u0020-\uD7FF]|[\uE000-\uFFFD]|[\U00010000-\U0010FFFF]))+/
IgnoreChars = /(?:(?!(?:<!\[)|(?:\]\]>))(?:\x09|\x0A|\x0D|[\u0020-\uD7FF]|[\uE000-\uFFFD]|[\U00010000-\U0010FFFF]))+/
PIChars = /(?:(?!\?>)(?:\x09|\x0A|\x0D|[\u0020-\uD7FF]|[\uE000-\uFFFD]|[\U00010000-\U0010FFFF]))+/
CommentChars = /(?:(?!-)(?:\x09|\x0A|\x0D|[\u0020-\uD7FF]|[\uE000-\uFFFD]|[\U00010000-\U0010FFFF]))+/
CharRef = ('&#' /[0-9]+/ ';') | ('&#x' /[0-9a-fA-F]+/ ';')
Chars = /(?:\x09|\x0A|\x0D|[\u0020-\uD7FF]|[\uE000-\uFFFD]|[\U00010000-\U0010FFFF])+/
Char = /\x09|\x0A|\x0D|[\u0020-\uD7FF]|[\uE000-\uFFFD]|[\U00010000-\U0010FFFF]/
S = /\s+/ # whitespace
EOF = !/./ # no more characters ahead, end of file reached
r"""Parser for a XML source file.
"""
DeclSep = Forward()
EncodingDecl = Forward()
......@@ -277,8 +68,9 @@ class XMLGrammar(Grammar):
extSubsetDecl = Forward()
ignoreSectContents = Forward()
markupdecl = Forward()
source_hash__ = "52808225879f254ab3099942adde3b59"
source_hash__ = "1c64c8f613952c5ab8e851da15f65ec3"
parser_initialization__ = "upon instantiation"
resume_rules__ = {}
COMMENT__ = r''
WHITESPACE__ = r'\s*'
WSP_RE__ = mixin_comment(whitespace=WHITESPACE__, comment=COMMENT__)
......@@ -382,12 +174,14 @@ class XMLGrammar(Grammar):
root__ = document
def get_grammar() -> XMLGrammar:
    """Return the XMLGrammar singleton cached on GLOBALS, creating it lazily.

    NOTE(review): this span mixed the pre-commit implementation (module-level
    ``thread_local_XML_grammar_singleton`` with ``except NameError``) with the
    post-commit GLOBALS-based cache, which is not valid Python (one ``try``
    followed by two disjoint handler chains). Only the new version is kept,
    matching the other ``get_grammar`` functions in this commit.
    """
    global GLOBALS
    try:
        grammar = GLOBALS.XML_1_grammar_singleton
    except AttributeError:
        # First access: instantiate the grammar and cache it on GLOBALS.
        GLOBALS.XML_1_grammar_singleton = XMLGrammar()
        if hasattr(get_grammar, 'python_src__'):
            GLOBALS.XML_1_grammar_singleton.python_src__ = get_grammar.python_src__
        grammar = GLOBALS.XML_1_grammar_singleton
    return grammar
......
......@@ -46,8 +46,7 @@ if __name__ == '__main__':
if arg.endswith('.ebnf'):
recompile_grammar(arg, force=True)
else:
recompile_grammar(os.path.join(scriptpath, 'XML.ebnf'),
force=False)
recompile_grammar(os.path.join(scriptpath, 'XML.ebnf'), force=False)
sys.path.append('.')
from XMLCompiler import get_grammar, get_transformer
error_report = run_grammar_tests(glob_pattern=arg)
......
......@@ -34,6 +34,16 @@ Yes = 'yes'
No = 'no'
#######################################################################
#
# Document Type Definition stub
#
#######################################################################
doctypedecl = '<!DOCTYPE' ~ Name [~ ExternalID] ~ ['[' intSubset ']' ~] '>'
ExternalID = 'SYSTEM' §S SystemLiteral
intSubset = /(?:(?!\][^\]])[^<&])+/
#######################################################################
#
# Logical Structures
......
#!/usr/bin/python
#######################################################################
#
# SYMBOLS SECTION - Can be edited. Changes will be preserved.
#
#######################################################################
import collections
from functools import partial
import os
import sys
sys.path.append(r'C:\Users\di68kap\PycharmProjects\DHParser')
try:
import regex as re
except ImportError:
import re
from DHParser import logging, is_filename, load_if_file, MockParser, \
Grammar, Compiler, nil_preprocessor, PreprocessorToken, Whitespace, \
Lookbehind, Lookahead, Alternative, Pop, Token, Synonym, AllOf, SomeOf, Unordered, \
Option, NegativeLookbehind, OneOrMore, RegExp, Retrieve, Series, Capture, \
ZeroOrMore, Forward, NegativeLookahead, Required, mixin_comment, compile_source, \
grammar_changed, last_value, counterpart, accumulate, PreprocessorFunc, \
Node, TransformationFunc, TransformationDict, transformation_factory, traverse, \
remove_children_if, move_whitespace, normalize_whitespace, is_anonymous, matches_re, \
reduce_single_child, replace_by_single_child, replace_or_reduce, remove_whitespace, \
remove_expendables, remove_empty, remove_tokens, flatten, is_whitespace, is_empty, \
is_expendable, collapse, collapse_if, replace_content, WHITESPACE_PTYPE, TOKEN_PTYPE, \
remove_nodes, remove_content, remove_brackets, replace_parser, remove_anonymous_tokens, \
keep_children, is_one_of, not_one_of, has_content, apply_if, remove_first, remove_last, \
remove_anonymous_empty, keep_nodes, traverse_locally, strip, lstrip, rstrip, \
replace_content, replace_content_by, forbid, assert_content, remove_infix_operator, \
error_on, recompile_grammar, GLOBALS
#######################################################################
#
# PREPROCESSOR SECTION - Can be edited. Changes will be preserved.
#
#######################################################################
def XMLSnippetPreprocessor(text):
    """Identity preprocessor: return the source text unchanged together
    with an identity source-position mapping function."""
    def identity_mapper(pos):
        # Positions in the (unchanged) preprocessed text map to themselves.
        return pos
    return text, identity_mapper
def get_preprocessor() -> PreprocessorFunc:
    """Expose the module's preprocessor callable."""
    preprocessor = XMLSnippetPreprocessor
    return preprocessor
#######################################################################
#
# PARSER SECTION - Don't edit! CHANGES WILL BE OVERWRITTEN!
#
#######################################################################
class XMLSnippetGrammar(Grammar):
    r"""Parser for a XMLSnippet source file.

    Auto-generated parser class (see the PARSER SECTION header above:
    changes will be overwritten on recompilation).  Each class attribute
    is a parser-combinator object corresponding to one rule of the
    XMLSnippet EBNF grammar; ``root__`` designates the start symbol.
    """
    # Forward declarations for rules that are used before they are defined
    # (Name and element are recursive through content/STag/ETag).
    Name = Forward()
    element = Forward()
    # Hash of the EBNF source this parser was generated from — presumably
    # used to detect whether the grammar needs recompilation (TODO confirm
    # against DHParser's recompile_grammar).
    source_hash__ = "2efb839574bee3f63b5b9d1ea5c96386"
    parser_initialization__ = "upon instantiation"
    resume_rules__ = {}
    # No comment syntax; insignificant whitespace (~) is any run of \s.
    COMMENT__ = r''
    WHITESPACE__ = r'\s*'
    WSP_RE__ = mixin_comment(whitespace=WHITESPACE__, comment=COMMENT__)
    wsp__ = Whitespace(WSP_RE__)
    # --- Characters, explicit whitespace and end of file -----------------
    EOF = NegativeLookahead(RegExp('.'))
    S = RegExp('\\s+')
    Char = RegExp('\\x09|\\x0A|\\x0D|[\\u0020-\\uD7FF]|[\\uE000-\\uFFFD]|[\\U00010000-\\U0010FFFF]')
    Chars = RegExp('(?:\\x09|\\x0A|\\x0D|[\\u0020-\\uD7FF]|[\\uE000-\\uFFFD]|[\\U00010000-\\U0010FFFF])+')
    CharRef = Alternative(Series(Token('&#'), RegExp('[0-9]+'), Token(';')), Series(Token('&#x'), RegExp('[0-9a-fA-F]+'), Token(';')))
    CommentChars = RegExp('(?:(?!-)(?:\\x09|\\x0A|\\x0D|[\\u0020-\\uD7FF]|[\\uE000-\\uFFFD]|[\\U00010000-\\U0010FFFF]))+')
    PIChars = RegExp('(?:(?!\\?>)(?:\\x09|\\x0A|\\x0D|[\\u0020-\\uD7FF]|[\\uE000-\\uFFFD]|[\\U00010000-\\U0010FFFF]))+')
    IgnoreChars = RegExp('(?:(?!(?:<!\\[)|(?:\\]\\]>))(?:\\x09|\\x0A|\\x0D|[\\u0020-\\uD7FF]|[\\uE000-\\uFFFD]|[\\U00010000-\\U0010FFFF]))+')
    CData = RegExp('(?:(?!\\]\\]>)(?:\\x09|\\x0A|\\x0D|[\\u0020-\\uD7FF]|[\\uE000-\\uFFFD]|[\\U00010000-\\U0010FFFF]))+')
    CharData = RegExp('(?:(?!\\]\\]>)[^<&])+')
    PubidChars = RegExp("(?:\\x20|\\x0D|\\x0A|[a-zA-Z0-9]|[-'()+,./:=?;!*#@$_%])+")
    PubidCharsSingleQuoted = RegExp('(?:\\x20|\\x0D|\\x0A|[a-zA-Z0-9]|[-()+,./:=?;!*#@$_%])+')
    # --- Comments, processing instructions and CDATA sections ------------
    CDSect = Series(Token('<![CDATA['), CData, Token(']]>'))
    PITarget = Series(NegativeLookahead(RegExp('X|xM|mL|l')), Name)
    PI = Series(Token('<?'), PITarget, Option(Series(wsp__, PIChars)), Token('?>'))
    Comment = Series(Token('<!--'), ZeroOrMore(Alternative(CommentChars, RegExp('-(?!-)'))), Token('-->'))
    Misc = OneOrMore(Alternative(Comment, PI, S))
    # --- Names and tokens -------------------------------------------------
    NameChars = RegExp('(?x)(?:_|:|-|\\.|[A-Z]|[a-z]|[0-9]\n                  |\\u00B7|[\\u0300-\\u036F]|[\\u203F-\\u2040]\n                  |[\\u00C0-\\u00D6]|[\\u00D8-\\u00F6]|[\\u00F8-\\u02FF]\n                  |[\\u0370-\\u037D]|[\\u037F-\\u1FFF]|[\\u200C-\\u200D]\n                  |[\\u2070-\\u218F]|[\\u2C00-\\u2FEF]|[\\u3001-\\uD7FF]\n                  |[\\uF900-\\uFDCF]|[\\uFDF0-\\uFFFD]\n                  |[\\U00010000-\\U000EFFFF])+')
    NameStartChar = RegExp('(?x)_|:|[A-Z]|[a-z]\n                  |[\\u00C0-\\u00D6]|[\\u00D8-\\u00F6]|[\\u00F8-\\u02FF]\n                  |[\\u0370-\\u037D]|[\\u037F-\\u1FFF]|[\\u200C-\\u200D]\n                  |[\\u2070-\\u218F]|[\\u2C00-\\u2FEF]|[\\u3001-\\uD7FF]\n                  |[\\uF900-\\uFDCF]|[\\uFDF0-\\uFFFD]\n                  |[\\U00010000-\\U000EFFFF]')
    Name.set(Series(NameStartChar, Option(NameChars)))
    Names = Series(Name, ZeroOrMore(Series(RegExp(' '), Name)))
    Nmtoken = Synonym(NameChars)
    Nmtokens = Series(Nmtoken, ZeroOrMore(Series(RegExp(' '), Nmtoken)))
    # --- References -------------------------------------------------------
    PEReference = Series(Token('%'), Name, Token(';'))
    EntityRef = Series(Token('&'), Name, Token(';'))
    Reference = Alternative(EntityRef, CharRef)
    # --- Literals ---------------------------------------------------------
    PubidLiteral = Alternative(Series(Token('"'), Option(PubidChars), Token('"')), Series(Token("'"), Option(PubidCharsSingleQuoted), Token("'")))
    SystemLiteral = Alternative(Series(Token('"'), RegExp('[^"]*'), Token('"')), Series(Token("'"), RegExp("[^']*"), Token("'")))
    AttValue = Alternative(Series(Token('"'), ZeroOrMore(Alternative(RegExp('[^<&"]+'), Reference)), Token('"')), Series(Token("'"), ZeroOrMore(Alternative(RegExp("[^<&']+"), Reference)), Token("'")))
    EntityValue = Alternative(Series(Token('"'), ZeroOrMore(Alternative(RegExp('[^%&"]+'), PEReference, Reference)), Token('"')), Series(Token("'"), ZeroOrMore(Alternative(RegExp("[^%&']+"), PEReference, Reference)), Token("'")))
    # --- Logical structures (elements, tags, attributes) ------------------
    content = Series(Option(CharData), ZeroOrMore(Series(Alternative(element, Reference, CDSect, PI, Comment), Option(CharData))))
    Attribute = Series(Name, wsp__, Token('='), wsp__, AttValue, mandatory=2)
    # TagName is captured so that ETag can match the same name via Pop().
    TagName = Capture(Name)
    emptyElement = Series(Token('<'), Name, ZeroOrMore(Series(wsp__, Attribute)), wsp__, Token('/>'))
    ETag = Series(Token('</'), Pop(TagName), wsp__, Token('>'), mandatory=1)
    STag = Series(Token('<'), TagName, ZeroOrMore(Series(wsp__, Attribute)), wsp__, Token('>'))
    element.set(Alternative(emptyElement, Series(STag, content, ETag, mandatory=1)))
    # --- Document Type Definition stub ------------------------------------
    # The internal subset is not parsed in detail; a single regex skips it.
    intSubset = RegExp('(?:(?!\\][^\\]])[^<&])+')
    ExternalID = Series(Token('SYSTEM'), S, SystemLiteral, mandatory=1)
    doctypedecl = Series(Token('<!DOCTYPE'), wsp__, Name, Option(Series(wsp__, ExternalID)), wsp__, Option(Series(Token('['), intSubset, Token(']'), wsp__)), Token('>'))
    # --- Document frame and prolog ----------------------------------------
    No = Token('no')
    Yes = Token('yes')
    SDDecl = Series(wsp__, Token('standalone'), wsp__, Token('='), wsp__, Alternative(Alternative(Series(Token("'"), Yes), Series(No, Token("'"))), Alternative(Series(Token('"'), Yes), Series(No, Token('"')))))
    EncName = RegExp('[A-Za-z][A-Za-z0-9._\\-]*')
    EncodingDecl = Series(wsp__, Token('encoding'), wsp__, Token('='), wsp__, Alternative(Series(Token("'"), EncName, Token("'")), Series(Token('"'), EncName, Token('"'))))
    VersionNum = RegExp('[0-9]+\\.[0-9]+')
    VersionInfo = Series(wsp__, Token('version'), wsp__, Token('='), wsp__, Alternative(Series(Token("'"), VersionNum, Token("'")), Series(Token('"'), VersionNum, Token('"'))))
    XMLDecl = Series(Token('<?xml'), VersionInfo, Option(EncodingDecl), Option(SDDecl), wsp__, Token('?>'))
    prolog = Series(Option(Series(wsp__, XMLDecl)), Option(Misc), Option(Series(doctypedecl, Option(Misc))))
    document = Series(prolog, element, Option(Misc), EOF)
    # Start symbol of the grammar.
    root__ = document
def get_grammar() -> XMLSnippetGrammar:
    """Return the XMLSnippetGrammar singleton, creating it on first use."""
    global GLOBALS
    try:
        return GLOBALS.XMLSnippet_1_grammar_singleton
    except AttributeError:
        # Not yet cached: instantiate and publish on GLOBALS.
        singleton = XMLSnippetGrammar()
        GLOBALS.XMLSnippet_1_grammar_singleton = singleton
        if hasattr(get_grammar, 'python_src__'):
            singleton.python_src__ = get_grammar.python_src__
        return singleton
#######################################################################
#
# AST SECTION - Can be edited. Changes will be preserved.
#
#######################################################################
# Maps node tag-names to lists of transformation functions that are applied
# to the concrete syntax tree by DHParser's traverse() to produce the AST.
# An empty list means "no transformation" for that node type.
XMLSnippet_AST_transformation_table = {
    # AST Transformations for the XMLSnippet-grammar
    # "<", "*" and ":Token" are special table keys interpreted by
    # DHParser's traverse() — presumably pre-pass, wildcard and
    # anonymous-token entries respectively (TODO confirm against
    # DHParser.transform documentation).
    "<": remove_empty,
    "document": [],
    "prolog": [],
    "XMLDecl": [],
    "VersionInfo": [],
    "VersionNum": [],
    "EncodingDecl": [],
    "EncName": [],
    "SDDecl": [],
    "Yes": [],
    "No": [],
    "doctypedecl": [],
    "ExternalID": [],
    "intSubset": [],
    "element": [replace_or_reduce],
    "STag": [],
    "ETag": [],
    "emptyElement": [],
    "TagName": [],
    "Attribute": [],
    "content": [],
    "EntityValue": [replace_or_reduce],
    "AttValue": [replace_or_reduce],
    "SystemLiteral": [replace_or_reduce],
    "PubidLiteral": [replace_or_reduce],
    "Reference": [replace_or_reduce],
    "EntityRef": [],
    "PEReference": [],
    "Nmtokens": [],
    "Nmtoken": [reduce_single_child],
    "Names": [],
    "Name": [],
    "NameStartChar": [],
    "NameChars": [],
    "Misc": [],
    "Comment": [],
    "PI": [],
    "PITarget": [],
    "CDSect": [],
    "PubidCharsSingleQuoted": [],
    "PubidChars": [],
    "CharData": [],
    "CData": [],
    "IgnoreChars": [],
    "PIChars": [],
    "CommentChars": [],
    "CharRef": [replace_or_reduce],
    "Chars": [],
    "Char": [],
    "S": [],
    "EOF": [],
    ":Token": reduce_single_child,
    "*": replace_by_single_child
}
def XMLSnippetTransform() -> TransformationDict:
    """Build the AST-transformation callable for XMLSnippet syntax trees.

    A fresh copy of the table is taken so that later mutations of the
    module-level table do not affect an already-created transformer.
    """
    table = XMLSnippet_AST_transformation_table.copy()
    return partial(traverse, processing_table=table)
def get_transformer() -> TransformationFunc:
    """Return the cached AST transformer, building it on first request."""
    if not hasattr(GLOBALS, 'XMLSnippet_1_transformer_singleton'):
        GLOBALS.XMLSnippet_1_transformer_singleton = XMLSnippetTransform()
    return GLOBALS.XMLSnippet_1_transformer_singleton
#######################################################################
#
# COMPILER SECTION - Can be edited. Changes will be preserved.
#
#######################################################################
class XMLSnippetCompiler(Compiler):
"""Compiler for the abstract-syntax-tree of a XMLSnippet source file.
"""
def __init__(self):
super(XMLSnippetCompiler, self).__init__()
    def _reset(self):
        """Reset per-compilation state before each compiler run."""
        super()._reset()
        # initialize your variables here, not in the constructor!
def on_document(self, node):
return self.fallback_compiler(node)
# def on_prolog(self, node):
# return node
# def on_XMLDecl(self, node):
# return node
# def on_VersionInfo(self, node):