# ebnf.py - EBNF -> Python-Parser compilation for DHParser
#
# Copyright 2016  by Eckhart Arnold (arnold@badw.de)
#                 Bavarian Academy of Sciences and Humanities (badw.de)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.  See the License for the specific language governing
# permissions and limitations under the License.


"""
Module ``ebnf`` provides a self-hosting parser for EBNF-Grammars as
well as an EBNF-compiler that compiles an EBNF-Grammar into a
DHParser-based Grammar class that can be executed to parse source text
conforming to this grammar into concrete syntax trees.
"""

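# A typical compilation run wires the factory functions of this module together.
# Sketch (assuming `ebnf_src` holds the text of an EBNF grammar specification):
#
#     grammar = get_ebnf_grammar()                    # thread-local EBNF parser
#     syntax_tree = grammar(ebnf_src)                 # concrete syntax tree
#     get_ebnf_transformer()(syntax_tree)             # reduce to an AST in place
#     python_src = get_ebnf_compiler()(syntax_tree)   # Python source of a Grammar class
#
# The same pipeline can also be driven in a single call via compile_source()
# from DHParser.compile or via compile_ebnf() further down in this module.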
from collections import OrderedDict
from functools import partial
import keyword
import os
from typing import Callable, Dict, List, Set, Tuple, Sequence, Union, Optional, Any, cast

from DHParser.compile import CompilerError, Compiler, compile_source, visitor_name
from DHParser.error import Error
from DHParser.parse import Grammar, mixin_comment, Forward, RegExp, DropWhitespace, \
    NegativeLookahead, Alternative, Series, Option, OneOrMore, ZeroOrMore, Token, \
    GrammarError
from DHParser.preprocess import nil_preprocessor, PreprocessorFunc
from DHParser.syntaxtree import Node, WHITESPACE_PTYPE, TOKEN_PTYPE
from DHParser.toolkit import load_if_file, escape_re, md5, sane_parser_name, re, expand_table, \
    GLOBALS, get_config_value, unrepr, compile_python_object, DHPARSER_DIR
from DHParser.transform import TransformationFunc, traverse, remove_brackets, \
    reduce_single_child, replace_by_single_child, remove_whitespace, remove_empty, \
    remove_tokens, flatten, forbid, assert_content
from DHParser.versionnumber import __version__


__all__ = ('get_ebnf_preprocessor',
           'get_ebnf_grammar',
           'get_ebnf_transformer',
           'get_ebnf_compiler',
           'EBNFGrammar',
           'EBNFTransform',
           'EBNFCompilerError',
           'EBNFCompiler',
           'grammar_changed',
           'compile_ebnf',
           'PreprocessorFactoryFunc',
           'ParserFactoryFunc',
           'TransformerFactoryFunc',
           'CompilerFactoryFunc')


########################################################################
#
# source code support
#
########################################################################


dhparser_parentdir = os.path.dirname(DHPARSER_DIR)


DHPARSER_IMPORTS = '''
import collections
from functools import partial
import os
import sys

sys.path.append(r'{dhparser_parentdir}')

try:
    import regex as re
except ImportError:
    import re
from DHParser import logging, is_filename, load_if_file, \\
    Grammar, Compiler, nil_preprocessor, PreprocessorToken, Whitespace, DropWhitespace, \\
    Lookbehind, Lookahead, Alternative, Pop, Token, DropToken, Synonym, AllOf, SomeOf, \\
    Unordered, Option, NegativeLookbehind, OneOrMore, RegExp, Retrieve, Series, Capture, \\
    ZeroOrMore, Forward, NegativeLookahead, Required, mixin_comment, compile_source, \\
    grammar_changed, last_value, counterpart, PreprocessorFunc, is_empty, \\
    Node, TransformationFunc, TransformationDict, transformation_factory, traverse, \\
    remove_children_if, move_adjacent, normalize_whitespace, is_anonymous, matches_re, \\
    reduce_single_child, replace_by_single_child, replace_or_reduce, remove_whitespace, \\
    replace_by_children, remove_empty, remove_tokens, flatten, is_insignificant_whitespace, \\
    collapse, collapse_if, replace_content, WHITESPACE_PTYPE, TOKEN_PTYPE, \\
    remove_nodes, remove_content, remove_brackets, change_tag_name, remove_anonymous_tokens, \\
    keep_children, is_one_of, not_one_of, has_content, apply_if, remove_first, remove_last, \\
    remove_anonymous_empty, keep_nodes, traverse_locally, strip, lstrip, rstrip, \\
    replace_content, replace_content_by, forbid, assert_content, remove_infix_operator, \\
    error_on, recompile_grammar, left_associative, lean_left, set_config_value, \\
    get_config_value, XML_SERIALIZATION, SXPRESSION_SERIALIZATION, COMPACT_SERIALIZATION, \\
    JSON_SERIALIZATION, CONFIG_PRESET, GLOBALS 
'''.format(dhparser_parentdir=dhparser_parentdir)


########################################################################
#
# EBNF scanning
#
########################################################################


def get_ebnf_preprocessor() -> PreprocessorFunc:
    """
    Returns the preprocessor function for the EBNF compiler.
    As of now, no preprocessing is needed for EBNF-sources. Therefore,
    just a dummy function is returned.
    """
    return nil_preprocessor


########################################################################
#
# EBNF parsing
#
########################################################################


class EBNFGrammar(Grammar):
    r"""
    Parser for an EBNF source file, with this grammar:

    @ comment    = /#.*(?:\n|$)/                    # comments start with '#' and eat all chars up to and including '\n'
    @ whitespace = /\s*/                            # whitespace includes linefeed
    @ literalws  = right                            # trailing whitespace of literals will be ignored tacitly
    @ drop       = whitespace                       # do not include whitespace in concrete syntax tree

    #: top-level

    syntax     = [~//] { definition | directive } §EOF
    definition = symbol §"=" expression
    directive  = "@" §symbol "=" (regexp | literal | symbol) { "," (regexp | literal | symbol) }

    #: components

    expression = term { "|" term }
    term       = { ["§"] factor }+                       # "§" means all following factors mandatory
    factor     = [flowmarker] [retrieveop] symbol !"="   # negative lookahead to be sure it's not a definition
               | [flowmarker] literal
               | [flowmarker] plaintext
               | [flowmarker] regexp
               | [flowmarker] whitespace
               | [flowmarker] oneormore
               | [flowmarker] group
               | [flowmarker] unordered
               | repetition
               | option

    #: flow-operators

    flowmarker = "!"  | "&"                         # '!' negative lookahead, '&' positive lookahead
               | "-!" | "-&"                        # '-' negative lookbehind, '-&' positive lookbehind
    retrieveop = "::" | ":"                         # '::' pop, ':' retrieve

    #: groups

    group      = "(" §expression ")"
    unordered  = "<" §expression ">"                # elements of expression in arbitrary order
    oneormore  = "{" expression "}+"
    repetition = "{" §expression "}"
    option     = "[" §expression "]"

    #: leaf-elements

    symbol     = /(?!\d)\w+/~                       # e.g. expression, factor, parameter_list
    literal    = /"(?:(?<!\\)\\"|[^"])*?"/~         # e.g. "(", '+', 'while'
               | /'(?:(?<!\\)\\'|[^'])*?'/~         # whitespace following literals will be ignored tacitly.
    plaintext  = /`(?:(?<!\\)\\`|[^`])*?`/~         # like literal but does not eat whitespace
    regexp     = /\/(?:(?<!\\)\\(?:\/)|[^\/])*?\//~     # e.g. /\w+/, ~/#.*(?:\n|$)/~
    whitespace = /~/~                               # insignificant whitespace

    EOF = !/./
    """
    expression = Forward()
    source_hash__ = "82a7c668f86b83f86515078e6c9093ed"
    static_analysis_pending__ = []
    parser_initialization__ = ["upon instantiation"]
    COMMENT__ = r'#.*(?:\n|$)'
    WHITESPACE__ = r'\s*'
    WSP_RE__ = mixin_comment(whitespace=WHITESPACE__, comment=COMMENT__)
    wsp__ = DropWhitespace(WSP_RE__)
    EOF = NegativeLookahead(RegExp('.'))
    whitespace = Series(RegExp('~'), wsp__)
    regexp = Series(RegExp('/(?:(?<!\\\\)\\\\(?:/)|[^/])*?/'), wsp__)
    plaintext = Series(RegExp('`(?:(?<!\\\\)\\\\`|[^`])*?`'), wsp__)
    literal = Alternative(Series(RegExp('"(?:(?<!\\\\)\\\\"|[^"])*?"'), wsp__),
                          Series(RegExp("'(?:(?<!\\\\)\\\\'|[^'])*?'"), wsp__))
    symbol = Series(RegExp('(?!\\d)\\w+'), wsp__)
    option = Series(Series(Token("["), wsp__), expression, Series(Token("]"), wsp__), mandatory=1)
    repetition = Series(Series(Token("{"), wsp__), expression, Series(Token("}"), wsp__), mandatory=1)
    oneormore = Series(Series(Token("{"), wsp__), expression, Series(Token("}+"), wsp__))
    unordered = Series(Series(Token("<"), wsp__), expression, Series(Token(">"), wsp__), mandatory=1)
    group = Series(Series(Token("("), wsp__), expression, Series(Token(")"), wsp__), mandatory=1)
    retrieveop = Alternative(Series(Token("::"), wsp__), Series(Token(":"), wsp__))
    flowmarker = Alternative(Series(Token("!"), wsp__), Series(Token("&"), wsp__),
                             Series(Token("-!"), wsp__), Series(Token("-&"), wsp__))
    factor = Alternative(Series(Option(flowmarker), Option(retrieveop), symbol,
                                NegativeLookahead(Series(Token("="), wsp__))), Series(Option(flowmarker), literal),
                         Series(Option(flowmarker), plaintext), Series(Option(flowmarker), regexp),
                         Series(Option(flowmarker), whitespace), Series(Option(flowmarker), oneormore),
                         Series(Option(flowmarker), group), Series(Option(flowmarker), unordered), repetition, option)
    term = OneOrMore(Series(Option(Series(Token("§"), wsp__)), factor))
    expression.set(Series(term, ZeroOrMore(Series(Series(Token("|"), wsp__), term))))
    directive = Series(Series(Token("@"), wsp__), symbol, Series(Token("="), wsp__),
                       Alternative(regexp, literal, symbol),
                       ZeroOrMore(Series(Series(Token(","), wsp__), Alternative(regexp, literal, symbol))), mandatory=1)
    definition = Series(symbol, Series(Token("="), wsp__), expression, mandatory=1)
    syntax = Series(Option(Series(wsp__, RegExp(''))), ZeroOrMore(Alternative(definition, directive)), EOF, mandatory=2)
    root__ = syntax


def grammar_changed(grammar_class, grammar_source: str) -> bool:
    """
    Returns ``True`` if ``grammar_class`` does not reflect the latest
    changes of ``grammar_source``

    Parameters:
        grammar_class:  the parser class representing the grammar
            or the file name of a compiler suite containing the grammar
        grammar_source:  File name or string representation of the
            EBNF code of the grammar

    Returns (bool):
        True, if the source text of the grammar is different from the
        source from which the grammar class was generated
    """
    grammar = load_if_file(grammar_source)
    chksum = md5(grammar, __version__)
    if isinstance(grammar_class, str):
        # grammar_class = load_compiler_suite(grammar_class)[1]
        with open(grammar_class, 'r', encoding='utf8') as f:
            pycode = f.read()
        m = re.search(r'class \w*\(Grammar\)', pycode)
        if m:
            m = re.search('    source_hash__ *= *"([a-z0-9]*)"',
                          pycode[m.span()[1]:])
            return not (m and m.groups() and m.groups()[-1] == chksum)
        else:
            return True
    else:
        return chksum != grammar_class.source_hash__
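# Usage sketch for grammar_changed(): regenerate the parser only when the EBNF
# source no longer matches the generated class (names below are illustrative):
#
#     if grammar_changed(ArithmeticGrammar, 'Arithmetic.ebnf'):
#         regenerate_the_parser_script()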


def get_ebnf_grammar() -> EBNFGrammar:
    try:
        grammar = GLOBALS.ebnf_grammar_singleton
        return grammar
    except AttributeError:
        GLOBALS.ebnf_grammar_singleton = EBNFGrammar()
        return GLOBALS.ebnf_grammar_singleton
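# Usage sketch: the grammar singleton is callable on EBNF source text, e.g.
#     syntax_tree = get_ebnf_grammar()('word = /\w+/\n')
# yields the concrete syntax tree of that one-rule grammar.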


########################################################################
#
# EBNF concrete to abstract syntax tree transformation and validation
#
########################################################################


EBNF_AST_transformation_table = {
    # AST Transformations for EBNF-grammar
    "<":
        [remove_empty],  # remove_whitespace
    "syntax":
        [],  # otherwise '"*": replace_by_single_child' would be applied
    "directive, definition":
        [flatten, remove_tokens('@', '=', ',')],
    "expression":
        [replace_by_single_child, flatten, remove_tokens('|')],  # remove_infix_operator],
    "term":
        [replace_by_single_child, flatten],  # supports both idioms:
                                             # "{ factor }+" and "factor { factor }"
    "factor, flowmarker, retrieveop":
        replace_by_single_child,
    "group":
        [remove_brackets, replace_by_single_child],
    "unordered":
        remove_brackets,
    "oneormore, repetition, option":
        [reduce_single_child, remove_brackets,
         forbid('repetition', 'option', 'oneormore'), assert_content(r'(?!§)(?:.|\n)*')],
    "symbol, literal, regexp":
        reduce_single_child,
    (TOKEN_PTYPE, WHITESPACE_PTYPE):
        reduce_single_child,
    "*":
        replace_by_single_child
}


def EBNFTransform() -> TransformationFunc:
    return partial(traverse, processing_table=EBNF_AST_transformation_table.copy())


def get_ebnf_transformer() -> TransformationFunc:
    try:
        transformer = GLOBALS.EBNF_transformer_singleton
    except AttributeError:
        GLOBALS.EBNF_transformer_singleton = EBNFTransform()
        transformer = GLOBALS.EBNF_transformer_singleton
    return transformer
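# Usage sketch: the returned function transforms a concrete syntax tree into an
# abstract syntax tree in place, e.g. get_ebnf_transformer()(syntax_tree).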


########################################################################
#
# EBNF abstract syntax tree to Python parser compilation
#
########################################################################


PreprocessorFactoryFunc = Callable[[], PreprocessorFunc]
ParserFactoryFunc = Callable[[], Grammar]
TransformerFactoryFunc = Callable[[], TransformationFunc]
CompilerFactoryFunc = Callable[[], Compiler]

PREPROCESSOR_FACTORY = '''
def get_preprocessor() -> PreprocessorFunc:
    return {NAME}Preprocessor
'''


GRAMMAR_FACTORY = '''
def get_grammar() -> {NAME}Grammar:
    """Returns a thread/process-exclusive {NAME}Grammar-singleton."""
    try:
        grammar = GLOBALS.{NAME}_{ID:08d}_grammar_singleton
    except AttributeError:
        GLOBALS.{NAME}_{ID:08d}_grammar_singleton = {NAME}Grammar()
        if hasattr(get_grammar, 'python_src__'):
            GLOBALS.{NAME}_{ID:08d}_grammar_singleton.python_src__ = get_grammar.python_src__
        grammar = GLOBALS.{NAME}_{ID:08d}_grammar_singleton
    return grammar
'''


TRANSFORMER_FACTORY = '''
def Create{NAME}Transformer() -> TransformationFunc:
    """Creates a transformation function that does not share state with other
    threads or processes."""
    return partial(traverse, processing_table={NAME}_AST_transformation_table.copy())

def get_transformer() -> TransformationFunc:
    """Returns a thread/process-exclusive transformation function."""
    try:
        transformer = GLOBALS.{NAME}_{ID:08d}_transformer_singleton
    except AttributeError:
        GLOBALS.{NAME}_{ID:08d}_transformer_singleton = Create{NAME}Transformer()
        transformer = GLOBALS.{NAME}_{ID:08d}_transformer_singleton
    return transformer
'''


COMPILER_FACTORY = '''
def get_compiler() -> {NAME}Compiler:
    """Returns a thread/process-exclusive {NAME}Compiler-singleton."""
    try:
        compiler = GLOBALS.{NAME}_{ID:08d}_compiler_singleton
    except AttributeError:
        GLOBALS.{NAME}_{ID:08d}_compiler_singleton = {NAME}Compiler()
        compiler = GLOBALS.{NAME}_{ID:08d}_compiler_singleton
    return compiler
'''
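# The {NAME} and {ID} placeholders of the factory templates above are filled in
# when the compiler assembles the output module, for example (illustrative values):
#     GRAMMAR_FACTORY.format(NAME='Arithmetic', ID=1)
# produces the source of a get_grammar() function for an ArithmeticGrammar class.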


WHITESPACE_TYPES = {'horizontal': r'[\t ]*',  # default: horizontal
                    'linefeed': r'[ \t]*\n?(?!\s*\n)[ \t]*',
                    'vertical': r'\s*'}

DROP_TOKEN  = 'token'
DROP_WSPC   = 'whitespace'
DROP_VALUES = {DROP_TOKEN, DROP_WSPC}
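# These constants mirror the `@ drop = ...` directive; e.g. `@ drop = whitespace`
# in an EBNF source requests that insignificant whitespace be omitted from the
# concrete syntax tree (cf. the directive shown in EBNFGrammar's docstring).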

# Representation of Python code or, rather, something that will be output as Python code
ReprType = Union[str, unrepr]


class EBNFDirectives:
    """
    A Record that keeps information about compiler directives
    during the compilation process.

    Attributes:
        whitespace:  the regular expression string for (insignificant)
                whitespace

        comment:  the regular expression string for comments

        literalws:  automatic whitespace eating next to literals. Can
                be either 'left', 'right', 'none', 'both'

        tokens:  set of the names of preprocessor tokens
        filter:  mapping of symbols to python filter functions that
                will be called on any retrieve / pop - operations on
                these symbols

        error:  mapping of symbols to tuples of match conditions and
                customized error messages. A match condition can be
                either a string or a regular expression. The first
                error message where the search condition matches will
                be displayed. An empty string '' as search condition
                always matches, so in case of multiple error messages,
                this condition should be placed at the end.

        skip:  mapping of symbols to a list of search expressions. A
                search expression can be either a string or a regular
                expression. The closest match is the point of reentry
                for the series-parser when a mandatory item failed to
                match the following text.

        resume:  mapping of symbols to a list of search expressions. A
                search expression can be either a string or a regular
                expression. The closest match is the point of reentry
                after a parsing error has occurred. Unlike the skip
                field, this configures resuming after the failing
                parser has returned.
    """
    __slots__ = ['whitespace', 'comment', 'literalws', 'tokens', 'filter', 'error', 'skip',
                 'resume', 'drop']

    def __init__(self):
        self.whitespace = WHITESPACE_TYPES['vertical']  # type: str
        self.comment = ''     # type: str
        self.literalws = {'right'}  # type: Collection[str]
        self.tokens = set()   # type: Collection[str]
        self.filter = dict()  # type: Dict[str, str]
        self.error = dict()   # type: Dict[str, List[Tuple[ReprType, ReprType]]]
        self.skip = dict()    # type: Dict[str, List[Union[unrepr, str]]]
        self.resume = dict()  # type: Dict[str, List[Union[unrepr, str]]]
        self.drop = set()     # type: Set[str]

    def __getitem__(self, key):
        return getattr(self, key)

    def __setitem__(self, key, value):
        assert hasattr(self, key)
        setattr(self, key, value)

    def keys(self):
        return self.__dict__.keys()
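
    # EBNFDirectives objects behave like a small record/mapping; a usage sketch:
    #     d = EBNFDirectives()
    #     d['literalws']               # -> {'right'} (the default)
    #     d['drop'] = {'whitespace'}   # same as d.drop = {'whitespace'}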


class EBNFCompilerError(CompilerError):
    """Error raised by `EBNFCompiler` class. (Not compilation errors
    in the strict sense, see `CompilationError` in module ``dsl.py``)"""
    pass


class EBNFCompiler(Compiler):
    """
    Generates a Parser from an abstract syntax tree of a grammar specified
    in EBNF-Notation.

    Instances of this class must be called with the root-node of the
    abstract syntax tree from an EBNF-specification of a formal language.
    The returned value is the Python-source-code of a Grammar class for
    this language that can be used to parse texts in this language.
    See classes `parser.Compiler` and `parser.Grammar` for more information.

    Additionally, class EBNFCompiler provides helper methods to generate
    code-skeletons for a preprocessor, AST-transformation and full
    compilation of the formal language. These methods' names start with
    the prefix `gen_`.

    Attributes:
        current_symbols:  During compilation, a list containing the root
                node of the currently compiled definition as first element
                and then the nodes of the symbols that are referred to in
                the currently compiled definition.

        rules:  Dictionary that maps rule names to a list of Nodes that
                contain symbol-references in the definition of the rule.
                The first item in the list is the node of the rule-
                definition itself. Example:

                           `alternative = a | b`

                Now `[node.content for node in self.rules['alternative']]`
                yields `['alternative = a | b', 'a', 'b']`

        symbols:  A mapping of symbol names to their first usage (not
                their definition!) in the EBNF source.

        variables:  A set of symbol names that are used with the
                Pop or Retrieve operator. Because the values of these
                symbols need to be captured they are called variables.
                See `test_parser.TestPopRetrieve` for an example.

        recursive:  A set of symbols that are used recursively and
                therefore require a `Forward`-operator.

        definitions:  A dictionary of definitions. Unlike `rules`,
                this maps the symbols to their compiled definienda.

        deferred_tasks:  A list of callables that is filled during
                compilation, but that will be executed only after
                compilation has finished. Typically, it contains
                semantic checks that require information that
                is only available upon completion of compilation.

        root_symbol: The name of the root symbol.

        directives:  A record of all directives and their default values.

        defined_directives:  A set of all directives that have already been
                defined. With the exception of those directives contained
                in EBNFCompiler.REPEATABLE_DIRECTIVES, directives must only
                be defined once.

        consumed_custom_errors:  A set of symbols for which a custom error
                has been defined and(!) consumed during compilation. This
                makes it possible to add a compiler error in those cases where (i) an
                error message has been defined but will never be used or (ii)
                an error message is accidentally used twice. For examples, see
                `test_ebnf.TestErrorCustomization`.

        consumed_skip_rules: The same as `consumed_custom_errors` only for
                in-series-resume-rules (aka 'skip-rules') for Series-parsers.

        re_flags:  A set of regular expression flags to be added to all
                regular expressions found in the current parsing process

        grammar_name:  The name of the grammar to be compiled

        grammar_source:  The source code of the grammar to be compiled.

        grammar_id: a unique id for every compiled grammar. (Required for
                disambiguation of thread-local variables storing
                compiled texts.)
    """
    COMMENT_KEYWORD = "COMMENT__"
    WHITESPACE_KEYWORD = "WSP_RE__"
    RAW_WS_KEYWORD = "WHITESPACE__"
    WHITESPACE_PARSER_KEYWORD = "wsp__"
    DROP_WHITESPACE_PARSER_KEYWORD = "dwsp__"
    RESUME_RULES_KEYWORD = "resume_rules__"
    SKIP_RULES_SUFFIX = '_skip__'
    ERR_MSG_SUFFIX = '_err_msg__'
    RESERVED_SYMBOLS = {WHITESPACE_KEYWORD, RAW_WS_KEYWORD, COMMENT_KEYWORD,
                        RESUME_RULES_KEYWORD, ERR_MSG_SUFFIX}
    AST_ERROR = "Badly structured syntax tree. " \
                "Potentially due to erroneous AST transformation."
    PREFIX_TABLE = {'§': 'Required',
                    '&': 'Lookahead', '!': 'NegativeLookahead',
                    '-&': 'Lookbehind', '-!': 'NegativeLookbehind',
                    '::': 'Pop', ':': 'Retrieve'}
    REPEATABLE_DIRECTIVES = {'tokens'}


    def __init__(self, grammar_name="DSL", grammar_source=""):
        self.grammar_id = 0
        super(EBNFCompiler, self).__init__()  # calls the reset()-method
        self.set_grammar_name(grammar_name, grammar_source)


    def reset(self):
        super(EBNFCompiler, self).reset()
        self._result = ''           # type: str
        self.re_flags = set()       # type: Set[str]
        self.rules = OrderedDict()  # type: OrderedDict[str, List[Node]]
        self.current_symbols = []   # type: List[Node]
        self.symbols = {}           # type: Dict[str, Node]
        self.variables = set()      # type: Set[str]
        self.recursive = set()      # type: Set[str]
        self.definitions = {}       # type: Dict[str, str]
        self.deferred_tasks = []    # type: List[Callable]
        self.root_symbol = ""       # type: str
        self.directives = EBNFDirectives()   # type: EBNFDirectives
        self.defined_directives = set()      # type: Set[str]
        self.consumed_custom_errors = set()  # type: Set[str]
        self.consumed_skip_rules = set()     # type: Set[str]
        self.grammar_id += 1


    @property
    def result(self) -> str:
        return self._result


    def set_grammar_name(self, grammar_name: str = "", grammar_source: str = ""):
        """
        Changes the grammar name and source.

        The grammar name and the source text are metadata that do not affect the
        compilation process. They are used to name and annotate the output.
        Returns `self`.
        """
        assert grammar_name == "" or re.match(r'\w+\Z', grammar_name)
        if not grammar_name and re.fullmatch(r'[\w/:\\]+', grammar_source):
            grammar_name = os.path.splitext(os.path.basename(grammar_source))[0]
        self.grammar_name = grammar_name or "NameUnknown"
        self.grammar_source = load_if_file(grammar_source)
        return self


    # methods for generating skeleton code for preprocessor, transformer, and compiler

    def gen_preprocessor_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for a preprocessor-function for
        the previously compiled formal language.
        """
        name = self.grammar_name + "Preprocessor"
        return "def %s(text):\n    return text, lambda i: i\n" % name \
               + PREPROCESSOR_FACTORY.format(NAME=self.grammar_name)


    def gen_transformer_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for the AST-transformation for the
        previously compiled formal language.
        """
        if not self.rules:
            raise EBNFCompilerError('Compiler must be run before calling '
                                    '"gen_transformer_skeleton()"!')
        tt_name = self.grammar_name + '_AST_transformation_table'
        transtable = [tt_name + ' = {',
                      '    # AST Transformations for the ' + self.grammar_name + '-grammar']
        transtable.append('    "<": flatten,')
        for name in self.rules:
            transformations = '[]'
            # rule = self.definitions[name]
            # if rule.startswith('Alternative'):
            #     transformations = '[replace_or_reduce]'
            # elif rule.startswith('Synonym'):
            #     transformations = '[reduce_single_child]'
            transtable.append('    "' + name + '": %s,' % transformations)
        # transtable.append('    ":Token": reduce_single_child,')
        transtable += ['    "*": replace_by_single_child', '}', '']
        transtable += [TRANSFORMER_FACTORY.format(NAME=self.grammar_name, ID=self.grammar_id)]
        return '\n'.join(transtable)


    def gen_compiler_skeleton(self) -> str:
        """
        Returns Python-skeleton-code for a Compiler-class for the
        previously compiled formal language.
        """
        if not self.rules:
            raise EBNFCompilerError('Compiler has not been run before calling '
                                    '"gen_compiler_skeleton()"!')
        compiler = ['class ' + self.grammar_name + 'Compiler(Compiler):',
                    '    """Compiler for the abstract-syntax-tree of a '
                    + self.grammar_name + ' source file.',
                    '    """', '',
                    '    def __init__(self):',
                    '        super(' + self.grammar_name + 'Compiler, self).__init__()',
                    '',
                    '    def reset(self):',
                    '        super().reset()',
                    '        # initialize your variables here, not in the constructor!',
                    '']
        for name in self.rules:
            method_name = visitor_name(name)
            if name == self.root_symbol:
                compiler += ['    def ' + method_name + '(self, node):',
                             '        return self.fallback_compiler(node)', '']
            else:
                compiler += ['    # def ' + method_name + '(self, node):',
                             '    #     return node', '']
        compiler += [COMPILER_FACTORY.format(NAME=self.grammar_name, ID=self.grammar_id)]
        return '\n'.join(compiler)

    def verify_transformation_table(self, transtable):
        """
        Checks for symbols that occur in the transformation-table but have
        never been defined in the grammar. Usually, this kind of
        inconsistency results from an error like a typo in the transformation
        table.
        """
        assert self._dirty_flag
        table_entries = set(expand_table(transtable).keys()) - {'*', '<', '>', '~'}
        symbols = self.rules.keys()
        messages = []
        for entry in table_entries:
            if entry not in symbols and not entry.startswith(":"):
                messages.append(Error(('Symbol "%s" is not defined in grammar %s but appears in '
                                       'the transformation table!') % (entry, self.grammar_name),
                                      0, Error.UNDEFINED_SYMBOL_IN_TRANSTABLE_WARNING))
        return messages

    def verify_compiler(self, compiler):
        """
        Checks for on_XXXX()-methods that occur in the compiler, although XXXX
        has never been defined in the grammar. Usually, this kind of
        inconsistency results from an error like a typo in the compiler-code.
        """
        pass  # TODO: add verification code here


    def _check_rx(self, node: Node, rx: str) -> str:
        """
        Checks whether the string `rx` represents a valid regular
        expression. Makes sure that multiline regular expressions are
        prepended by the multiline-flag. Returns the regular expression string.
        """
        flags = self.re_flags | {'x'} if rx.find('\n') >= 0 else self.re_flags
        if flags:
            rx = "(?%s)%s" % ("".join(flags), rx)
        try:
            re.compile(rx)
        except Exception as re_error:
            self.tree.new_error(node, "malformed regular expression %s: %s" %
                                (repr(rx), str(re_error)))
        return rx


    def _extract_regex(self, node: Node) -> str:
        """Extracts regular expression string from regexp-Node."""
        value = node.content.strip("~")
        if value[0] + value[-1] in {'""', "''"}:
            value = escape_re(value[1:-1])
        elif value[0] + value[-1] == '//' and value != '//':
            value = self._check_rx(node, value[1:-1])
        return value


    def _gen_search_rule(self, nd: Node) -> ReprType:
        """Generates a search rule, which can be either a string for simple
        string search or a regular expression from the node's content. Returns
        an empty string in case the node is neither regexp nor literal.
        """
        if nd.tag_name == 'regexp':
            return unrepr("re.compile(r'%s')" % self._extract_regex(nd))
        elif nd.tag_name == 'literal':
            s = nd.content.strip()
            return s.strip('"') if s[0] == '"' else s.strip("'")
        return ''
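
    # For illustration: a `regexp` node with the content /,/ yields
    # unrepr("re.compile(r',')"), a `literal` node with the content "," yields
    # the plain string ',', and any other kind of node yields ''.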

    def _gen_search_list(self, nodes: Sequence[Node]) -> List[Union[unrepr, str]]:
        search_list = []  # type: List[Union[unrepr, str]]
        for child in nodes:
            rule = self._gen_search_rule(child)
            search_list.append(rule if rule else unrepr(child.content.strip()))
        return search_list


    def assemble_parser(self, definitions: List[Tuple[str, str]], root_node: Node) -> str:
        """
        Creates the Python code for the parser after compilation of
        the EBNF-Grammar
        """

        # execute deferred tasks, for example semantic checks that cannot
        # be done before the symbol table is complete

        for task in self.deferred_tasks:
            task()

        # provide for capturing of symbols that are variables, i.e. the
        # value of which will be retrieved at some point during the parsing process

        if self.variables:
            for i in range(len(definitions)):
                if definitions[i][0] in self.variables:
                    definitions[i] = (definitions[i][0], 'Capture(%s)' % definitions[i][1])

        # add special fields for Grammar class

        if DROP_WSPC in self.directives.drop:
            definitions.append((self.DROP_WHITESPACE_PARSER_KEYWORD,
                                'DropWhitespace(%s)' % self.WHITESPACE_KEYWORD))
        else:
            definitions.append((self.WHITESPACE_PARSER_KEYWORD,
                                'Whitespace(%s)' % self.WHITESPACE_KEYWORD))
        definitions.append((self.WHITESPACE_KEYWORD,
                            ("mixin_comment(whitespace=" + self.RAW_WS_KEYWORD
                             + ", comment=" + self.COMMENT_KEYWORD + ")")))
        definitions.append((self.RAW_WS_KEYWORD, "r'{}'".format(self.directives.whitespace)))
        definitions.append((self.COMMENT_KEYWORD, "r'{}'".format(self.directives.comment)))

        # prepare and add resume-rules

        resume_rules = dict()  # type: Dict[str, List[ReprType]]
        for symbol, raw_rules in self.directives.resume.items():
            refined_rules = []
            for rule in raw_rules:
                if isinstance(rule, unrepr) and rule.s.isidentifier():
                    try:
                        nd = self.rules[rule.s][0].children[1]
                        refined = self._gen_search_rule(nd)
                    except IndexError:
                        refined = ""
                    if refined:
                        refined_rules.append(refined)
                    else:
                        self.tree.new_error(nd, 'Symbol "%s" cannot be used in resume rule, since'
                                                ' it represents neither literal nor regexp!')
                else:
                    refined_rules.append(rule)
            resume_rules[symbol] = refined_rules
        definitions.append((self.RESUME_RULES_KEYWORD, repr(resume_rules)))

        # prepare and add customized error-messages

        for symbol, err_msgs in self.directives.error.items():
            custom_errors = []  # type: List[Tuple[ReprType, ReprType]]
            for search, message in err_msgs:
                if isinstance(search, unrepr) and search.s.isidentifier():
                    try:
                        nd = self.rules[search.s][0].children[1]
                        search = self._gen_search_rule(nd)
                    except IndexError:
                        search = ''
                custom_errors.append((search, message))
            definitions.append((symbol + self.ERR_MSG_SUFFIX, repr(custom_errors)))

        for symbol in self.directives.error.keys():
            if symbol not in self.consumed_custom_errors:
                def_node = self.rules[symbol][0]
                self.tree.new_error(
                    def_node, 'Customized error message for symbol "{}" will never be used, '
                    'because the mandatory marker "§" appears nowhere in its definiendum!'
                    .format(symbol), Error.UNUSED_ERROR_HANDLING_WARNING)

        # prepare and add skip-rules

        for symbol, skip in self.directives.skip.items():
            skip_rules = []  # type: List[ReprType]
            for search in skip:
                if isinstance(search, unrepr) and search.s.isidentifier():
                    try:
                        nd = self.rules[search.s][0].children[1]
                        search = self._gen_search_rule(nd)
                    except IndexError:
                        search = ''
                skip_rules.append(search)
            definitions.append((symbol + self.SKIP_RULES_SUFFIX, repr(skip_rules)))

        for symbol in self.directives.error.keys():
            if symbol not in self.consumed_skip_rules:
                def_node = self.rules[symbol][0]
                self.tree.new_error(
                    def_node, '"Skip-rules" for symbol "{}" will never be used, '
                    'because the mandatory marker "§" appears nowhere in its definiendum!'
                    .format(symbol), Error.UNUSED_ERROR_HANDLING_WARNING)

        # prepare parser class header and docstring and
        # add EBNF grammar to the doc string of the parser class

        article = 'an ' if self.grammar_name[0:1] in "AaEeIiOoUu" else 'a '  # what about 'hour', 'universe' etc.?
        show_source = get_config_value('add_grammar_source_to_parser_docstring')
        declarations = ['class ' + self.grammar_name
                        + 'Grammar(Grammar):',
                        'r"""Parser for ' + article + self.grammar_name
                        + ' source file'
                        + ('. Grammar:' if self.grammar_source and show_source else '.')]
        definitions.append(('parser_initialization__', '["upon instantiation"]'))
        definitions.append(('static_analysis_pending__', '[True]'))
        if self.grammar_source:
            definitions.append(('source_hash__',
                                '"%s"' % md5(self.grammar_source, __version__)))
            declarations.append('')
            if show_source:
                declarations += [line for line in self.grammar_source.split('\n')]
            while declarations[-1].strip() == '':
                declarations = declarations[:-1]
        declarations.append('"""')

        # turn definitions into declarations in reverse order

        self.root_symbol = definitions[0][0] if definitions else ""
        definitions.reverse()
        declarations += [symbol + ' = Forward()'
                         for symbol in sorted(list(self.recursive))]
        for symbol, statement in definitions:
            if symbol in self.recursive:
                declarations += [symbol + '.set(' + statement + ')']
            else:
                declarations += [symbol + ' = ' + statement]

        # check for symbols used but never defined

        defined_symbols = set(self.rules.keys()) | self.RESERVED_SYMBOLS
        for symbol in self.symbols:
            if symbol not in defined_symbols:
                self.tree.new_error(self.symbols[symbol],
                                    "Missing definition for symbol '%s'" % symbol)
                # root_node.error_flag = True

        # check for unconnected rules

        defined_symbols.difference_update(self.RESERVED_SYMBOLS)

        def remove_connections(symbol):
            """Recursively removes all symbols which appear in the
            definiens of a particular symbol."""
            if symbol in defined_symbols:
                defined_symbols.remove(symbol)
                for related in self.rules[symbol][1:]:
                    remove_connections(str(related))

        remove_connections(self.root_symbol)
        for leftover in defined_symbols:
            self.tree.new_error(self.rules[leftover][0],
                                ('Rule "%s" is not connected to parser root "%s" !') %
                                (leftover, self.root_symbol), Error.WARNING)

        # set root_symbol parser and assemble python grammar definition

        if self.root_symbol and 'root__' not in self.rules:
            declarations.append('root__ = ' + self.root_symbol)
        declarations.append('')
        self._result = '\n    '.join(declarations) \
                       + GRAMMAR_FACTORY.format(NAME=self.grammar_name, ID=self.grammar_id)
        return self._result


    ## compilation methods

    def on_syntax(self, node: Node) -> str:
        definitions = []  # type: List[Tuple[str, str]]

        # drop the wrapping sequence node
        if len(node.children) == 1 and node.children[0].is_anonymous():
            node = node.children[0]

        # compile definitions and directives and collect definitions
        for nd in node.children:
            if nd.tag_name == "definition":
                definitions.append(self.compile(nd))
            else:
                assert nd.tag_name == "directive", nd.as_sxpr()
                self.compile(nd)
            # node.error_flag = max(node.error_flag, nd.error_flag)
        self.definitions.update(definitions)

        grammar_python_src = self.assemble_parser(definitions, node)
        if get_config_value('static_analysis') == 'early':
            try:
                grammar_class = compile_python_object(DHPARSER_IMPORTS + grammar_python_src,
                                                      self.grammar_name)
                _ = grammar_class()
                grammar_python_src = grammar_python_src.replace(
                    'static_analysis_pending__ = [True]', 'static_analysis_pending__ = []', 1)
            except NameError:
                pass  # undefined names in the grammar are already caught and reported
            except GrammarError as error:
                for sym, prs, err in error.errors:
                    symdef_node = self.rules[sym][0]
                    err.pos = self.rules[sym][0].pos
                    self.tree.add_error(symdef_node, err)
        return grammar_python_src


    def on_definition(self, node: Node) -> Tuple[str, str]:
        rule = node.children[0].content
        if rule in self.rules:
            first = self.rules[rule][0]
            if not id(first) in self.tree.error_nodes:
                self.tree.new_error(first, 'First definition of rule "%s" '
                                    'followed by illegal redefinitions.' % rule)
            self.tree.new_error(node, 'A rule "%s" has already been defined earlier.' % rule)
        elif rule in EBNFCompiler.RESERVED_SYMBOLS:
            self.tree.new_error(node, 'Symbol "%s" is a reserved symbol.' % rule)
        elif not sane_parser_name(rule):
            self.tree.new_error(node, 'Illegal symbol "%s". Symbols must not start or '
                                'end with a double underscore "__".' % rule)
        elif rule in self.directives.tokens:
            self.tree.new_error(node, 'Symbol "%s" has already been defined as '
                                'a preprocessor token.' % rule)
        elif keyword.iskeyword(rule):
            self.tree.new_error(node, 'Python keyword "%s" may not be used as a symbol. '
                                % rule + '(This may change in the future.)')
        try:
            self.current_symbols = [node]
            self.rules[rule] = self.current_symbols
            defn = self.compile(node.children[1])
            if rule in self.variables:
                defn = 'Capture(%s)' % defn
                self.variables.remove(rule)
            elif defn.find("(") < 0:
                # assume it's a synonym, like 'page = REGEX_PAGE_NR'
                defn = 'Synonym(%s)' % defn
        except TypeError as error:
            from traceback import extract_tb
            trace = str(extract_tb(error.__traceback__)[-1])
            errmsg = "%s (TypeError: %s; %s)\n%s" \
                     % (EBNFCompiler.AST_ERROR, str(error), trace, node.as_sxpr())
            self.tree.new_error(node, errmsg)
            rule, defn = rule + ':error', '"' + errmsg + '"'
        return rule, defn


    def on_directive(self, node: Node) -> str:
        key = node.children[0].content
        assert key not in self.directives.tokens

        if key not in self.REPEATABLE_DIRECTIVES and not key.endswith('_error'):
            if key in self.defined_directives: