Created
April 9, 2014 14:50
-
-
Save ibizaman/10279186 to your computer and use it in GitHub Desktop.
baron and redbaron patches for Python3
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
From df9fce4f690766dfa60ff072f04b3ea0ad5164fc Mon Sep 17 00:00:00 2001 | |
From: Pierre Penninckx | |
Date: Mon, 7 Apr 2014 17:57:42 +0200 | |
Subject: [PATCH 1/3] make it interpretable by python3 | |
Nearly untouched output from running 2to3 on all of baron's sources. | |
--- | |
baron/__init__.py | 8 +- | |
baron/baron.py | 16 +-- | |
baron/dumper.py | 4 +- | |
baron/formatting_grouper.py | 8 +- | |
baron/grammator.py | 214 +++++++++++++++++++---------- | |
baron/grammator_control_structures.py | 69 ++++++---- | |
baron/grammator_data_structures.py | 84 +++++++---- | |
baron/grammator_imports.py | 62 ++++++--- | |
baron/grammator_operators.py | 108 ++++++++++----- | |
baron/grammator_primitives.py | 68 +++++---- | |
baron/grouper.py | 52 +++---- | |
baron/helpers.py | 4 +- | |
baron/indentation_marker.py | 12 +- | |
baron/inner_formatting_grouper.py | 20 +-- | |
baron/parser.py | 6 +- | |
baron/spliter.py | 22 +-- | |
baron/token.py | 12 +- | |
baron/tokenizer.py | 4 +- | |
baron/utils.py | 8 +- | |
tests/test_dumper.py | 2 +- | |
tests/test_formatting_grouper.py | 4 +- | |
tests/test_grammator.py | 2 +- | |
tests/test_grammator_control_structures.py | 2 +- | |
tests/test_grammator_data_structures.py | 2 +- | |
tests/test_grammator_imports.py | 2 +- | |
tests/test_grammator_operators.py | 2 +- | |
tests/test_grammator_primitives.py | 2 +- | |
tests/test_indentation_marker.py | 4 +- | |
tests/test_utils.py | 6 +- | |
29 files changed, 500 insertions(+), 309 deletions(-) | |
diff --git a/baron/__init__.py b/baron/__init__.py | |
index ba23005..87291ec 100644 | |
--- a/baron/__init__.py | |
+++ b/baron/__init__.py | |
@@ -1,4 +1,4 @@ | |
-import grouper | |
-import spliter | |
-from baron import parse, tokenize | |
-from dumper import dumps | |
+from . import grouper | |
+from . import spliter | |
+from .baron import parse, tokenize | |
+from .dumper import dumps | |
diff --git a/baron/baron.py b/baron/baron.py | |
index f6a74d2..830e5ae 100644 | |
--- a/baron/baron.py | |
+++ b/baron/baron.py | |
@@ -2,14 +2,14 @@ from ast import parse as python_ast_parse | |
from rply.errors import ParsingError | |
-from utils import PrintFunctionImportFinder | |
-from spliter import split | |
-from grouper import group | |
-from tokenizer import tokenize as _tokenize | |
-from formatting_grouper import group as space_group | |
-from grammator import generate_parse | |
-from indentation_marker import mark_indentation | |
-from inner_formatting_grouper import group as inner_group | |
+from .utils import PrintFunctionImportFinder | |
+from .spliter import split | |
+from .grouper import group | |
+from .tokenizer import tokenize as _tokenize | |
+from .formatting_grouper import group as space_group | |
+from .grammator import generate_parse | |
+from .indentation_marker import mark_indentation | |
+from .inner_formatting_grouper import group as inner_group | |
parse_tokens = generate_parse(False) | |
diff --git a/baron/dumper.py b/baron/dumper.py | |
index 5b04f22..c645eb0 100644 | |
--- a/baron/dumper.py | |
+++ b/baron/dumper.py | |
@@ -1,6 +1,6 @@ | |
def d(j): | |
import json | |
- print json.dumps(j, indent=4) | |
+ print(json.dumps(j, indent=4)) | |
dumpers = {} | |
@@ -245,7 +245,7 @@ def funcdef_argument(node): | |
yield "=" | |
yield dump_node_list(node["second_formatting"]) | |
yield dump_node(node["value"]) | |
- elif isinstance(node["name"], basestring): | |
+ elif isinstance(node["name"], str): | |
yield node["name"] | |
else: | |
yield dump_node(node["name"]) | |
diff --git a/baron/formatting_grouper.py b/baron/formatting_grouper.py | |
index c58dbe3..8dc691e 100644 | |
--- a/baron/formatting_grouper.py | |
+++ b/baron/formatting_grouper.py | |
@@ -1,4 +1,4 @@ | |
-from utils import FlexibleIterator | |
+from .utils import FlexibleIterator | |
class UnExpectedSpaceToken(Exception): | |
pass | |
@@ -112,19 +112,19 @@ def group_generator(sequence): | |
if iterator.end(): | |
return | |
- current = iterator.next() | |
+ current = next(iterator) | |
if current is None: | |
return | |
if current[0] in ("SPACE", "COMMENT") and iterator.show_next() and iterator.show_next()[0] in GROUP_SPACE_BEFORE: | |
- new_current = iterator.next() | |
+ new_current = next(iterator) | |
current = (new_current[0], new_current[1], [current]) | |
if current[0] in GROUP_SPACE_AFTER and\ | |
(iterator.show_next() and iterator.show_next()[0] in ("SPACE", "COMMENT")) and\ | |
(not iterator.show_next(2) or (iterator.show_next(2) and not less_prioritary_than(current[0], iterator.show_next(2)[0]))): | |
- after_space = iterator.next() | |
+ after_space = next(iterator) | |
current = (current[0], current[1], current[2] if len(current) > 2 else [], [after_space]) | |
yield current | |
diff --git a/baron/grammator.py b/baron/grammator.py | |
index 6dbd681..731dbc4 100644 | |
--- a/baron/grammator.py | |
+++ b/baron/grammator.py | |
@@ -1,33 +1,36 @@ | |
-from token import BaronToken | |
-from parser import BaronParserGenerator | |
+from .token import BaronToken | |
+from .parser import BaronParserGenerator | |
-from tokenizer import TOKENS, KEYWORDS, tokenize | |
-from utils import create_node_from_token | |
-from grammator_imports import include_imports | |
-from grammator_control_structures import include_control_structures | |
-from grammator_primitives import include_primivites | |
-from grammator_operators import include_operators | |
-from grammator_data_structures import include_data_structures | |
+from .tokenizer import TOKENS, KEYWORDS, tokenize | |
+from .utils import create_node_from_token | |
+from .grammator_imports import include_imports | |
+from .grammator_control_structures import include_control_structures | |
+from .grammator_primitives import include_primivites | |
+from .grammator_operators import include_operators | |
+from .grammator_data_structures import include_data_structures | |
def generate_parse(print_function): | |
if print_function: | |
- pg = BaronParserGenerator(tuple(map(lambda x: x.upper(), filter(lambda x: x != "print", KEYWORDS))) + zip(*TOKENS)[1] + ("ENDMARKER", "INDENT", "DEDENT"), cache_id="baron") | |
+ pg = BaronParserGenerator(tuple([x.upper() for x in [x for x in KEYWORDS if x != "print"]]) + list(zip(*TOKENS))[1] + ("ENDMARKER", "INDENT", "DEDENT"), cache_id="baron") | |
else: | |
- pg = BaronParserGenerator(tuple(map(lambda x: x.upper(), KEYWORDS)) + zip(*TOKENS)[1] + ("ENDMARKER", "INDENT", "DEDENT"), cache_id="baron") | |
+ pg = BaronParserGenerator(tuple([x.upper() for x in KEYWORDS]) + list(zip(*TOKENS))[1] + ("ENDMARKER", "INDENT", "DEDENT"), cache_id="baron") | |
@pg.production("main : statements") | |
- def main((statements,)): | |
- return filter(None, statements) if statements else [] | |
+ def main(pack): | |
+ (statements,) = pack | |
+ return [_f for _f in statements if _f] if statements else [] | |
@pg.production("statements : statements statement") | |
- def statements_statement((statements, statement)): | |
+ def statements_statement(pack): | |
+ (statements, statement) = pack | |
return statements + statement | |
@pg.production("statements : statement SEMICOLON") | |
- def statement_semicolon((statement, semicolon)): | |
+ def statement_semicolon(pack): | |
+ (statement, semicolon) = pack | |
return statement +\ | |
[{ | |
"type": "semicolon", | |
@@ -38,17 +41,20 @@ def generate_parse(print_function): | |
@pg.production("statements : statement") | |
- def statement((statement,)): | |
+ def statement(pack): | |
+ (statement,) = pack | |
return statement | |
@pg.production("statement : endl") | |
- def statement_endl((endl,)): | |
+ def statement_endl(pack): | |
+ (endl,) = pack | |
return endl | |
@pg.production("endl : ENDL") | |
- def endl((endl,)): | |
+ def endl(pack): | |
+ (endl,) = pack | |
return [{ | |
"type": "endl", | |
"value": endl.value, | |
@@ -58,12 +64,14 @@ def generate_parse(print_function): | |
@pg.production("left_parenthesis : LEFT_PARENTHESIS") | |
- def left_parenthesis((lp,)): | |
+ def left_parenthesis(pack): | |
+ (lp,) = pack | |
return lp | |
@pg.production("endl : COMMENT ENDL") | |
- def comment((comment_, endl)): | |
+ def comment(pack): | |
+ (comment_, endl) = pack | |
return [{ | |
"type": "comment", | |
"value": comment_.value, | |
@@ -77,17 +85,20 @@ def generate_parse(print_function): | |
@pg.production("statement : ENDMARKER") | |
- def end((endmarker)): | |
+ def end(pack): | |
+ (endmarker) = pack | |
return [None] | |
@pg.production("statement : simple_stmt") | |
@pg.production("statement : compound_stmt") | |
- def statement_simple_statement((stmt,)): | |
+ def statement_simple_statement(pack): | |
+ (stmt,) = pack | |
return stmt | |
@pg.production("simple_stmt : small_stmt SEMICOLON endl") | |
- def simple_stmt_semicolon_endl((small_stmt, semicolon, endl)): | |
+ def simple_stmt_semicolon_endl(pack): | |
+ (small_stmt, semicolon, endl) = pack | |
return [small_stmt, | |
{ | |
"type": "semicolon", | |
@@ -98,12 +109,14 @@ def generate_parse(print_function): | |
@pg.production("simple_stmt : small_stmt endl") | |
- def simple_stmt((small_stmt, endl)): | |
+ def simple_stmt(pack): | |
+ (small_stmt, endl) = pack | |
return [small_stmt] + endl | |
@pg.production("simple_stmt : small_stmt SEMICOLON simple_stmt") | |
- def simple_stmt_semicolon((small_stmt, semicolon, simple_stmt)): | |
+ def simple_stmt_semicolon(pack): | |
+ (small_stmt, semicolon, simple_stmt) = pack | |
return [small_stmt, | |
{ | |
"type": "semicolon", | |
@@ -127,12 +140,14 @@ def generate_parse(print_function): | |
@pg.production("compound_stmt : classdef") | |
@pg.production("compound_stmt : with_stmt") | |
@pg.production("compound_stmt : decorated") | |
- def small_and_compound_stmt((statement,)): | |
+ def small_and_compound_stmt(pack): | |
+ (statement,) = pack | |
return statement | |
if not print_function: | |
@pg.production("small_stmt : print_stmt") | |
- def print_statement((statement,)): | |
+ def print_statement(xxx_todo_changeme): | |
+ (statement,) = xxx_todo_changeme | |
return statement | |
@@ -154,12 +169,14 @@ def generate_parse(print_function): | |
@pg.production("factor : power") | |
@pg.production("power : atom") | |
@pg.production("exprlist : expr") | |
- def term_factor((level,)): | |
+ def term_factor(pack): | |
+ (level,) = pack | |
return level | |
@pg.production("with_stmt : WITH with_items COLON suite") | |
- def with_stmt((with_, with_items, colon, suite)): | |
+ def with_stmt(pack): | |
+ (with_, with_items, colon, suite) = pack | |
return [{ | |
"type": "with", | |
"value": suite, | |
@@ -171,17 +188,20 @@ def generate_parse(print_function): | |
@pg.production("with_items : with_items comma with_item") | |
- def with_items_with_item((with_items, comma, with_item,)): | |
+ def with_items_with_item(pack): | |
+ (with_items, comma, with_item,) = pack | |
return with_items + [comma, with_item] | |
@pg.production("with_items : with_item") | |
- def with_items((with_item,)): | |
+ def with_items(pack): | |
+ (with_item,) = pack | |
return [with_item] | |
@pg.production("with_item : test") | |
- def with_item((test,)): | |
+ def with_item(pack): | |
+ (test,) = pack | |
return { | |
"type": "with_context_item", | |
"as": {}, | |
@@ -192,7 +212,8 @@ def generate_parse(print_function): | |
@pg.production("with_item : test AS expr") | |
- def with_item_as((test, as_, expr)): | |
+ def with_item_as(pack): | |
+ (test, as_, expr) = pack | |
return { | |
"type": "with_context_item", | |
"as": expr, | |
@@ -203,7 +224,8 @@ def generate_parse(print_function): | |
@pg.production("classdef : CLASS NAME COLON suite") | |
- def class_stmt((class_, name, colon, suite),): | |
+ def class_stmt(pack,): | |
+ (class_, name, colon, suite) = pack | |
return [{ | |
"type": "class", | |
"name": name.value, | |
@@ -221,7 +243,8 @@ def generate_parse(print_function): | |
@pg.production("classdef : CLASS NAME LEFT_PARENTHESIS RIGHT_PARENTHESIS COLON suite") | |
- def class_stmt_parenthesis((class_, name, left_parenthesis, right_parenthesis, colon, suite),): | |
+ def class_stmt_parenthesis(pack,): | |
+ (class_, name, left_parenthesis, right_parenthesis, colon, suite) = pack | |
return [{ | |
"type": "class", | |
"name": name.value, | |
@@ -239,7 +262,8 @@ def generate_parse(print_function): | |
@pg.production("classdef : CLASS NAME LEFT_PARENTHESIS testlist RIGHT_PARENTHESIS COLON suite") | |
- def class_stmt_inherit((class_, name, left_parenthesis, testlist, right_parenthesis, colon, suite),): | |
+ def class_stmt_inherit(pack,): | |
+ (class_, name, left_parenthesis, testlist, right_parenthesis, colon, suite) = pack | |
return [{ | |
"type": "class", | |
"name": name.value, | |
@@ -258,31 +282,36 @@ def generate_parse(print_function): | |
@pg.production("decorated : decorators funcdef") | |
@pg.production("decorated : decorators classdef") | |
- def decorated((decorators, funcdef)): | |
+ def decorated(pack): | |
+ (decorators, funcdef) = pack | |
funcdef[0]["decorators"] = decorators | |
return funcdef | |
@pg.production("decorators : decorators decorator") | |
- def decorators_decorator((decorators, decorator,)): | |
+ def decorators_decorator(pack): | |
+ (decorators, decorator,) = pack | |
return decorators + decorator | |
@pg.production("decorators : decorator") | |
- def decorators((decorator,)): | |
+ def decorators(pack): | |
+ (decorator,) = pack | |
return decorator | |
# TODO tests | |
@pg.production("decorator : endl") | |
- def decorator_endl((endl,)): | |
+ def decorator_endl(pack): | |
# thanks ipythons dev, you appears to be the only one in the world that | |
# split decorators with empty lines... like seriously. | |
+ (endl,) = pack | |
return endl | |
@pg.production("decorator : AT dotted_name endl") | |
- def decorator((at, dotted_name, endl)): | |
+ def decorator(pack): | |
+ (at, dotted_name, endl) = pack | |
return [{ | |
"type": "decorator", | |
"value": { | |
@@ -295,7 +324,8 @@ def generate_parse(print_function): | |
@pg.production("decorator : AT dotted_name LEFT_PARENTHESIS RIGHT_PARENTHESIS endl") | |
- def decorator_empty_call((at, dotted_name, left_parenthesis, right_parenthesis, endl)): | |
+ def decorator_empty_call(pack): | |
+ (at, dotted_name, left_parenthesis, right_parenthesis, endl) = pack | |
return [{ | |
"type": "decorator", | |
"value": { | |
@@ -315,7 +345,8 @@ def generate_parse(print_function): | |
@pg.production("decorator : AT dotted_name LEFT_PARENTHESIS argslist RIGHT_PARENTHESIS endl") | |
- def decorator_call((at, dotted_name, left_parenthesis, argslist, right_parenthesis, endl)): | |
+ def decorator_call(pack): | |
+ (at, dotted_name, left_parenthesis, argslist, right_parenthesis, endl) = pack | |
return [{ | |
"type": "decorator", | |
"value": { | |
@@ -335,7 +366,8 @@ def generate_parse(print_function): | |
@pg.production("funcdef : DEF NAME LEFT_PARENTHESIS parameters RIGHT_PARENTHESIS COLON suite") | |
- def function_definition((def_, name, left_parenthesis, parameters, right_parenthesis, colon, suite)): | |
+ def function_definition(pack): | |
+ (def_, name, left_parenthesis, parameters, right_parenthesis, colon, suite) = pack | |
return [{ | |
"type": "funcdef", | |
"decorators": [], | |
@@ -352,12 +384,14 @@ def generate_parse(print_function): | |
@pg.production("argslist : argslist argument") | |
@pg.production("parameters : parameters parameter") | |
- def parameters_parameters_parameter((parameters, parameter,),): | |
+ def parameters_parameters_parameter(pack,): | |
+ (parameters, parameter,) = pack | |
return parameters + parameter | |
@pg.production("argslist : argument") | |
@pg.production("parameters : parameter") | |
- def parameters_parameter((parameter,),): | |
+ def parameters_parameter(pack,): | |
+ (parameter,) = pack | |
return parameter | |
@pg.production("argument :") | |
@@ -366,14 +400,16 @@ def generate_parse(print_function): | |
return [] | |
@pg.production("name : NAME") | |
- def name((name_,)): | |
+ def name(pack): | |
+ (name_,) = pack | |
return { | |
"type": "name", | |
"value": name_.value, | |
} | |
@pg.production("argument : test") | |
- def argument_one((name,)): | |
+ def argument_one(pack): | |
+ (name,) = pack | |
return [{ | |
"type": "argument", | |
"first_formatting": [], | |
@@ -384,7 +420,8 @@ def generate_parse(print_function): | |
@pg.production("parameter : name") | |
- def parameter_one((name,)): | |
+ def parameter_one(pack): | |
+ (name,) = pack | |
return [{ | |
"type": "argument", | |
"first_formatting": [], | |
@@ -395,7 +432,8 @@ def generate_parse(print_function): | |
@pg.production("parameter : LEFT_PARENTHESIS parameter RIGHT_PARENTHESIS") | |
- def parameter_fpdef((left_parenthesis, parameter, right_parenthesis)): | |
+ def parameter_fpdef(pack): | |
+ (left_parenthesis, parameter, right_parenthesis) = pack | |
return [{ | |
"type": "associative_parenthesis", | |
"first_formatting": left_parenthesis.hidden_tokens_before, | |
@@ -407,7 +445,8 @@ def generate_parse(print_function): | |
@pg.production("parameter : LEFT_PARENTHESIS fplist RIGHT_PARENTHESIS") | |
- def parameter_fplist((left_parenthesis, fplist, right_parenthesis)): | |
+ def parameter_fplist(pack): | |
+ (left_parenthesis, fplist, right_parenthesis) = pack | |
return [{ | |
"type": "argument", | |
"first_formatting": [], | |
@@ -426,12 +465,14 @@ def generate_parse(print_function): | |
@pg.production("fplist : fplist parameter") | |
- def fplist_recur((fplist, parameter)): | |
+ def fplist_recur(pack): | |
+ (fplist, parameter) = pack | |
return fplist + parameter | |
@pg.production("fplist : parameter comma") | |
- def fplist((parameter, comma)): | |
+ def fplist(pack): | |
+ (parameter, comma) = pack | |
return parameter + [comma] | |
@@ -440,7 +481,8 @@ def generate_parse(print_function): | |
# python give me 'SyntaxError: keyword can't be an expression' when I try to | |
# put something else than a name (looks like a custom SyntaxError) | |
@pg.production("argument : test EQUAL test") | |
- def named_argument((name, equal, test)): | |
+ def named_argument(pack): | |
+ (name, equal, test) = pack | |
return [{ | |
"type": "argument", | |
"first_formatting": equal.hidden_tokens_before, | |
@@ -450,7 +492,8 @@ def generate_parse(print_function): | |
}] | |
@pg.production("parameter : name EQUAL test") | |
- def parameter_with_default((name, equal, test)): | |
+ def parameter_with_default(pack): | |
+ (name, equal, test) = pack | |
return [{ | |
"type": "argument", | |
"first_formatting": equal.hidden_tokens_before, | |
@@ -460,7 +503,8 @@ def generate_parse(print_function): | |
}] | |
@pg.production("argument : test comp_for") | |
- def generator_comprehension((test, comp_for,)): | |
+ def generator_comprehension(pack): | |
+ (test, comp_for,) = pack | |
return [{ | |
"type": "argument_generator_comprehension", | |
"result": test, | |
@@ -468,7 +512,8 @@ def generate_parse(print_function): | |
}] | |
@pg.production("argument : STAR test") | |
- def argument_star((star, test,)): | |
+ def argument_star(pack): | |
+ (star, test,) = pack | |
return [{ | |
"type": "list_argument", | |
"formatting": star.hidden_tokens_after, | |
@@ -476,7 +521,8 @@ def generate_parse(print_function): | |
}] | |
@pg.production("argument : DOUBLE_STAR test") | |
- def argument_star_star((double_star, test,)): | |
+ def argument_star_star(pack): | |
+ (double_star, test,) = pack | |
return [{ | |
"type": "dict_argument", | |
"formatting": double_star.hidden_tokens_after, | |
@@ -485,7 +531,8 @@ def generate_parse(print_function): | |
# TODO refactor those 2 to uniformise with argument_star and argument_star_star | |
@pg.production("parameter : STAR NAME") | |
- def parameter_star((star, name,)): | |
+ def parameter_star(pack): | |
+ (star, name,) = pack | |
return [{ | |
"type": "list_argument", | |
"formatting": star.hidden_tokens_after, | |
@@ -496,7 +543,8 @@ def generate_parse(print_function): | |
}] | |
@pg.production("parameter : DOUBLE_STAR NAME") | |
- def parameter_star_star((double_star, name,)): | |
+ def parameter_star_star(pack): | |
+ (double_star, name,) = pack | |
return [{ | |
"type": "dict_argument", | |
"formatting": double_star.hidden_tokens_after, | |
@@ -508,16 +556,19 @@ def generate_parse(print_function): | |
@pg.production("argument : comma") | |
@pg.production("parameter : comma") | |
- def parameter_comma((comma,)): | |
+ def parameter_comma(pack): | |
+ (comma,) = pack | |
return [comma] | |
@pg.production("suite : simple_stmt") | |
- def suite((simple_stmt,)): | |
+ def suite(pack): | |
+ (simple_stmt,) = pack | |
return simple_stmt | |
@pg.production("suite : endls INDENT statements DEDENT") | |
- def suite_indent((endls, indent, statements, dedent,)): | |
+ def suite_indent(pack): | |
+ (endls, indent, statements, dedent,) = pack | |
return endls + statements | |
@@ -536,7 +587,8 @@ def generate_parse(print_function): | |
include_data_structures(pg) | |
@pg.production("atom : LEFT_PARENTHESIS yield_expr RIGHT_PARENTHESIS") | |
- def yield_atom((left_parenthesis, yield_expr, right_parenthesis)): | |
+ def yield_atom(pack): | |
+ (left_parenthesis, yield_expr, right_parenthesis) = pack | |
return { | |
"type": "yield_atom", | |
"value": yield_expr["value"], | |
@@ -546,7 +598,8 @@ def generate_parse(print_function): | |
} | |
@pg.production("atom : BACKQUOTE testlist1 BACKQUOTE") | |
- def repr_atom((backquote, testlist1, backquote2)): | |
+ def repr_atom(pack): | |
+ (backquote, testlist1, backquote2) = pack | |
return { | |
"type": "repr", | |
"value": testlist1, | |
@@ -555,11 +608,13 @@ def generate_parse(print_function): | |
} | |
@pg.production("testlist1 : test comma testlist1") | |
- def testlist1_double((test, comma, test2,)): | |
+ def testlist1_double(pack): | |
+ (test, comma, test2,) = pack | |
return [test, comma] + test2 | |
@pg.production("testlist1 : test") | |
- def testlist1((test,)): | |
+ def testlist1(pack): | |
+ (test,) = pack | |
return [test] | |
# TODO test all the things (except INT) | |
@@ -570,17 +625,20 @@ def generate_parse(print_function): | |
@pg.production("atom : FLOAT") | |
@pg.production("atom : FLOAT_EXPONANT") | |
@pg.production("atom : COMPLEX") | |
- def int((int_,)): | |
+ def int(pack): | |
+ (int_,) = pack | |
return create_node_from_token(int_, section="number") | |
@pg.production("atom : name") | |
- def atom_name((name,)): | |
+ def atom_name(pack): | |
+ (name,) = pack | |
return name | |
@pg.production("atom : strings") | |
- def strings((string_chain,)): | |
+ def strings(pack): | |
+ (string_chain,) = pack | |
if len(string_chain) == 1: | |
return string_chain[0] | |
return { | |
@@ -589,11 +647,13 @@ def generate_parse(print_function): | |
} | |
@pg.production("strings : string strings") | |
- def strings_string_strings((string_, strings_)): | |
+ def strings_string_strings(pack): | |
+ (string_, strings_) = pack | |
return string_ + strings_ | |
@pg.production("strings : string") | |
- def strings_string((string_,)): | |
+ def strings_string(pack): | |
+ (string_,) = pack | |
return string_ | |
# TODO tests those other kind of strings | |
@@ -603,7 +663,8 @@ def generate_parse(print_function): | |
@pg.production("string : BINARY_STRING") | |
@pg.production("string : UNICODE_RAW_STRING") | |
@pg.production("string : BINARY_RAW_STRING") | |
- def string((string_,)): | |
+ def string(pack): | |
+ (string_,) = pack | |
return [{ | |
"type": string_.name.lower(), | |
"value": string_.value, | |
@@ -613,7 +674,8 @@ def generate_parse(print_function): | |
@pg.production("comma : COMMA") | |
- def comma((comma,)): | |
+ def comma(pack): | |
+ (comma,) = pack | |
return { | |
"type": "comma", | |
"first_formatting": comma.hidden_tokens_before, | |
@@ -630,9 +692,9 @@ def generate_parse(print_function): | |
token = tuple(token) | |
new_tokens.append(token) | |
- tokens = map(lambda x: BaronToken(*x) if x else x, new_tokens) + [None] | |
+ tokens = [BaronToken(*x) if x else x for x in new_tokens] + [None] | |
else: | |
- tokens = map(lambda x: BaronToken(*x) if x else x, tokens) + [None] | |
+ tokens = [BaronToken(*x) if x else x for x in tokens] + [None] | |
return parser.parse(iter(tokens)) | |
diff --git a/baron/grammator_control_structures.py b/baron/grammator_control_structures.py | |
index e30df4b..2dc8961 100644 | |
--- a/baron/grammator_control_structures.py | |
+++ b/baron/grammator_control_structures.py | |
@@ -1,6 +1,7 @@ | |
def include_control_structures(pg): | |
@pg.production("try_stmt : TRY COLON suite excepts") | |
- def try_excepts_stmt((try_, colon, suite, excepts)): | |
+ def try_excepts_stmt(pack): | |
+ (try_, colon, suite, excepts) = pack | |
return [{ | |
"type": "try", | |
"value": suite, | |
@@ -12,7 +13,8 @@ def include_control_structures(pg): | |
}] | |
@pg.production("try_stmt : TRY COLON suite excepts else_stmt") | |
- def try_excepts_else_stmt((try_, colon, suite, excepts, else_stmt)): | |
+ def try_excepts_else_stmt(pack): | |
+ (try_, colon, suite, excepts, else_stmt) = pack | |
return [{ | |
"type": "try", | |
"value": suite, | |
@@ -24,7 +26,8 @@ def include_control_structures(pg): | |
}] | |
@pg.production("try_stmt : TRY COLON suite excepts finally_stmt") | |
- def try_excepts_finally_stmt((try_, colon, suite, excepts, finally_stmt)): | |
+ def try_excepts_finally_stmt(pack): | |
+ (try_, colon, suite, excepts, finally_stmt) = pack | |
return [{ | |
"type": "try", | |
"value": suite, | |
@@ -36,7 +39,8 @@ def include_control_structures(pg): | |
}] | |
@pg.production("try_stmt : TRY COLON suite excepts else_stmt finally_stmt") | |
- def try_excepts_else_finally_stmt((try_, colon, suite, excepts, else_stmt, finally_stmt)): | |
+ def try_excepts_else_finally_stmt(pack): | |
+ (try_, colon, suite, excepts, else_stmt, finally_stmt) = pack | |
return [{ | |
"type": "try", | |
"value": suite, | |
@@ -48,7 +52,8 @@ def include_control_structures(pg): | |
}] | |
@pg.production("try_stmt : TRY COLON suite finally_stmt") | |
- def try_stmt((try_, colon, suite, finally_stmt)): | |
+ def try_stmt(pack): | |
+ (try_, colon, suite, finally_stmt) = pack | |
return [{ | |
"type": "try", | |
"value": suite, | |
@@ -60,15 +65,18 @@ def include_control_structures(pg): | |
}] | |
@pg.production("excepts : excepts except_stmt") | |
- def excepts((excepts_, except_stmt)): | |
+ def excepts(pack): | |
+ (excepts_, except_stmt) = pack | |
return excepts_ + except_stmt | |
@pg.production("excepts : except_stmt") | |
- def excepts_except_stmt((except_stmt,)): | |
+ def excepts_except_stmt(pack): | |
+ (except_stmt,) = pack | |
return except_stmt | |
@pg.production("except_stmt : EXCEPT test AS test COLON suite") | |
- def except_as_stmt((except_, test, as_, test2, colon, suite)): | |
+ def except_as_stmt(pack): | |
+ (except_, test, as_, test2, colon, suite) = pack | |
return [{ | |
"type": "except", | |
"first_formatting": except_.hidden_tokens_after, | |
@@ -83,7 +91,8 @@ def include_control_structures(pg): | |
}] | |
@pg.production("except_stmt : EXCEPT test COMMA test COLON suite") | |
- def except_comma_stmt((except_, test, comma, test2, colon, suite)): | |
+ def except_comma_stmt(pack): | |
+ (except_, test, comma, test2, colon, suite) = pack | |
return [{ | |
"type": "except", | |
"first_formatting": except_.hidden_tokens_after, | |
@@ -98,7 +107,8 @@ def include_control_structures(pg): | |
}] | |
@pg.production("except_stmt : EXCEPT COLON suite") | |
- def except_stmt_empty((except_, colon, suite)): | |
+ def except_stmt_empty(pack): | |
+ (except_, colon, suite) = pack | |
return [{ | |
"type": "except", | |
"first_formatting": except_.hidden_tokens_after, | |
@@ -113,7 +123,8 @@ def include_control_structures(pg): | |
}] | |
@pg.production("except_stmt : EXCEPT test COLON suite") | |
- def except_stmt((except_, test, colon, suite)): | |
+ def except_stmt(pack): | |
+ (except_, test, colon, suite) = pack | |
return [{ | |
"type": "except", | |
"first_formatting": except_.hidden_tokens_after, | |
@@ -128,7 +139,8 @@ def include_control_structures(pg): | |
}] | |
@pg.production("finally_stmt : FINALLY COLON suite") | |
- def finally_stmt((finally_, colon, suite)): | |
+ def finally_stmt(pack): | |
+ (finally_, colon, suite) = pack | |
return { | |
"type": "finally", | |
"value": suite, | |
@@ -137,7 +149,8 @@ def include_control_structures(pg): | |
} | |
@pg.production("else_stmt : ELSE COLON suite") | |
- def else_stmt((else_, colon, suite)): | |
+ def else_stmt(pack): | |
+ (else_, colon, suite) = pack | |
return { | |
"type": "else", | |
"value": suite, | |
@@ -146,7 +159,8 @@ def include_control_structures(pg): | |
} | |
@pg.production("for_stmt : FOR exprlist IN testlist COLON suite") | |
- def for_stmt((for_, exprlist, in_, testlist, colon, suite),): | |
+ def for_stmt(pack,): | |
+ (for_, exprlist, in_, testlist, colon, suite) = pack | |
return [{ | |
"type": "for", | |
"value": suite, | |
@@ -161,7 +175,8 @@ def include_control_structures(pg): | |
}] | |
@pg.production("for_stmt : FOR exprlist IN testlist COLON suite else_stmt") | |
- def for_else_stmt((for_, exprlist, in_, testlist, colon, suite, else_stmt),): | |
+ def for_else_stmt(pack,): | |
+ (for_, exprlist, in_, testlist, colon, suite, else_stmt) = pack | |
return [{ | |
"type": "for", | |
"value": suite, | |
@@ -176,7 +191,8 @@ def include_control_structures(pg): | |
}] | |
@pg.production("while_stmt : WHILE test COLON suite") | |
- def while_stmt((while_, test, colon, suite)): | |
+ def while_stmt(pack): | |
+ (while_, test, colon, suite) = pack | |
return [{ | |
"type": "while", | |
"value": suite, | |
@@ -188,7 +204,8 @@ def include_control_structures(pg): | |
}] | |
@pg.production("while_stmt : WHILE test COLON suite else_stmt") | |
- def while_stmt_else((while_, test, colon, suite, else_stmt)): | |
+ def while_stmt_else(pack): | |
+ (while_, test, colon, suite, else_stmt) = pack | |
return [{ | |
"type": "while", | |
"value": suite, | |
@@ -200,7 +217,8 @@ def include_control_structures(pg): | |
}] | |
@pg.production("if_stmt : IF test COLON suite") | |
- def if_stmt((if_, test, colon, suite)): | |
+ def if_stmt(pack): | |
+ (if_, test, colon, suite) = pack | |
return [{ | |
"type": "ifelseblock", | |
"value": [{ | |
@@ -214,7 +232,8 @@ def include_control_structures(pg): | |
}] | |
@pg.production("if_stmt : IF test COLON suite elifs") | |
- def if_elif_stmt((if_, test, colon, suite, elifs)): | |
+ def if_elif_stmt(pack): | |
+ (if_, test, colon, suite, elifs) = pack | |
return [{ | |
"type": "ifelseblock", | |
"value": [{ | |
@@ -228,7 +247,8 @@ def include_control_structures(pg): | |
}] | |
@pg.production("elifs : elifs ELIF test COLON suite") | |
- def elifs_elif((elifs, elif_, test, colon, suite),): | |
+ def elifs_elif(pack,): | |
+ (elifs, elif_, test, colon, suite) = pack | |
return elifs + [{ | |
"type": "elif", | |
"first_formatting": elif_.hidden_tokens_after, | |
@@ -239,7 +259,8 @@ def include_control_structures(pg): | |
}] | |
@pg.production("elifs : ELIF test COLON suite") | |
- def elif_((elif_, test, colon, suite),): | |
+ def elif_(pack,): | |
+ (elif_, test, colon, suite) = pack | |
return [{ | |
"type": "elif", | |
"first_formatting": elif_.hidden_tokens_after, | |
@@ -250,7 +271,8 @@ def include_control_structures(pg): | |
}] | |
@pg.production("if_stmt : IF test COLON suite else_stmt") | |
- def if_else_stmt((if_, test, colon, suite, else_stmt)): | |
+ def if_else_stmt(pack): | |
+ (if_, test, colon, suite, else_stmt) = pack | |
return [{ | |
"type": "ifelseblock", | |
"value": [{ | |
@@ -264,7 +286,8 @@ def include_control_structures(pg): | |
}] | |
@pg.production("if_stmt : IF test COLON suite elifs else_stmt") | |
- def if_elif_else_stmt((if_, test, colon, suite, elifs, else_stmt)): | |
+ def if_elif_else_stmt(pack): | |
+ (if_, test, colon, suite, elifs, else_stmt) = pack | |
return [{ | |
"type": "ifelseblock", | |
"value": [{ | |
diff --git a/baron/grammator_data_structures.py b/baron/grammator_data_structures.py | |
index 8c7c272..ac8be3b 100644 | |
--- a/baron/grammator_data_structures.py | |
+++ b/baron/grammator_data_structures.py | |
@@ -1,7 +1,8 @@ | |
def include_data_structures(pg): | |
# TODO remove left_parenthesis and use LEFT_PARENTHESIS instead | |
@pg.production("atom : left_parenthesis testlist_comp RIGHT_PARENTHESIS") | |
- def tuple((left_parenthesis, testlist_comp, right_parenthesis,)): | |
+ def tuple(pack): | |
+ (left_parenthesis, testlist_comp, right_parenthesis,) = pack | |
return { | |
"type": "tuple", | |
"value": testlist_comp, | |
@@ -14,7 +15,8 @@ def include_data_structures(pg): | |
@pg.production("atom : left_parenthesis test RIGHT_PARENTHESIS") | |
- def associative_parenthesis((left_parenthesis, test, right_parenthesis,)): | |
+ def associative_parenthesis(pack): | |
+ (left_parenthesis, test, right_parenthesis,) = pack | |
return { | |
"type": "associative_parenthesis", | |
"first_formatting": left_parenthesis.hidden_tokens_before, | |
@@ -28,7 +30,8 @@ def include_data_structures(pg): | |
@pg.production("testlist : test comma") | |
@pg.production("exprlist : expr comma") | |
@pg.production("subscriptlist : subscript comma") | |
- def implicit_tuple_alone((test, comma)): | |
+ def implicit_tuple_alone(pack): | |
+ (test, comma) = pack | |
return { | |
"type": "tuple", | |
"value": [test, comma], | |
@@ -43,7 +46,8 @@ def include_data_structures(pg): | |
@pg.production("testlist : test testlist_part") | |
@pg.production("exprlist : expr exprlist_part") | |
@pg.production("subscriptlist : subscript subscriptlist_part") | |
- def implicit_tuple((test, testlist_part)): | |
+ def implicit_tuple(pack): | |
+ (test, testlist_part) = pack | |
return { | |
"type": "tuple", | |
"value": [test] + testlist_part, | |
@@ -58,7 +62,8 @@ def include_data_structures(pg): | |
@pg.production("testlist_part : COMMA test") | |
@pg.production("exprlist_part : COMMA expr") | |
@pg.production("subscriptlist_part : COMMA subscript") | |
- def testlist_part((comma, test)): | |
+ def testlist_part(pack): | |
+ (comma, test) = pack | |
return [{ | |
"type": "comma", | |
"first_formatting": comma.hidden_tokens_before, | |
@@ -68,7 +73,8 @@ def include_data_structures(pg): | |
@pg.production("testlist_part : COMMA test COMMA") | |
@pg.production("exprlist_part : COMMA expr COMMA") | |
@pg.production("subscriptlist_part : COMMA subscript COMMA") | |
- def testlist_part_comma((comma, test, comma2)): | |
+ def testlist_part_comma(pack): | |
+ (comma, test, comma2) = pack | |
return [{ | |
"type": "comma", | |
"first_formatting": comma.hidden_tokens_before, | |
@@ -83,7 +89,8 @@ def include_data_structures(pg): | |
@pg.production("testlist_part : COMMA test testlist_part") | |
@pg.production("exprlist_part : COMMA expr exprlist_part") | |
@pg.production("subscriptlist_part : COMMA subscript subscriptlist_part") | |
- def testlist_part_next((comma, test, testlist_part)): | |
+ def testlist_part_next(pack): | |
+ (comma, test, testlist_part) = pack | |
return [{ | |
"type": "comma", | |
"first_formatting": comma.hidden_tokens_before, | |
@@ -97,17 +104,20 @@ def include_data_structures(pg): | |
@pg.production("testlist_comp : test comma test") | |
- def testlist_comp_two((test, comma, test2)): | |
+ def testlist_comp_two(pack): | |
+ (test, comma, test2) = pack | |
return [test, comma, test2] | |
@pg.production("testlist_comp : test comma testlist_comp") | |
- def testlist_comp_more((test, comma, testlist_comp)): | |
+ def testlist_comp_more(pack): | |
+ (test, comma, testlist_comp) = pack | |
return [test, comma] + testlist_comp | |
@pg.production("atom : LEFT_SQUARE_BRACKET listmaker RIGHT_SQUARE_BRACKET") | |
- def list((left_bracket, listmaker, right_bracket,)): | |
+ def list(pack): | |
+ (left_bracket, listmaker, right_bracket,) = pack | |
return { | |
"type": "list", | |
"first_formatting": left_bracket.hidden_tokens_before, | |
@@ -124,17 +134,20 @@ def include_data_structures(pg): | |
@pg.production("listmaker : test") | |
- def listmaker_one((test,)): | |
+ def listmaker_one(pack): | |
+ (test,) = pack | |
return [test] | |
@pg.production("listmaker : test comma listmaker") | |
- def listmaker_more((test, comma, listmaker)): | |
+ def listmaker_more(pack): | |
+ (test, comma, listmaker) = pack | |
return [test, comma] + listmaker | |
@pg.production("atom : LEFT_BRACKET dictmaker RIGHT_BRACKET") | |
- def dict((left_bracket, dictmaker, right_bracket,)): | |
+ def dict(pack): | |
+ (left_bracket, dictmaker, right_bracket,) = pack | |
return { | |
"type": "dict", | |
"first_formatting": left_bracket.hidden_tokens_before, | |
@@ -151,7 +164,8 @@ def include_data_structures(pg): | |
@pg.production("dictmaker : test COLON test") | |
- def dict_one((test, colon, test2)): | |
+ def dict_one(pack): | |
+ (test, colon, test2) = pack | |
return [{ | |
"first_formatting": colon.hidden_tokens_before, | |
"second_formatting": colon.hidden_tokens_after, | |
@@ -162,7 +176,8 @@ def include_data_structures(pg): | |
@pg.production("dictmaker : test COLON test comma dictmaker") | |
- def dict_more((test, colon, test2, comma, dictmaker)): | |
+ def dict_more(pack): | |
+ (test, colon, test2, comma, dictmaker) = pack | |
return [{ | |
"first_formatting": colon.hidden_tokens_before, | |
"second_formatting": colon.hidden_tokens_after, | |
@@ -173,7 +188,8 @@ def include_data_structures(pg): | |
@pg.production("atom : LEFT_BRACKET setmaker RIGHT_BRACKET") | |
- def set((left_bracket, setmaker, right_bracket,)): | |
+ def set(pack): | |
+ (left_bracket, setmaker, right_bracket,) = pack | |
return { | |
"type": "set", | |
"first_formatting": left_bracket.hidden_tokens_before, | |
@@ -185,17 +201,20 @@ def include_data_structures(pg): | |
@pg.production("setmaker : test comma setmaker") | |
- def set_more((test, comma, setmaker)): | |
+ def set_more(pack): | |
+ (test, comma, setmaker) = pack | |
return [test, comma] + setmaker | |
@pg.production("setmaker : test") | |
- def set_one((test,)): | |
+ def set_one(pack): | |
+ (test,) = pack | |
return [test] | |
@pg.production("atom : left_parenthesis test comp_for RIGHT_PARENTHESIS") | |
- def generator_comprehension((left_parenthesis, test, comp_for, right_parenthesis,)): | |
+ def generator_comprehension(pack): | |
+ (left_parenthesis, test, comp_for, right_parenthesis,) = pack | |
return { | |
"type": "generator_comprehension", | |
"first_formatting": left_parenthesis.hidden_tokens_before, | |
@@ -207,7 +226,8 @@ def include_data_structures(pg): | |
} | |
@pg.production("atom : LEFT_SQUARE_BRACKET test list_for RIGHT_SQUARE_BRACKET") | |
- def list_comprehension((left_square_bracket, test, list_for, right_square_bracket)): | |
+ def list_comprehension(pack): | |
+ (left_square_bracket, test, list_for, right_square_bracket) = pack | |
return { | |
"type": "list_comprehension", | |
"first_formatting": left_square_bracket.hidden_tokens_before, | |
@@ -219,7 +239,8 @@ def include_data_structures(pg): | |
} | |
@pg.production("atom : LEFT_BRACKET test COLON test comp_for RIGHT_BRACKET") | |
- def dict_comprehension((left_bracket, test, colon, test2, list_for, right_bracket)): | |
+ def dict_comprehension(pack): | |
+ (left_bracket, test, colon, test2, list_for, right_bracket) = pack | |
return { | |
"type": "dict_comprehension", | |
"first_formatting": left_bracket.hidden_tokens_before, | |
@@ -236,7 +257,8 @@ def include_data_structures(pg): | |
} | |
@pg.production("atom : LEFT_BRACKET test comp_for RIGHT_BRACKET") | |
- def set_comprehension((left_bracket, test, list_for, right_bracket)): | |
+ def set_comprehension(pack): | |
+ (left_bracket, test, list_for, right_bracket) = pack | |
return { | |
"type": "set_comprehension", | |
"first_formatting": left_bracket.hidden_tokens_before, | |
@@ -249,7 +271,8 @@ def include_data_structures(pg): | |
@pg.production("list_for : FOR exprlist IN old_test") | |
@pg.production("comp_for : FOR exprlist IN or_test") | |
- def comp_for((for_, exprlist, in_, or_test)): | |
+ def comp_for(pack): | |
+ (for_, exprlist, in_, or_test) = pack | |
return [{ | |
"type": "comprehension_loop", | |
"first_formatting": for_.hidden_tokens_before, | |
@@ -263,7 +286,8 @@ def include_data_structures(pg): | |
@pg.production("list_for : FOR exprlist IN old_test") | |
@pg.production("list_for : FOR exprlist IN testlist_safe") | |
- def comp_for_implicite_tuple((for_, exprlist, in_, testlist_safe)): | |
+ def comp_for_implicite_tuple(pack): | |
+ (for_, exprlist, in_, testlist_safe) = pack | |
return [{ | |
"type": "comprehension_loop", | |
"first_formatting": for_.hidden_tokens_before, | |
@@ -286,7 +310,8 @@ def include_data_structures(pg): | |
@pg.production("comp_for : FOR exprlist IN or_test comp_iter") | |
@pg.production("list_for : FOR exprlist IN old_test list_iter") | |
@pg.production("list_for : FOR exprlist IN testlist_safe list_iter") | |
- def comp_for_iter((for_, exprlist, in_, or_test, comp_iter)): | |
+ def comp_for_iter(pack): | |
+ (for_, exprlist, in_, or_test, comp_iter) = pack | |
my_ifs = [] | |
for i in comp_iter: | |
if i["type"] != "comprehension_if": | |
@@ -307,12 +332,14 @@ def include_data_structures(pg): | |
@pg.production("list_iter : list_for") | |
@pg.production("comp_iter : comp_for") | |
- def comp_iter_comp_for((comp_for,)): | |
+ def comp_iter_comp_for(pack): | |
+ (comp_for,) = pack | |
return comp_for | |
@pg.production("list_iter : IF old_test") | |
@pg.production("comp_iter : IF old_test") | |
- def comp_iter_if((if_, old_test)): | |
+ def comp_iter_if(pack): | |
+ (if_, old_test) = pack | |
return [{ | |
"type": "comprehension_if", | |
"first_formatting": if_.hidden_tokens_before, | |
@@ -322,7 +349,8 @@ def include_data_structures(pg): | |
@pg.production("list_iter : IF old_test list_iter") | |
@pg.production("comp_iter : IF old_test comp_iter") | |
- def comp_iter_if_comp_iter((if_, old_test, comp_iter)): | |
+ def comp_iter_if_comp_iter(pack): | |
+ (if_, old_test, comp_iter) = pack | |
return [{ | |
"type": "comprehension_if", | |
"first_formatting": if_.hidden_tokens_before, | |
diff --git a/baron/grammator_imports.py b/baron/grammator_imports.py | |
index aca8dc9..57415c7 100644 | |
--- a/baron/grammator_imports.py | |
+++ b/baron/grammator_imports.py | |
@@ -1,13 +1,15 @@ | |
-from utils import create_node_from_token | |
+from .utils import create_node_from_token | |
def include_imports(pg): | |
@pg.production("small_stmt : import") | |
@pg.production("small_stmt : from_import") | |
- def separator((statement,)): | |
+ def separator(pack): | |
+ (statement,) = pack | |
return statement | |
@pg.production("import : IMPORT dotted_as_names") | |
- def importeu((import_, dotted_as_names)): | |
+ def importeu(pack): | |
+ (import_, dotted_as_names) = pack | |
return { | |
"type": "import", | |
"value": dotted_as_names, | |
@@ -16,7 +18,8 @@ def include_imports(pg): | |
} | |
@pg.production("from_import : FROM dotted_name IMPORT from_import_target") | |
- def from_import_with_space((from_, dotted_name, import_, from_import_target)): | |
+ def from_import_with_space(pack): | |
+ (from_, dotted_name, import_, from_import_target) = pack | |
return { | |
"type": "from_import", | |
"targets": from_import_target, | |
@@ -30,11 +33,13 @@ def include_imports(pg): | |
} | |
@pg.production("from_import_target : name_as_names") | |
- def from_import_target_name_as_names((name_as_names,)): | |
+ def from_import_target_name_as_names(pack): | |
+ (name_as_names,) = pack | |
return name_as_names | |
@pg.production("from_import_target : LEFT_PARENTHESIS name_as_names RIGHT_PARENTHESIS") | |
- def from_import_parenthesis((left_parenthesis, name_as_names, right_parenthesis)): | |
+ def from_import_parenthesis(pack): | |
+ (left_parenthesis, name_as_names, right_parenthesis) = pack | |
return left_parenthesis.hidden_tokens_before +\ | |
[{"type": "left_parenthesis", "value": "("}] +\ | |
left_parenthesis.hidden_tokens_after +\ | |
@@ -44,7 +49,8 @@ def include_imports(pg): | |
right_parenthesis.hidden_tokens_after | |
@pg.production("from_import_target : STAR") | |
- def from_import_star((star,)): | |
+ def from_import_star(pack): | |
+ (star,) = pack | |
return [{ | |
"type": "star", | |
"value": "*", | |
@@ -53,15 +59,18 @@ def include_imports(pg): | |
}] | |
@pg.production("name_as_names : name_as_names name_as_name") | |
- def name_as_names_name_as_name((name_as_names, name_as_name)): | |
+ def name_as_names_name_as_name(pack): | |
+ (name_as_names, name_as_name) = pack | |
return name_as_names + name_as_name | |
@pg.production("name_as_names : name_as_name") | |
- def name_as_names((name_as_name,)): | |
+ def name_as_names(pack): | |
+ (name_as_name,) = pack | |
return name_as_name | |
@pg.production("name_as_name : NAME AS NAME") | |
- def name_as_name_name_as_name((name, as_, name2)): | |
+ def name_as_name_name_as_name(pack): | |
+ (name, as_, name2) = pack | |
return [{ | |
"type": "name_as_name", | |
"value": name.value, | |
@@ -72,7 +81,8 @@ def include_imports(pg): | |
}] | |
@pg.production("name_as_name : NAME") | |
- def name_as_name_name((name,)): | |
+ def name_as_name_name(pack): | |
+ (name,) = pack | |
return [{ | |
"type": "name_as_name", | |
"value": name.value, | |
@@ -83,7 +93,8 @@ def include_imports(pg): | |
}] | |
@pg.production("name_as_name : NAME SPACE") | |
- def name_as_name_name_space((name, space)): | |
+ def name_as_name_name_space(pack): | |
+ (name, space) = pack | |
return [{ | |
"type": "name_as_name", | |
"target": None, | |
@@ -93,19 +104,23 @@ def include_imports(pg): | |
}] + [create_node_from_token(space)] | |
@pg.production("name_as_name : comma") | |
- def name_as_name_comma_space((comma,)): | |
+ def name_as_name_comma_space(pack): | |
+ (comma,) = pack | |
return [comma] | |
@pg.production("dotted_as_names : dotted_as_names comma dotted_as_name") | |
- def dotted_as_names_dotted_as_names_dotted_as_name((dotted_as_names, comma, dotted_as_names2)): | |
+ def dotted_as_names_dotted_as_names_dotted_as_name(pack): | |
+ (dotted_as_names, comma, dotted_as_names2) = pack | |
return dotted_as_names + [comma] + dotted_as_names2 | |
@pg.production("dotted_as_names : dotted_as_name") | |
- def dotted_as_names_dotted_as_name((dotted_as_name,)): | |
+ def dotted_as_names_dotted_as_name(pack): | |
+ (dotted_as_name,) = pack | |
return dotted_as_name | |
@pg.production("dotted_as_name : dotted_name AS NAME") | |
- def dotted_as_name_as((dotted_as_name, as_, name)): | |
+ def dotted_as_name_as(pack): | |
+ (dotted_as_name, as_, name) = pack | |
return [{ | |
"type": "dotted_as_name", | |
"value": { | |
@@ -119,7 +134,8 @@ def include_imports(pg): | |
}] | |
@pg.production("dotted_as_name : dotted_name") | |
- def dotted_as_name((dotted_name,)): | |
+ def dotted_as_name(pack): | |
+ (dotted_name,) = pack | |
return [{ | |
"type": "dotted_as_name", | |
"value": { | |
@@ -133,20 +149,24 @@ def include_imports(pg): | |
}] | |
@pg.production("dotted_name : dotted_name dotted_name_element") | |
- def dotted_name_elements_element((dotted_name, dotted_name_element)): | |
+ def dotted_name_elements_element(pack): | |
+ (dotted_name, dotted_name_element) = pack | |
return dotted_name + dotted_name_element | |
@pg.production("dotted_name : dotted_name_element") | |
- def dotted_name_element((dotted_name_element,)): | |
+ def dotted_name_element(pack): | |
+ (dotted_name_element,) = pack | |
return dotted_name_element | |
@pg.production("dotted_name_element : NAME") | |
@pg.production("dotted_name_element : SPACE") | |
- def dotted_name((token,)): | |
+ def dotted_name(pack): | |
+ (token,) = pack | |
return [create_node_from_token(token)] | |
@pg.production("dotted_name_element : DOT") | |
- def dotted_name_dot((dot,)): | |
+ def dotted_name_dot(pack): | |
+ (dot,) = pack | |
return [{ | |
"type": "dot", | |
"first_formatting": dot.hidden_tokens_before, | |
diff --git a/baron/grammator_operators.py b/baron/grammator_operators.py | |
index bbd014d..ca6e8fd 100644 | |
--- a/baron/grammator_operators.py | |
+++ b/baron/grammator_operators.py | |
@@ -1,20 +1,24 @@ | |
def include_operators(pg): | |
@pg.production("old_test : or_test") | |
@pg.production("old_test : old_lambdef") | |
- def old_test((level,)): | |
+ def old_test(pack): | |
+ (level,) = pack | |
return level | |
@pg.production("testlist_safe : old_test comma old_test") | |
- def testlist_safe((old_test, comma, old_test2)): | |
+ def testlist_safe(pack): | |
+ (old_test, comma, old_test2) = pack | |
return [old_test, comma, old_test2] | |
@pg.production("testlist_safe : old_test comma testlist_safe") | |
- def testlist_safe_more((old_test, comma, testlist_safe)): | |
+ def testlist_safe_more(pack): | |
+ (old_test, comma, testlist_safe) = pack | |
return [old_test, comma] + testlist_safe | |
@pg.production("expr_stmt : testlist augassign_operator testlist") | |
@pg.production("expr_stmt : testlist augassign_operator yield_expr") | |
- def augmented_assignment_node((target, operator, value)): | |
+ def augmented_assignment_node(pack): | |
+ (target, operator, value) = pack | |
return { | |
"type": "assignment", | |
"first_formatting": operator.hidden_tokens_before, | |
@@ -36,12 +40,14 @@ def include_operators(pg): | |
@pg.production("augassign_operator : RIGHT_SHIFT_EQUAL") | |
@pg.production("augassign_operator : DOUBLE_STAR_EQUAL") | |
@pg.production("augassign_operator : DOUBLE_SLASH_EQUAL") | |
- def augassign_operator((operator,)): | |
+ def augassign_operator(pack): | |
+ (operator,) = pack | |
return operator | |
@pg.production("expr_stmt : testlist EQUAL yield_expr") | |
@pg.production("expr_stmt : testlist EQUAL expr_stmt") | |
- def assignment_node((target, equal, value)): | |
+ def assignment_node(pack): | |
+ (target, equal, value) = pack | |
return { | |
"type": "assignment", | |
"value": value, | |
@@ -51,7 +57,8 @@ def include_operators(pg): | |
} | |
@pg.production("test : or_test IF or_test ELSE test") | |
- def ternary_operator_node((first, if_, second, else_, third)): | |
+ def ternary_operator_node(pack): | |
+ (first, if_, second, else_, third) = pack | |
return { | |
"type": "ternary_operator", | |
"first": first, | |
@@ -65,7 +72,8 @@ def include_operators(pg): | |
@pg.production("or_test : and_test OR or_test") | |
@pg.production("and_test : not_test AND and_test") | |
- def and_or_node((first, operator, second)): | |
+ def and_or_node(pack): | |
+ (first, operator, second) = pack | |
return { | |
"type": "boolean_operator", | |
"value": operator.value, | |
@@ -76,7 +84,8 @@ def include_operators(pg): | |
} | |
@pg.production("not_test : NOT not_test") | |
- def not_node((not_, comparison)): | |
+ def not_node(pack): | |
+ (not_, comparison) = pack | |
return { | |
"type": "unitary_operator", | |
"value": "not", | |
@@ -92,7 +101,8 @@ def include_operators(pg): | |
@pg.production("comparison : expr NOT_EQUAL comparison") | |
@pg.production("comparison : expr IN comparison") | |
@pg.production("comparison : expr IS comparison") | |
- def comparison_node((expr, comparison_operator, comparison_)): | |
+ def comparison_node(pack): | |
+ (expr, comparison_operator, comparison_) = pack | |
return { | |
"type": "comparison", | |
"value": comparison_operator.value, | |
@@ -105,7 +115,8 @@ def include_operators(pg): | |
@pg.production("comparison : expr IS NOT comparison") | |
@pg.production("comparison : expr NOT IN comparison") | |
- def comparison_advanced_node((expr, comparison_operator, comparison_operator2, comparison_)): | |
+ def comparison_advanced_node(pack): | |
+ (expr, comparison_operator, comparison_operator2, comparison_) = pack | |
return { | |
"type": "comparison", | |
"value": "%s %s" % ( | |
@@ -132,7 +143,8 @@ def include_operators(pg): | |
@pg.production("term : factor DOUBLE_SLASH term") | |
@pg.production("power : atom DOUBLE_STAR factor") | |
@pg.production("power : atom DOUBLE_STAR power") | |
- def binary_operator_node((first, operator, second)): | |
+ def binary_operator_node(pack): | |
+ (first, operator, second) = pack | |
return { | |
"type": "binary_operator", | |
"value": operator.value, | |
@@ -145,7 +157,8 @@ def include_operators(pg): | |
@pg.production("factor : PLUS factor") | |
@pg.production("factor : MINUS factor") | |
@pg.production("factor : TILDE factor") | |
- def factor_unitary_operator_space((operator, factor,)): | |
+ def factor_unitary_operator_space(pack): | |
+ (operator, factor,) = pack | |
return { | |
"type": "unitary_operator", | |
"value": operator.value, | |
@@ -155,7 +168,8 @@ def include_operators(pg): | |
@pg.production("power : atomtrailers DOUBLE_STAR factor") | |
@pg.production("power : atomtrailers DOUBLE_STAR power") | |
- def power_atomtrailer_power((atomtrailers, double_star, factor)): | |
+ def power_atomtrailer_power(pack): | |
+ (atomtrailers, double_star, factor) = pack | |
return { | |
"type": "binary_operator", | |
"value": double_star.value, | |
@@ -169,30 +183,36 @@ def include_operators(pg): | |
} | |
@pg.production("power : atomtrailers") | |
- def power_atomtrailers((atomtrailers,)): | |
+ def power_atomtrailers(pack): | |
+ (atomtrailers,) = pack | |
return { | |
"type": "atomtrailers", | |
"value": atomtrailers | |
} | |
@pg.production("atomtrailers : atom") | |
- def atomtrailers_atom((atom,)): | |
+ def atomtrailers_atom(pack): | |
+ (atom,) = pack | |
return [atom] | |
@pg.production("atomtrailers : atom trailers") | |
- def atomtrailer((atom, trailers)): | |
+ def atomtrailer(pack): | |
+ (atom, trailers) = pack | |
return [atom] + trailers | |
@pg.production("trailers : trailer") | |
- def trailers((trailer,)): | |
+ def trailers(pack): | |
+ (trailer,) = pack | |
return trailer | |
@pg.production("trailers : trailers trailer") | |
- def trailers_trailer((trailers, trailer)): | |
+ def trailers_trailer(pack): | |
+ (trailers, trailer) = pack | |
return trailers + trailer | |
@pg.production("trailer : DOT NAME") | |
- def trailer((dot, name,)): | |
+ def trailer(pack): | |
+ (dot, name,) = pack | |
return [{ | |
"type": "dot", | |
"first_formatting": dot.hidden_tokens_before, | |
@@ -203,7 +223,8 @@ def include_operators(pg): | |
}] | |
@pg.production("trailer : LEFT_PARENTHESIS argslist RIGHT_PARENTHESIS") | |
- def trailer_call((left, argslist, right)): | |
+ def trailer_call(pack): | |
+ (left, argslist, right) = pack | |
return [{ | |
"type": "call", | |
"value": argslist, | |
@@ -215,7 +236,8 @@ def include_operators(pg): | |
@pg.production("trailer : LEFT_SQUARE_BRACKET subscript RIGHT_SQUARE_BRACKET") | |
@pg.production("trailer : LEFT_SQUARE_BRACKET subscriptlist RIGHT_SQUARE_BRACKET") | |
- def trailer_getitem_ellipsis((left, subscript, right)): | |
+ def trailer_getitem_ellipsis(pack): | |
+ (left, subscript, right) = pack | |
return [{ | |
"type": "getitem", | |
"value": subscript, | |
@@ -226,7 +248,8 @@ def include_operators(pg): | |
}] | |
@pg.production("subscript : DOT DOT DOT") | |
- def subscript_ellipsis((dot1, dot2, dot3)): | |
+ def subscript_ellipsis(pack): | |
+ (dot1, dot2, dot3) = pack | |
return { | |
"type": "ellipsis", | |
"first_formatting": dot1.hidden_tokens_after, | |
@@ -235,11 +258,13 @@ def include_operators(pg): | |
@pg.production("subscript : test") | |
@pg.production("subscript : slice") | |
- def subscript_test((test,)): | |
+ def subscript_test(pack): | |
+ (test,) = pack | |
return test | |
@pg.production("slice : COLON") | |
- def slice((colon,)): | |
+ def slice(pack): | |
+ (colon,) = pack | |
return { | |
"type": "slice", | |
"lower": {}, | |
@@ -253,7 +278,8 @@ def include_operators(pg): | |
} | |
@pg.production("slice : COLON COLON") | |
- def slice_colon((colon, colon2)): | |
+ def slice_colon(pack): | |
+ (colon, colon2) = pack | |
return { | |
"type": "slice", | |
"lower": {}, | |
@@ -267,7 +293,8 @@ def include_operators(pg): | |
} | |
@pg.production("slice : test COLON") | |
- def slice_lower((test, colon,)): | |
+ def slice_lower(pack): | |
+ (test, colon,) = pack | |
return { | |
"type": "slice", | |
"lower": test, | |
@@ -281,7 +308,8 @@ def include_operators(pg): | |
} | |
@pg.production("slice : test COLON COLON") | |
- def slice_lower_colon_colon((test, colon, colon2)): | |
+ def slice_lower_colon_colon(pack): | |
+ (test, colon, colon2) = pack | |
return { | |
"type": "slice", | |
"lower": test, | |
@@ -295,7 +323,8 @@ def include_operators(pg): | |
} | |
@pg.production("slice : COLON test") | |
- def slice_upper((colon, test,)): | |
+ def slice_upper(pack): | |
+ (colon, test,) = pack | |
return { | |
"type": "slice", | |
"lower": {}, | |
@@ -309,7 +338,8 @@ def include_operators(pg): | |
} | |
@pg.production("slice : COLON test COLON") | |
- def slice_upper_colon((colon, test, colon2)): | |
+ def slice_upper_colon(pack): | |
+ (colon, test, colon2) = pack | |
return { | |
"type": "slice", | |
"lower": {}, | |
@@ -323,7 +353,8 @@ def include_operators(pg): | |
} | |
@pg.production("slice : COLON COLON test") | |
- def slice_step((colon, colon2, test)): | |
+ def slice_step(pack): | |
+ (colon, colon2, test) = pack | |
return { | |
"type": "slice", | |
"lower": {}, | |
@@ -337,7 +368,8 @@ def include_operators(pg): | |
} | |
@pg.production("slice : test COLON test") | |
- def slice_lower_upper((test, colon, test2,)): | |
+ def slice_lower_upper(pack): | |
+ (test, colon, test2,) = pack | |
return { | |
"type": "slice", | |
"lower": test, | |
@@ -351,7 +383,8 @@ def include_operators(pg): | |
} | |
@pg.production("slice : test COLON test COLON") | |
- def slice_lower_upper_colon((test, colon, test2, colon2)): | |
+ def slice_lower_upper_colon(pack): | |
+ (test, colon, test2, colon2) = pack | |
return { | |
"type": "slice", | |
"lower": test, | |
@@ -365,7 +398,8 @@ def include_operators(pg): | |
} | |
@pg.production("slice : test COLON COLON test") | |
- def slice_lower_step((test, colon, colon2, test2)): | |
+ def slice_lower_step(pack): | |
+ (test, colon, colon2, test2) = pack | |
return { | |
"type": "slice", | |
"lower": test, | |
@@ -379,7 +413,8 @@ def include_operators(pg): | |
} | |
@pg.production("slice : COLON test COLON test") | |
- def slice_upper_step((colon, test, colon2, test2)): | |
+ def slice_upper_step(pack): | |
+ (colon, test, colon2, test2) = pack | |
return { | |
"type": "slice", | |
"lower": {}, | |
@@ -393,7 +428,8 @@ def include_operators(pg): | |
} | |
@pg.production("slice : test COLON test COLON test") | |
- def slice_lower_upper_step((test, colon, test2, colon2, test3)): | |
+ def slice_lower_upper_step(pack): | |
+ (test, colon, test2, colon2, test3) = pack | |
return { | |
"type": "slice", | |
"lower": test, | |
diff --git a/baron/grammator_primitives.py b/baron/grammator_primitives.py | |
index b2ef5bb..221d282 100644 | |
--- a/baron/grammator_primitives.py | |
+++ b/baron/grammator_primitives.py | |
@@ -1,10 +1,11 @@ | |
-from utils import create_node_from_token | |
+from .utils import create_node_from_token | |
def include_primivites(pg, print_function): | |
if not print_function: | |
@pg.production("print_stmt : PRINT") | |
- def print_stmt_empty((print_,)): | |
+ def print_stmt_empty(pack): | |
+ (print_,) = pack | |
return { | |
"type": "print", | |
"value": None, | |
@@ -15,7 +16,8 @@ def include_primivites(pg, print_function): | |
@pg.production("print_stmt : PRINT testlist") | |
- def print_stmt((print_, testlist)): | |
+ def print_stmt(pack): | |
+ (print_, testlist) = pack | |
return { | |
"type": "print", | |
"value": testlist["value"] if testlist["type"] == "tuple" and testlist["with_parenthesis"] == False else [testlist], | |
@@ -26,7 +28,8 @@ def include_primivites(pg, print_function): | |
@pg.production("print_stmt : PRINT RIGHT_SHIFT test") | |
- def print_stmt_redirect((print_, right_shift, test)): | |
+ def print_stmt_redirect(pack): | |
+ (print_, right_shift, test) = pack | |
return { | |
"type": "print", | |
"value": None, | |
@@ -37,7 +40,8 @@ def include_primivites(pg, print_function): | |
@pg.production("print_stmt : PRINT RIGHT_SHIFT test COMMA testlist") | |
- def print_stmt_redirect_testlist((print_, right_shift, test, comma, testlist)): | |
+ def print_stmt_redirect_testlist(pack): | |
+ (print_, right_shift, test, comma, testlist) = pack | |
value = [{ | |
"type": "comma", | |
"first_formatting": comma.hidden_tokens_before, | |
@@ -59,13 +63,15 @@ def include_primivites(pg, print_function): | |
@pg.production("flow_stmt : continue_stmt") | |
@pg.production("flow_stmt : yield_stmt") | |
@pg.production("yield_stmt : yield_expr") | |
- def flow((flow_stmt,)): | |
+ def flow(pack): | |
+ (flow_stmt,) = pack | |
return flow_stmt | |
@pg.production("return_stmt : RETURN") | |
@pg.production("yield_expr : YIELD") | |
- def return_empty((token,)): | |
+ def return_empty(pack): | |
+ (token,) = pack | |
return { | |
"type": token.name.lower(), | |
"value": None, | |
@@ -76,12 +82,14 @@ def include_primivites(pg, print_function): | |
@pg.production("break_stmt : BREAK") | |
@pg.production("continue_stmt : CONTINUE") | |
@pg.production("pass_stmt : PASS") | |
- def break_stmt((token,)): | |
+ def break_stmt(pack): | |
+ (token,) = pack | |
return {"type": token.name.lower()} | |
@pg.production("raise_stmt : RAISE") | |
- def raise_stmt_empty((raise_,)): | |
+ def raise_stmt_empty(pack): | |
+ (raise_,) = pack | |
return { | |
"type": "raise", | |
"value": None, | |
@@ -96,7 +104,8 @@ def include_primivites(pg, print_function): | |
@pg.production("raise_stmt : RAISE test") | |
- def raise_stmt((raise_, test)): | |
+ def raise_stmt(pack): | |
+ (raise_, test) = pack | |
return { | |
"type": "raise", | |
"value": test, | |
@@ -111,7 +120,8 @@ def include_primivites(pg, print_function): | |
@pg.production("raise_stmt : RAISE test COMMA test") | |
- def raise_stmt_instance((raise_, test, comma, test2)): | |
+ def raise_stmt_instance(pack): | |
+ (raise_, test, comma, test2) = pack | |
return { | |
"type": "raise", | |
"value": test, | |
@@ -126,7 +136,8 @@ def include_primivites(pg, print_function): | |
@pg.production("raise_stmt : RAISE test COMMA test COMMA test") | |
- def raise_stmt_instance_traceback((raise_, test, comma, test2, comma2, test3)): | |
+ def raise_stmt_instance_traceback(pack): | |
+ (raise_, test, comma, test2, comma2, test3) = pack | |
return { | |
"type": "raise", | |
"value": test, | |
@@ -141,7 +152,8 @@ def include_primivites(pg, print_function): | |
@pg.production("assert_stmt : EXEC expr") | |
- def exec_stmt((exec_, expr)): | |
+ def exec_stmt(pack): | |
+ (exec_, expr) = pack | |
return { | |
"type": "exec", | |
"value": expr, | |
@@ -156,7 +168,8 @@ def include_primivites(pg, print_function): | |
@pg.production("assert_stmt : EXEC expr IN test") | |
- def exec_stmt_in((exec_, expr, in_, test)): | |
+ def exec_stmt_in(pack): | |
+ (exec_, expr, in_, test) = pack | |
return { | |
"type": "exec", | |
"value": expr, | |
@@ -171,7 +184,8 @@ def include_primivites(pg, print_function): | |
@pg.production("assert_stmt : EXEC expr IN test COMMA test") | |
- def exec_stmt_in_comma((exec_, expr, in_, test, comma, test2)): | |
+ def exec_stmt_in_comma(pack): | |
+ (exec_, expr, in_, test, comma, test2) = pack | |
return { | |
"type": "exec", | |
"value": expr, | |
@@ -186,7 +200,8 @@ def include_primivites(pg, print_function): | |
@pg.production("assert_stmt : ASSERT test") | |
- def assert_stmt((assert_, test)): | |
+ def assert_stmt(pack): | |
+ (assert_, test) = pack | |
return { | |
"type": "assert", | |
"value": test, | |
@@ -198,7 +213,8 @@ def include_primivites(pg, print_function): | |
@pg.production("assert_stmt : ASSERT test COMMA test") | |
- def assert_stmt_message((assert_, test, comma, test2)): | |
+ def assert_stmt_message(pack): | |
+ (assert_, test, comma, test2) = pack | |
return { | |
"type": "assert", | |
"value": test, | |
@@ -210,7 +226,8 @@ def include_primivites(pg, print_function): | |
@pg.production("global_stmt : GLOBAL names") | |
- def global_stmt((global_, names)): | |
+ def global_stmt(pack): | |
+ (global_, names) = pack | |
return { | |
"type": "global", | |
"formatting": global_.hidden_tokens_after, | |
@@ -219,19 +236,22 @@ def include_primivites(pg, print_function): | |
@pg.production("names : NAME") | |
- def names_name((name,)): | |
+ def names_name(pack): | |
+ (name,) = pack | |
return [create_node_from_token(name)] | |
@pg.production("names : names comma name") | |
- def names_names_name((names, comma, name,)): | |
+ def names_names_name(pack): | |
+ (names, comma, name,) = pack | |
return names + [comma, name] | |
@pg.production("return_stmt : RETURN testlist") | |
@pg.production("yield_expr : YIELD testlist") | |
@pg.production("del_stmt : DEL exprlist") | |
- def return_testlist((token, testlist)): | |
+ def return_testlist(pack): | |
+ (token, testlist) = pack | |
return { | |
"type": token.name.lower(), | |
"value": testlist, | |
@@ -240,7 +260,8 @@ def include_primivites(pg, print_function): | |
@pg.production("lambdef : LAMBDA COLON test") | |
@pg.production("old_lambdef : LAMBDA COLON old_test") | |
- def lambdef((lambda_, colon, test)): | |
+ def lambdef(pack): | |
+ (lambda_, colon, test) = pack | |
return { | |
"type": "lambda", | |
"arguments": [], | |
@@ -252,7 +273,8 @@ def include_primivites(pg, print_function): | |
@pg.production("lambdef : LAMBDA parameters COLON test") | |
@pg.production("old_lambdef : LAMBDA parameters COLON old_test") | |
- def lambdef_arguments((lambda_, parameters, colon, test)): | |
+ def lambdef_arguments(pack): | |
+ (lambda_, parameters, colon, test) = pack | |
return { | |
"type": "lambda", | |
"arguments": parameters, | |
diff --git a/baron/grouper.py b/baron/grouper.py | |
index d96fc9b..d052f5a 100644 | |
--- a/baron/grouper.py | |
+++ b/baron/grouper.py | |
@@ -1,7 +1,7 @@ | |
# encoding: utf-8 | |
import re | |
-from utils import FlexibleIterator | |
+from .utils import FlexibleIterator | |
to_group = ( | |
("+", "="), | |
@@ -28,7 +28,7 @@ to_group = ( | |
("\r", "\n"), | |
) | |
-to_group_keys, _ = zip(*to_group) | |
+to_group_keys, _ = list(zip(*to_group)) | |
def group(sequence): | |
@@ -42,61 +42,61 @@ def group_generator(sequence): | |
if iterator.end(): | |
return | |
- current = iterator.next() | |
+ current = next(iterator) | |
if current in to_group_keys and matching_found(to_group, current, iterator.show_next()): | |
- current += iterator.next() | |
+ current += next(iterator) | |
if current in to_group_keys and matching_found(to_group, current, iterator.show_next()): | |
- current += iterator.next() | |
+ current += next(iterator) | |
if current in list('uUrRbB') and str(iterator.show_next()).startswith(('"', "'")): | |
- current += iterator.next() | |
+ current += next(iterator) | |
if str(current).lower() in ["ur", "br"] and str(iterator.show_next()).startswith(('"', "'")): | |
- current += iterator.next() | |
- if any(map(lambda x: re.match(x, current), (r'^\d+e$', r'^\d+\.\d*e$', r'^\.\d+e$'))): | |
- current += iterator.next() | |
- current += iterator.next() | |
+ current += next(iterator) | |
+ if any([re.match(x, current) for x in (r'^\d+e$', r'^\d+\.\d*e$', r'^\.\d+e$')]): | |
+ current += next(iterator) | |
+ current += next(iterator) | |
# I'm obligatory in a case where I have something like that: | |
# ['123.123e', '[+-]', '123'] | |
assert re.match(r'^\d+[eE][-+]?\d+$', current) or re.match(r'^\d*.\d*[eE][-+]?\d+$', current) | |
if current == "\\" and iterator.show_next() in ('\n', '\r\n'): | |
- current += iterator.next() | |
+ current += next(iterator) | |
if re.match(r'^\s+$', str(iterator.show_next())): | |
- current += iterator.next() | |
+ current += next(iterator) | |
if current == "\\" and iterator.show_next() == "\r" and iterator.show_next(2) == "\n": | |
- current += iterator.next() | |
- current += iterator.next() | |
+ current += next(iterator) | |
+ current += next(iterator) | |
if re.match(r'^\s+$', str(iterator.show_next())): | |
- current += iterator.next() | |
+ current += next(iterator) | |
if re.match(r'^\s+$', current) and iterator.show_next() == "\\": | |
- current += iterator.next() | |
- current += iterator.next() | |
+ current += next(iterator) | |
+ current += next(iterator) | |
if iterator.show_next() == "\n": | |
- current += iterator.next() | |
+ current += next(iterator) | |
if re.match(r'^\s+$', str(iterator.show_next())): | |
- current += iterator.next() | |
+ current += next(iterator) | |
if (re.match(r'^\d+$', current) and iterator.show_next() and iterator.show_next() == ".") or\ | |
(current == "." and iterator.show_next() and re.match(r'^\d+[jJ]?$', iterator.show_next())): | |
- current += iterator.next() | |
+ current += next(iterator) | |
if iterator.show_next() and re.match(r'^\d*[jJ]?$', iterator.show_next()) and re.match(r'^\d*[jJ]?$', iterator.show_next()).group(): | |
- current += iterator.next() | |
+ current += next(iterator) | |
if re.match(r'^\d+\.$', current) and iterator.show_next() and re.match(r'^\d*[eE]\d*$', iterator.show_next()): | |
- current += iterator.next() | |
+ current += next(iterator) | |
if re.match(r'^\d+\.?[eE]$', current) and iterator.show_next() and re.match(r'^\d+$', iterator.show_next()): | |
- current += iterator.next() | |
+ current += next(iterator) | |
if re.match(r'^\d+\.?\d*[eE]$', current) and iterator.show_next() and iterator.show_next() in "-+" and re.match(r'^\d+$', iterator.show_next(2) if iterator.show_next(2) else ""): | |
- current += iterator.next() | |
- current += iterator.next() | |
+ current += next(iterator) | |
+ current += next(iterator) | |
yield current | |
def matching_found(to_group, current, target): | |
- return target in zip(*filter(lambda x: x[0] == current, to_group))[1] | |
+ return target in list(zip(*[x for x in to_group if x[0] == current]))[1] | |
diff --git a/baron/helpers.py b/baron/helpers.py | |
index 456fdee..aa93cf5 100644 | |
--- a/baron/helpers.py | |
+++ b/baron/helpers.py | |
@@ -3,9 +3,9 @@ from . import parse | |
def show(source_code): | |
- print json.dumps(parse(source_code), indent=4) | |
+ print(json.dumps(parse(source_code), indent=4)) | |
def show_file(target_file): | |
with open(target_file, "r") as source_code: | |
- print json.dumps(parse(source_code.read()), indent=4) | |
+ print(json.dumps(parse(source_code.read()), indent=4)) | |
diff --git a/baron/indentation_marker.py b/baron/indentation_marker.py | |
index c09551e..b1be09f 100644 | |
--- a/baron/indentation_marker.py | |
+++ b/baron/indentation_marker.py | |
@@ -1,4 +1,4 @@ | |
-from utils import FlexibleIterator | |
+from .utils import FlexibleIterator | |
""" | |
Objectif: add an INDENT token and a DEDENT token arround every block | |
@@ -25,10 +25,10 @@ def mark_indentation(sequence): | |
def get_space(node): | |
if len(node) < 3: | |
- print "WARNING" | |
+ print("WARNING") | |
return None | |
if len(node[3]) == 0: | |
- print "WARNING" | |
+ print("WARNING") | |
return None | |
return node[3][0][1].replace(" ", " "*8) | |
@@ -41,7 +41,7 @@ def mark_indentation_generator(sequence): | |
if iterator.end(): | |
return | |
- current = iterator.next() | |
+ current = next(iterator) | |
if current is None: | |
return | |
@@ -56,7 +56,7 @@ def mark_indentation_generator(sequence): | |
if iterator.show_next(2)[0] not in ("ENDL",): | |
indentations.append(get_space(iterator.show_next())) | |
yield current | |
- yield iterator.next() | |
+ yield next(iterator) | |
yield ('INDENT', '') | |
continue | |
else: | |
@@ -76,7 +76,7 @@ def mark_indentation_generator(sequence): | |
while indentations and indentations[-1] > new_indent: | |
indentations.pop() | |
yield ('DEDENT', '') | |
- yield iterator.next() | |
+ yield next(iterator) | |
continue | |
yield current | |
diff --git a/baron/inner_formatting_grouper.py b/baron/inner_formatting_grouper.py | |
index 7612781..15dc1ff 100644 | |
--- a/baron/inner_formatting_grouper.py | |
+++ b/baron/inner_formatting_grouper.py | |
@@ -1,4 +1,4 @@ | |
-from utils import FlexibleIterator | |
+from .utils import FlexibleIterator | |
class UnExpectedFormattingToken(Exception): | |
pass | |
@@ -116,9 +116,9 @@ def fail_on_bad_token(token, debug_file_content, in_grouping_mode): | |
debug_file_content += _append_to_debug_file_content(token) | |
debug_file_content = debug_file_content.split("\n") | |
- debug_file_content = zip(range(1, len(debug_file_content) + 1), debug_file_content) | |
+ debug_file_content = list(zip(list(range(1, len(debug_file_content) + 1)), debug_file_content)) | |
debug_file_content = debug_file_content[-8:] | |
- debug_file_content = "\n".join(map(lambda x: "%4s %s" % (x[0], x[1]), debug_file_content)) | |
+ debug_file_content = "\n".join(["%4s %s" % (x[0], x[1]) for x in debug_file_content]) | |
raise Exception("Fail to group formatting tokens, here:\n%s <----\n\n'%s' should have been in: %s\n\nCurrent value of 'in_grouping_mode': %s" % (debug_file_content, token, ', '.join(sorted(GROUP_ON)), in_grouping_mode)) | |
@@ -138,7 +138,7 @@ def group_generator(sequence): | |
return | |
debug_previous_token = current | |
- current = iterator.next() | |
+ current = next(iterator) | |
debug_file_content += _append_to_debug_file_content(current) | |
if current[0] in ENTER_GROUPING_MODE: | |
@@ -150,17 +150,17 @@ def group_generator(sequence): | |
if current[0] in GROUP_THOSE: | |
to_group = [current] | |
while iterator.show_next() and iterator.show_next()[0] in GROUP_THOSE: | |
- to_group.append(iterator.next()) | |
+ to_group.append(next(iterator)) | |
debug_file_content += _append_to_debug_file_content(to_group[-1]) | |
# XXX don't remember how (:() but I can end up finding a | |
# DEDENT/INDENT token in this situation and I don't want to | |
# group on it. Need to do test for that. | |
if iterator.show_next()[0] in ("INDENT", "DEDENT"): | |
- yield iterator.next() | |
+ yield next(iterator) | |
fail_on_bad_token(iterator.show_next(), debug_file_content, in_grouping_mode) | |
- current = append_to_token_before(iterator.next(), to_group) | |
+ current = append_to_token_before(next(iterator), to_group) | |
# TODO test | |
if current[0] in QUIT_GROUPING_MODE: | |
@@ -172,14 +172,14 @@ def group_generator(sequence): | |
if current[0] in GROUP_ON: | |
while iterator.show_next() and iterator.show_next()[0] in GROUP_THOSE: | |
debug_file_content += _append_to_debug_file_content(iterator.show_next()) | |
- current = append_to_token_after(current, [iterator.next()]) | |
+ current = append_to_token_after(current, [next(iterator)]) | |
if current[0] == "SPACE": | |
debug_file_content = debug_file_content.split("\n") | |
- debug_file_content = zip(range(1, len(debug_file_content) + 1), debug_file_content) | |
+ debug_file_content = list(zip(list(range(1, len(debug_file_content) + 1)), debug_file_content)) | |
debug_file_content = debug_file_content[-3:] | |
- debug_file_content = "\n".join(map(lambda x: "%4s %s" % (x[0], x[1]), debug_file_content)) | |
+ debug_file_content = "\n".join(["%4s %s" % (x[0], x[1]) for x in debug_file_content]) | |
debug_file_content += "<--- here" | |
debug_text = "Unexpected '%s' token:\n\n" % current[0].lower() + debug_file_content + "\n\n" | |
debug_text += "Should have been grouped on either %s (before) or %s (after) token." % (debug_previous_token, iterator.show_next()) | |
diff --git a/baron/parser.py b/baron/parser.py | |
index b85b122..96d0775 100644 | |
--- a/baron/parser.py | |
+++ b/baron/parser.py | |
@@ -4,7 +4,7 @@ import stat | |
import tempfile | |
import warnings | |
-from token import BaronToken | |
+from .token import BaronToken | |
from rply import ParserGenerator | |
from rply.parser import LRParser | |
@@ -151,9 +151,9 @@ class BaronLRParser(LRParser): | |
raise AssertionError("For now, error_handler must raise.") | |
else: | |
debug_output = parsed_file_content.split("\n") | |
- debug_output = zip(range(1, len(debug_output) + 1), debug_output) | |
+ debug_output = list(zip(list(range(1, len(debug_output) + 1)), debug_output)) | |
debug_output = debug_output[-8:] | |
- debug_output = "\n".join(map(lambda x: "%4s %s" % (x[0], x[1]), debug_output)) | |
+ debug_output = "\n".join(["%4s %s" % (x[0], x[1]) for x in debug_output]) | |
debug_output += "<---- here" | |
debug_output = "Error, got an unexpected token %s here:\n\n" % ltype + debug_output | |
debug_output += "\n\nThe token %s should be one of those: %s" % (ltype, ", ".join(sorted(self.lr_table.lr_action[current_state].keys()))) | |
diff --git a/baron/spliter.py b/baron/spliter.py | |
index 6b01cc2..0695b30 100644 | |
--- a/baron/spliter.py | |
+++ b/baron/spliter.py | |
@@ -1,5 +1,5 @@ | |
import string | |
-from utils import FlexibleIterator | |
+from .utils import FlexibleIterator | |
def split(sequence): | |
@@ -22,19 +22,19 @@ def split_generator(sequence): | |
for section in ("'", '"'): | |
if iterator.next_starts_with(section * 3): | |
not_found = False | |
- result = iterator.next() | |
- result += iterator.next() | |
- result += iterator.next() | |
+ result = next(iterator) | |
+ result += next(iterator) | |
+ result += next(iterator) | |
result += iterator.grab_string(lambda iterator: not iterator.next_starts_with(section * 3)) | |
- result += iterator.next() | |
- result += iterator.next() | |
- result += iterator.next() | |
+ result += next(iterator) | |
+ result += next(iterator) | |
+ result += next(iterator) | |
yield result | |
elif iterator.next_in(section): | |
not_found = False | |
- result = iterator.next() | |
+ result = next(iterator) | |
result += iterator.grab_string(lambda iterator: iterator.show_next() not in section) | |
- result += iterator.next() | |
+ result += next(iterator) | |
yield result | |
for section in (string.ascii_letters + "_" + "1234567890", " \t"): | |
@@ -45,7 +45,7 @@ def split_generator(sequence): | |
for one in "@,.;()=*:+-/^%&<>|\r\n~[]{}!``\\": | |
if iterator.next_in(one): | |
not_found = False | |
- yield iterator.next() | |
+ yield next(iterator) | |
if iterator.show_next().__repr__().startswith("'\\x"): | |
# guys, seriously, how do you manage to put this shit in your code? | |
@@ -54,7 +54,7 @@ def split_generator(sequence): | |
# example of crapy unicode stuff found in some source files: \x0c\xef\xbb\xbf | |
not_found = False | |
# let's drop that crap | |
- iterator.next() | |
+ next(iterator) | |
if not_found: | |
raise Exception("Untreaded elements: %s" % iterator.rest_of_the_sequence().__repr__()[:50]) | |
diff --git a/baron/token.py b/baron/token.py | |
index ee6f5b2..54e991c 100644 | |
--- a/baron/token.py | |
+++ b/baron/token.py | |
@@ -14,8 +14,8 @@ class BaronToken(BaseBox): | |
def __init__(self, name, value, hidden_tokens_before=None, hidden_tokens_after=None): | |
self.name = name | |
self.value = value | |
- self.hidden_tokens_before = map(self._translate_tokens_to_ast_node, hidden_tokens_before if hidden_tokens_before else []) | |
- self.hidden_tokens_after = map(self._translate_tokens_to_ast_node, hidden_tokens_after if hidden_tokens_after else []) | |
+ self.hidden_tokens_before = list(map(self._translate_tokens_to_ast_node, hidden_tokens_before if hidden_tokens_before else [])) | |
+ self.hidden_tokens_after = list(map(self._translate_tokens_to_ast_node, hidden_tokens_after if hidden_tokens_after else [])) | |
def _translate_tokens_to_ast_node(self, token): | |
if token[0] == "ENDL": | |
@@ -23,13 +23,13 @@ class BaronToken(BaseBox): | |
"type": token[0].lower(), | |
"value": token[1], | |
"indent": token[3][0][1] if len(token) == 4 and token[3] else "", | |
- "formatting": map(self._translate_tokens_to_ast_node, token[2]) if len(token) >= 3 else [], | |
+ "formatting": list(map(self._translate_tokens_to_ast_node, token[2])) if len(token) >= 3 else [], | |
} | |
if len(token) >= 3: | |
return { | |
"type": token[0].lower(), | |
"value": token[1], | |
- "formatting": map(self._translate_tokens_to_ast_node, token[2]) if len(token) >= 3 else [], | |
+ "formatting": list(map(self._translate_tokens_to_ast_node, token[2])) if len(token) >= 3 else [], | |
} | |
return { | |
"type": token[0].lower(), | |
@@ -45,8 +45,8 @@ class BaronToken(BaseBox): | |
return self.name == other.name and self.value == other.value | |
def render(self): | |
- before = "".join(map(lambda x: (x["indent"] if x["type"] == "endl" else "") + x["value"], self.hidden_tokens_before)) | |
- after = "".join(map(lambda x: (x["indent"] if x["type"] == "endl" else "") + x["value"], self.hidden_tokens_after)) | |
+ before = "".join([(x["indent"] if x["type"] == "endl" else "") + x["value"] for x in self.hidden_tokens_before]) | |
+ after = "".join([(x["indent"] if x["type"] == "endl" else "") + x["value"] for x in self.hidden_tokens_after]) | |
#print self.hidden_tokens_before, self.value, self.hidden_tokens_after | |
return before + self.value + after | |
diff --git a/baron/tokenizer.py b/baron/tokenizer.py | |
index d55a929..357d3d7 100644 | |
--- a/baron/tokenizer.py | |
+++ b/baron/tokenizer.py | |
@@ -80,7 +80,7 @@ TOKENS = ( | |
) | |
-TOKENS = map(lambda x: (re.compile('^' + x[0] + '$'), x[1]), TOKENS) | |
+TOKENS = [(re.compile('^' + x[0] + '$'), x[1]) for x in TOKENS] | |
def tokenize(sequence, print_function=False): | |
@@ -89,7 +89,7 @@ def tokenize(sequence, print_function=False): | |
def tokenize_generator(sequence, print_function=False): | |
if print_function is True: | |
- current_keywords = filter(lambda x: x != "print", KEYWORDS) | |
+ current_keywords = [x for x in KEYWORDS if x != "print"] | |
else: | |
current_keywords = KEYWORDS | |
diff --git a/baron/utils.py b/baron/utils.py | |
index 0e8bbbe..661de6a 100644 | |
--- a/baron/utils.py | |
+++ b/baron/utils.py | |
@@ -11,7 +11,7 @@ class PrintFunctionImportFinder(ast.NodeVisitor): | |
# my job is already done | |
return | |
- if node.module == "__future__" and filter(lambda x: x.name == "print_function", node.names): | |
+ if node.module == "__future__" and [x for x in node.names if x.name == "print_function"]: | |
self.print_function = True | |
@@ -23,7 +23,7 @@ class FlexibleIterator(): | |
def __iter__(self): | |
return self | |
- def next(self): | |
+ def __next__(self): | |
self.position += 1 | |
if self.position == len(self.sequence): | |
raise StopIteration | |
@@ -53,7 +53,7 @@ class FlexibleIterator(): | |
to_return = "" | |
current = None | |
while self.show_next() is not None and test(self): | |
- current = self.next() | |
+ current = next(self) | |
to_return += current | |
return to_return | |
@@ -63,7 +63,7 @@ class FlexibleIterator(): | |
current = None | |
escaped = False | |
while self.show_next() is not None and (escaped or test(self)): | |
- current = self.next() | |
+ current = next(self) | |
to_return += current | |
if escaped: | |
escaped = False | |
diff --git a/tests/test_dumper.py b/tests/test_dumper.py | |
index 3c3197b..f85b9bb 100644 | |
--- a/tests/test_dumper.py | |
+++ b/tests/test_dumper.py | |
@@ -3,7 +3,7 @@ | |
import sys | |
import baron | |
-from test_utils import check_dumps | |
+from .test_utils import check_dumps | |
def test_empty(): | |
diff --git a/tests/test_formatting_grouper.py b/tests/test_formatting_grouper.py | |
index e8dfc32..21297a8 100644 | |
--- a/tests/test_formatting_grouper.py | |
+++ b/tests/test_formatting_grouper.py | |
@@ -1,9 +1,9 @@ | |
-from itertools import izip_longest | |
+from itertools import zip_longest | |
from baron.formatting_grouper import group as _group | |
def group(inp, out): | |
- for i, j in izip_longest(_group(inp), out): | |
+ for i, j in zip_longest(_group(inp), out): | |
assert i == j | |
diff --git a/tests/test_grammator.py b/tests/test_grammator.py | |
index 25430e2..cd6e677 100644 | |
--- a/tests/test_grammator.py | |
+++ b/tests/test_grammator.py | |
@@ -1,6 +1,6 @@ | |
#!/usr/bin/python | |
# -*- coding:Utf-8 -*- | |
-from test_utils import parse_simple, parse_multi | |
+from .test_utils import parse_simple, parse_multi | |
def test_empty(): | |
"" | |
diff --git a/tests/test_grammator_control_structures.py b/tests/test_grammator_control_structures.py | |
index fa7b77a..72c5d5a 100644 | |
--- a/tests/test_grammator_control_structures.py | |
+++ b/tests/test_grammator_control_structures.py | |
@@ -1,7 +1,7 @@ | |
#!/usr/bin/python | |
# -*- coding:Utf-8 -*- | |
-from test_utils import parse_multi | |
+from .test_utils import parse_multi | |
def test_if_stmt(): | |
diff --git a/tests/test_grammator_data_structures.py b/tests/test_grammator_data_structures.py | |
index f2d256e..831a45d 100644 | |
--- a/tests/test_grammator_data_structures.py | |
+++ b/tests/test_grammator_data_structures.py | |
@@ -1,6 +1,6 @@ | |
# encoding: utf-8 | |
-from test_utils import parse_simple | |
+from .test_utils import parse_simple | |
def test_empty_tuple(): | |
diff --git a/tests/test_grammator_imports.py b/tests/test_grammator_imports.py | |
index b385688..35d22f2 100644 | |
--- a/tests/test_grammator_imports.py | |
+++ b/tests/test_grammator_imports.py | |
@@ -1,6 +1,6 @@ | |
#!/usr/bin/python | |
# -*- coding:Utf-8 -*- | |
-from test_utils import parse_simple | |
+from .test_utils import parse_simple | |
def test_simple_import(): | |
diff --git a/tests/test_grammator_operators.py b/tests/test_grammator_operators.py | |
index 0e6481d..66be251 100644 | |
--- a/tests/test_grammator_operators.py | |
+++ b/tests/test_grammator_operators.py | |
@@ -1,7 +1,7 @@ | |
#!/usr/bin/python | |
# -*- coding:Utf-8 -*- | |
import pytest | |
-from test_utils import parse_simple | |
+from .test_utils import parse_simple | |
def test_simple_power(): | |
diff --git a/tests/test_grammator_primitives.py b/tests/test_grammator_primitives.py | |
index 5d14cbd..436a672 100644 | |
--- a/tests/test_grammator_primitives.py | |
+++ b/tests/test_grammator_primitives.py | |
@@ -1,6 +1,6 @@ | |
#!/usr/bin/python | |
# -*- coding:Utf-8 -*- | |
-from test_utils import parse_simple | |
+from .test_utils import parse_simple | |
def test_return(): | |
diff --git a/tests/test_indentation_marker.py b/tests/test_indentation_marker.py | |
index 6e013cb..55d27b4 100644 | |
--- a/tests/test_indentation_marker.py | |
+++ b/tests/test_indentation_marker.py | |
@@ -2,11 +2,11 @@ | |
# -*- coding:Utf-8 -*- | |
from baron.indentation_marker import mark_indentation | |
-from itertools import izip_longest | |
+from itertools import zip_longest | |
def check(input, output): | |
- for i, j in izip_longest(mark_indentation(input + [('ENDMARKER', ''), None]), output + [('ENDMARKER', ''), None]): | |
+ for i, j in zip_longest(mark_indentation(input + [('ENDMARKER', ''), None]), output + [('ENDMARKER', ''), None]): | |
assert i == j | |
diff --git a/tests/test_utils.py b/tests/test_utils.py | |
index 3859db7..aa42720 100644 | |
--- a/tests/test_utils.py | |
+++ b/tests/test_utils.py | |
@@ -28,8 +28,8 @@ def check_dumps(source_code): | |
import json | |
import traceback | |
traceback.print_exc(file=sys.stdout) | |
- print "Warning: couldn't write dumps output to debug file, exception: %s" % e | |
- print "Tree: %s" % json.dumps(baron_parse(source_code), indent=4) | |
+ print("Warning: couldn't write dumps output to debug file, exception: %s" % e) | |
+ print() | |
+ print("Tree: %s" % json.dumps(baron_parse(source_code), indent=4)) | |
assert dumps(baron_parse(source_code)) == source_code | |
-- | |
1.8.3.4 | |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
From 71b07da40f2dee96008e042b2a8a61b159cb4a48 Mon Sep 17 00:00:00 2001 | |
From: Pierre Penninckx | |
Date: Wed, 9 Apr 2014 14:38:18 +0200 | |
Subject: [PATCH 1/2] make it interpretable by python3 | |
--- | |
docs/conf.py | 16 ++++++++-------- | |
redbaron.py | 46 +++++++++++++++++++++++----------------------- | |
tests/test_redbaron.py | 34 +++++++++++++++++----------------- | |
3 files changed, 48 insertions(+), 48 deletions(-) | |
diff --git a/docs/conf.py b/docs/conf.py | |
index 0041b95..2929a82 100644 | |
--- a/docs/conf.py | |
+++ b/docs/conf.py | |
@@ -49,8 +49,8 @@ source_suffix = '.rst' | |
master_doc = 'index' | |
# General information about the project. | |
-project = u'RedBaron' | |
-copyright = u'2014, Laurent Peuch' | |
+project = 'RedBaron' | |
+copyright = '2014, Laurent Peuch' | |
# The version info for the project you're documenting, acts as replacement for | |
# |version| and |release|, also used in various other places throughout the | |
@@ -202,8 +202,8 @@ latex_elements = { | |
# (source start file, target name, title, | |
# author, documentclass [howto, manual, or own class]). | |
latex_documents = [ | |
- ('index', 'RedBaron.tex', u'RedBaron Documentation', | |
- u'Laurent Peuch', 'manual'), | |
+ ('index', 'RedBaron.tex', 'RedBaron Documentation', | |
+ 'Laurent Peuch', 'manual'), | |
] | |
# The name of an image file (relative to this directory) to place at the top of | |
@@ -232,8 +232,8 @@ latex_documents = [ | |
# One entry per manual page. List of tuples | |
# (source start file, name, description, authors, manual section). | |
man_pages = [ | |
- ('index', 'redbaron', u'RedBaron Documentation', | |
- [u'Laurent Peuch'], 1) | |
+ ('index', 'redbaron', 'RedBaron Documentation', | |
+ ['Laurent Peuch'], 1) | |
] | |
# If true, show URL addresses after external links. | |
@@ -246,8 +246,8 @@ man_pages = [ | |
# (source start file, target name, title, author, | |
# dir menu entry, description, category) | |
texinfo_documents = [ | |
- ('index', 'RedBaron', u'RedBaron Documentation', | |
- u'Laurent Peuch', 'RedBaron', 'One line description of project.', | |
+ ('index', 'RedBaron', 'RedBaron Documentation', | |
+ 'Laurent Peuch', 'RedBaron', 'One line description of project.', | |
'Miscellaneous'), | |
] | |
diff --git a/redbaron.py b/redbaron.py | |
index 040bf40..4f71c8d 100644 | |
--- a/redbaron.py | |
+++ b/redbaron.py | |
@@ -2,17 +2,17 @@ import sys | |
import inspect | |
import itertools | |
from types import ModuleType | |
-from UserList import UserList | |
+from collections import UserList | |
import baron | |
def indent(line, indentation): | |
- return "\n".join(map(lambda x: indentation + x, line.split("\n"))) | |
+ return "\n".join([indentation + x for x in line.split("\n")]) | |
def to_node(node, parent=None, on_attribute=None): | |
- class_name = "".join(map(lambda x: x.capitalize(), node["type"].split("_"))) + "Node" | |
+ class_name = "".join([x.capitalize() for x in node["type"].split("_")]) + "Node" | |
if class_name in globals(): | |
return globals()[class_name](node, parent=parent, on_attribute=on_attribute) | |
else: | |
@@ -62,15 +62,15 @@ class NodeList(UserList): | |
def help(self, deep=2, with_formatting=False): | |
for num, i in enumerate(self.data): | |
- print num, "-----------------------------------------------------" | |
- print i.__help__(deep=deep, with_formatting=with_formatting) | |
+ print(num, "-----------------------------------------------------") | |
+ print(i.__help__(deep=deep, with_formatting=with_formatting)) | |
def __help__(self, deep=2, with_formatting=False): | |
return [x.__help__(deep=deep, with_formatting=with_formatting) for x in self.data] | |
def copy(self): | |
# XXX not very optimised but at least very simple | |
- return NodeList(map(to_node, self.fst())) | |
+ return NodeList(list(map(to_node, self.fst()))) | |
def next_generator(self): | |
# similary, NodeList will never have next items | |
@@ -108,7 +108,7 @@ class NodeList(UserList): | |
elif len(self.data) != 0: | |
self.data.append(to_node({"type": "comma", "first_formatting": [], "second_formatting": [{"type": "space", "value": " "}]}, parent=parent, on_attribute=on_attribute)) | |
- if isinstance(value, basestring): | |
+ if isinstance(value, str): | |
self.data.append(to_node(baron.parse(value)[0], parent=parent, on_attribute=on_attribute)) | |
elif isinstance(value, dict): | |
self.data.append(to_node(value, parent=parent, on_attribute=on_attribute)) | |
@@ -133,7 +133,7 @@ class Node(object): | |
self._str_keys = [] | |
self._list_keys = [] | |
self._dict_keys = [] | |
- for key, value in node.items(): | |
+ for key, value in list(node.items()): | |
if isinstance(value, dict): | |
if value: | |
setattr(self, key, to_node(value, parent=self, on_attribute=key)) | |
@@ -141,7 +141,7 @@ class Node(object): | |
setattr(self, key, None) | |
self._dict_keys.append(key) | |
elif isinstance(value, list): | |
- setattr(self, key, NodeList(map(lambda x: to_node(x, parent=self, on_attribute=key), value), parent=self)) | |
+ setattr(self, key, NodeList([to_node(x, parent=self, on_attribute=key) for x in value], parent=self)) | |
self._list_keys.append(key) | |
else: | |
setattr(self, key, value) | |
@@ -166,7 +166,7 @@ class Node(object): | |
return None | |
generator = itertools.dropwhile(lambda x: x is not self, in_list) | |
- generator.next() | |
+ next(generator) | |
return generator | |
@property | |
@@ -186,7 +186,7 @@ class Node(object): | |
return None | |
generator = itertools.dropwhile(lambda x: x is not self, reversed(in_list)) | |
- generator.next() | |
+ next(generator) | |
return generator | |
@property | |
@@ -286,16 +286,16 @@ class Node(object): | |
__call__ = find_all | |
def _generate_identifiers(self): | |
- return sorted(set(map(lambda x: x.lower(), [ | |
+ return sorted(set([x.lower() for x in [ | |
self.type, | |
self.__class__.__name__, | |
self.__class__.__name__.replace("Node", ""), | |
self.type + "_" | |
- ] + self._other_identifiers))) | |
+ ] + self._other_identifiers])) | |
def _get_helpers(self): | |
not_helpers = {'copy', 'dumps', 'find', 'findAll', 'find_all', 'fst', 'help', 'next_generator', 'previous_generator'} | |
- return filter(lambda x: not x.startswith("_") and x not in not_helpers and inspect.ismethod(getattr(self, x)), dir(self)) | |
+ return [x for x in dir(self) if not x.startswith("_") and x not in not_helpers and inspect.ismethod(getattr(self, x))] | |
def fst(self): | |
to_return = {} | |
@@ -314,7 +314,7 @@ class Node(object): | |
return baron.dumps(self.fst()) | |
def help(self, deep=2, with_formatting=False): | |
- print self.__help__(deep=deep, with_formatting=with_formatting) | |
+ print(self.__help__(deep=deep, with_formatting=with_formatting)) | |
def __help__(self, deep=2, with_formatting=False): | |
new_deep = deep - 1 if not isinstance(deep, bool) else deep | |
@@ -329,7 +329,7 @@ class Node(object): | |
to_join += ["%s ->\n %s" % (key, indent(getattr(self, key).__help__(deep=new_deep, with_formatting=with_formatting), " ").lstrip() if getattr(self, key) else getattr(self, key)) for key in self._dict_keys if "formatting" not in key] | |
# need to do this otherwise I end up with stacked quoted list | |
# example: value=[\'DottedAsNameNode(target=\\\'None\\\', as=\\\'False\\\', value=DottedNameNode(value=["NameNode(value=\\\'pouet\\\')"])] | |
- for key in filter(lambda x: "formatting" not in x, self._list_keys): | |
+ for key in [x for x in self._list_keys if "formatting" not in x]: | |
to_join.append(("%s ->" % key)) | |
for i in getattr(self, key): | |
to_join.append(" * " + indent(i.__help__(deep=new_deep, with_formatting=with_formatting), " ").lstrip()) | |
@@ -340,7 +340,7 @@ class Node(object): | |
to_join += ["%s=%s" % (key, repr(getattr(self, key))) for key in self._str_keys if key != "type" and "formatting" in key] | |
to_join += ["%s=%s" % (key, getattr(self, key).__help__(deep=new_deep, with_formatting=with_formatting) if getattr(self, key) else getattr(self, key)) for key in self._dict_keys if "formatting" in key] | |
- for key in filter(lambda x: "formatting" in x, self._list_keys): | |
+ for key in [x for x in self._list_keys if "formatting" in x]: | |
to_join.append(("%s ->" % key)) | |
for i in getattr(self, key): | |
to_join.append(" * " + indent(i.__help__(deep=new_deep, with_formatting=with_formatting), " ").lstrip()) | |
@@ -358,11 +358,11 @@ class Node(object): | |
if name == "init" or self.init: | |
return super(Node, self).__setattr__(name, value) | |
- if name in self._str_keys and not isinstance(value, (basestring, int)): | |
+ if name in self._str_keys and not isinstance(value, (str, int)): | |
value = str(value) | |
elif name in self._dict_keys: | |
- if isinstance(value, basestring): | |
+ if isinstance(value, str): | |
value = to_node(baron.parse(value)[0], parent=self, on_attribute=name) | |
if isinstance(value, dict): # assuming that we got some fst | |
@@ -375,8 +375,8 @@ class Node(object): | |
# TODO check attribution to raise error/warning? | |
elif name in self._list_keys: | |
- if isinstance(value, basestring): | |
- value = NodeList(map(lambda x: to_node(x, parent=self, on_attribute=name), baron.parse(value))) | |
+ if isinstance(value, str): | |
+ value = NodeList([to_node(x, parent=self, on_attribute=name) for x in baron.parse(value)]) | |
elif isinstance(value, dict): # assuming that we got some fst | |
# also assuming the user do strange things | |
@@ -391,7 +391,7 @@ class Node(object): | |
# assume the user can pass a list of random stuff | |
new_value = NodeList() | |
for i in value: | |
- if isinstance(i, basestring): | |
+ if isinstance(i, str): | |
new_value.append(to_node(baron.parse(i)[0], parent=self, on_attribute=name)) | |
elif isinstance(i, dict): # assuming that we got some fst | |
@@ -488,7 +488,7 @@ class DotNode(Node): | |
class RedBaron(NodeList): | |
def __init__(self, source_code): | |
- self.data = map(lambda x: to_node(x, parent=self, on_attribute="root"), baron.parse(source_code)) | |
+ self.data = [to_node(x, parent=self, on_attribute="root") for x in baron.parse(source_code)] | |
# enter the black magic realm, beware of what you might find | |
diff --git a/tests/test_redbaron.py b/tests/test_redbaron.py | |
index e620be9..6d2a2ff 100644 | |
--- a/tests/test_redbaron.py | |
+++ b/tests/test_redbaron.py | |
@@ -178,8 +178,8 @@ def test_parent(): | |
red = RedBaron("[1, 2, 3]") | |
assert red.parent is None | |
assert red[0].parent is red | |
- assert map(lambda x: x.parent, red[0].value) == [red[0]]*5 | |
- assert map(lambda x: x.on_attribute, red[0].value) == ["value"]*5 | |
+ assert [x.parent for x in red[0].value] == [red[0]]*5 | |
+ assert [x.on_attribute for x in red[0].value] == ["value"]*5 | |
def test_parent_copy(): | |
@@ -201,26 +201,26 @@ def test_parent_assign(): | |
assert red[0].target.on_attribute == "target" | |
red = RedBaron("[1, 2, 3]") | |
- assert map(lambda x: x.parent, red[0].value) == [red[0]]*5 | |
- assert map(lambda x: x.on_attribute, red[0].value) == ["value"]*5 | |
+ assert [x.parent for x in red[0].value] == [red[0]]*5 | |
+ assert [x.on_attribute for x in red[0].value] == ["value"]*5 | |
red[0].value = "pouet" | |
- assert map(lambda x: x.parent, red[0].value) == [red[0]] | |
- assert map(lambda x: x.on_attribute, red[0].value) == ["value"] | |
+ assert [x.parent for x in red[0].value] == [red[0]] | |
+ assert [x.on_attribute for x in red[0].value] == ["value"] | |
red[0].value = ["pouet"] | |
- assert map(lambda x: x.parent, red[0].value) == [red[0]] | |
- assert map(lambda x: x.on_attribute, red[0].value) == ["value"] | |
+ assert [x.parent for x in red[0].value] == [red[0]] | |
+ assert [x.on_attribute for x in red[0].value] == ["value"] | |
red[0].value = {"type": "name", "value": "plop"} | |
- assert map(lambda x: x.parent, red[0].value) == [red[0]] | |
- assert map(lambda x: x.on_attribute, red[0].value) == ["value"] | |
+ assert [x.parent for x in red[0].value] == [red[0]] | |
+ assert [x.on_attribute for x in red[0].value] == ["value"] | |
red[0].value = [{"type": "name", "value": "plop"}] | |
- assert map(lambda x: x.parent, red[0].value) == [red[0]] | |
- assert map(lambda x: x.on_attribute, red[0].value) == ["value"] | |
+ assert [x.parent for x in red[0].value] == [red[0]] | |
+ assert [x.on_attribute for x in red[0].value] == ["value"] | |
red[0].value = NameNode({"type": "name", "value": "pouet"}) | |
- assert map(lambda x: x.parent, red[0].value) == [red[0]] | |
- assert map(lambda x: x.on_attribute, red[0].value) == ["value"] | |
+ assert [x.parent for x in red[0].value] == [red[0]] | |
+ assert [x.on_attribute for x in red[0].value] == ["value"] | |
red[0].value = [NameNode({"type": "name", "value": "pouet"})] | |
- assert map(lambda x: x.parent, red[0].value) == [red[0]] | |
- assert map(lambda x: x.on_attribute, red[0].value) == ["value"] | |
+ assert [x.parent for x in red[0].value] == [red[0]] | |
+ assert [x.on_attribute for x in red[0].value] == ["value"] | |
def test_node_next(): | |
@@ -436,7 +436,7 @@ def test_indent_root(): | |
red = RedBaron("pouet") | |
assert red[0].indentation == "" | |
red = RedBaron("pouet\nplop\npop") | |
- assert map(lambda x: x.indentation, red) == ["", "", "", "", ""] | |
+ assert [x.indentation for x in red] == ["", "", "", "", ""] | |
def test_in_while(): | |
-- | |
1.8.3.4 | |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
From a2a3db06f0ecbb0b191f7dd552e8ad2b278cacd4 Mon Sep 17 00:00:00 2001 | |
From: Pierre Penninckx | |
Date: Wed, 9 Apr 2014 14:39:37 +0200 | |
Subject: [PATCH 2/2] fix recursive include | |
--- | |
redbaron.py | 1 + | |
1 file changed, 1 insertion(+) | |
diff --git a/redbaron.py b/redbaron.py | |
index 4f71c8d..141f47e 100644 | |
--- a/redbaron.py | |
+++ b/redbaron.py | |
@@ -527,6 +527,7 @@ class BlackMagicImportHook(ModuleType): | |
self._env = MissingNodesBuilder(globals(), baked_args) | |
def __getattr__(self, name): | |
+ if name == "_env": raise AttributeError | |
return self._env[name] | |
def __setattr__(self, name, value): | |
-- | |
1.8.3.4 | |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
From 31e600631e3e315a85e6f416d33499271b574f42 Mon Sep 17 00:00:00 2001 | |
From: Pierre Penninckx | |
Date: Tue, 8 Apr 2014 14:20:55 +0200 | |
Subject: [PATCH 2/3] use absolute imports to avoid problems | |
--- | |
baron/__init__.py | 6 +++--- | |
baron/baron.py | 16 ++++++++-------- | |
baron/formatting_grouper.py | 2 +- | |
baron/grammator.py | 20 ++++++++++---------- | |
baron/grammator_imports.py | 2 +- | |
baron/grammator_primitives.py | 2 +- | |
baron/grouper.py | 2 +- | |
baron/helpers.py | 2 +- | |
baron/indentation_marker.py | 2 +- | |
baron/inner_formatting_grouper.py | 2 +- | |
baron/parser.py | 2 +- | |
baron/spliter.py | 2 +- | |
12 files changed, 30 insertions(+), 30 deletions(-) | |
diff --git a/baron/__init__.py b/baron/__init__.py | |
index 87291ec..c242b0c 100644 | |
--- a/baron/__init__.py | |
+++ b/baron/__init__.py | |
@@ -1,4 +1,4 @@ | |
-from . import grouper | |
-from . import spliter | |
-from .baron import parse, tokenize | |
+from baron import grouper | |
+from baron import spliter | |
+from baron.baron import parse, tokenize | |
from .dumper import dumps | |
diff --git a/baron/baron.py b/baron/baron.py | |
index 830e5ae..6cb3f6d 100644 | |
--- a/baron/baron.py | |
+++ b/baron/baron.py | |
@@ -2,14 +2,14 @@ from ast import parse as python_ast_parse | |
from rply.errors import ParsingError | |
-from .utils import PrintFunctionImportFinder | |
-from .spliter import split | |
-from .grouper import group | |
-from .tokenizer import tokenize as _tokenize | |
-from .formatting_grouper import group as space_group | |
-from .grammator import generate_parse | |
-from .indentation_marker import mark_indentation | |
-from .inner_formatting_grouper import group as inner_group | |
+from baron.utils import PrintFunctionImportFinder | |
+from baron.spliter import split | |
+from baron.grouper import group | |
+from baron.tokenizer import tokenize as _tokenize | |
+from baron.formatting_grouper import group as space_group | |
+from baron.grammator import generate_parse | |
+from baron.indentation_marker import mark_indentation | |
+from baron.inner_formatting_grouper import group as inner_group | |
parse_tokens = generate_parse(False) | |
diff --git a/baron/formatting_grouper.py b/baron/formatting_grouper.py | |
index 8dc691e..b07a5a7 100644 | |
--- a/baron/formatting_grouper.py | |
+++ b/baron/formatting_grouper.py | |
@@ -1,4 +1,4 @@ | |
-from .utils import FlexibleIterator | |
+from baron.utils import FlexibleIterator | |
class UnExpectedSpaceToken(Exception): | |
pass | |
diff --git a/baron/grammator.py b/baron/grammator.py | |
index 731dbc4..2a4bb79 100644 | |
--- a/baron/grammator.py | |
+++ b/baron/grammator.py | |
@@ -1,13 +1,13 @@ | |
-from .token import BaronToken | |
-from .parser import BaronParserGenerator | |
- | |
-from .tokenizer import TOKENS, KEYWORDS, tokenize | |
-from .utils import create_node_from_token | |
-from .grammator_imports import include_imports | |
-from .grammator_control_structures import include_control_structures | |
-from .grammator_primitives import include_primivites | |
-from .grammator_operators import include_operators | |
-from .grammator_data_structures import include_data_structures | |
+from baron.token import BaronToken | |
+from baron.parser import BaronParserGenerator | |
+ | |
+from baron.tokenizer import TOKENS, KEYWORDS, tokenize | |
+from baron.utils import create_node_from_token | |
+from baron.grammator_imports import include_imports | |
+from baron.grammator_control_structures import include_control_structures | |
+from baron.grammator_primitives import include_primivites | |
+from baron.grammator_operators import include_operators | |
+from baron.grammator_data_structures import include_data_structures | |
def generate_parse(print_function): | |
diff --git a/baron/grammator_imports.py b/baron/grammator_imports.py | |
index 57415c7..42cade6 100644 | |
--- a/baron/grammator_imports.py | |
+++ b/baron/grammator_imports.py | |
@@ -1,4 +1,4 @@ | |
-from .utils import create_node_from_token | |
+from baron.utils import create_node_from_token | |
def include_imports(pg): | |
@pg.production("small_stmt : import") | |
diff --git a/baron/grammator_primitives.py b/baron/grammator_primitives.py | |
index 221d282..71b69b0 100644 | |
--- a/baron/grammator_primitives.py | |
+++ b/baron/grammator_primitives.py | |
@@ -1,4 +1,4 @@ | |
-from .utils import create_node_from_token | |
+from baron.utils import create_node_from_token | |
def include_primivites(pg, print_function): | |
diff --git a/baron/grouper.py b/baron/grouper.py | |
index d052f5a..f2e33bd 100644 | |
--- a/baron/grouper.py | |
+++ b/baron/grouper.py | |
@@ -1,7 +1,7 @@ | |
# encoding: utf-8 | |
import re | |
-from .utils import FlexibleIterator | |
+from baron.utils import FlexibleIterator | |
to_group = ( | |
("+", "="), | |
diff --git a/baron/helpers.py b/baron/helpers.py | |
index aa93cf5..2680b76 100644 | |
--- a/baron/helpers.py | |
+++ b/baron/helpers.py | |
@@ -1,5 +1,5 @@ | |
import json | |
-from . import parse | |
+from baron.baron import parse | |
def show(source_code): | |
diff --git a/baron/indentation_marker.py b/baron/indentation_marker.py | |
index b1be09f..69c0281 100644 | |
--- a/baron/indentation_marker.py | |
+++ b/baron/indentation_marker.py | |
@@ -1,4 +1,4 @@ | |
-from .utils import FlexibleIterator | |
+from baron.utils import FlexibleIterator | |
""" | |
Objectif: add an INDENT token and a DEDENT token arround every block | |
diff --git a/baron/inner_formatting_grouper.py b/baron/inner_formatting_grouper.py | |
index 15dc1ff..dffc1c9 100644 | |
--- a/baron/inner_formatting_grouper.py | |
+++ b/baron/inner_formatting_grouper.py | |
@@ -1,4 +1,4 @@ | |
-from .utils import FlexibleIterator | |
+from baron.utils import FlexibleIterator | |
class UnExpectedFormattingToken(Exception): | |
pass | |
diff --git a/baron/parser.py b/baron/parser.py | |
index 96d0775..47059fb 100644 | |
--- a/baron/parser.py | |
+++ b/baron/parser.py | |
@@ -4,7 +4,7 @@ import stat | |
import tempfile | |
import warnings | |
-from .token import BaronToken | |
+from baron.token import BaronToken | |
from rply import ParserGenerator | |
from rply.parser import LRParser | |
diff --git a/baron/spliter.py b/baron/spliter.py | |
index 0695b30..5b2c967 100644 | |
--- a/baron/spliter.py | |
+++ b/baron/spliter.py | |
@@ -1,5 +1,5 @@ | |
import string | |
-from .utils import FlexibleIterator | |
+from baron.utils import FlexibleIterator | |
def split(sequence): | |
-- | |
1.8.3.4 | |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
From 13c6567d12ee7f6044c85ff39217feadf71e5f4d Mon Sep 17 00:00:00 2001 | |
From: Pierre Penninckx | |
Date: Wed, 9 Apr 2014 16:39:21 +0200 | |
Subject: [PATCH 3/3] modify tests to parse python3 grammar | |
--- | |
tests/test_dumper.py | 34 +++++++++++----------------------- | |
1 file changed, 11 insertions(+), 23 deletions(-) | |
diff --git a/tests/test_dumper.py b/tests/test_dumper.py | |
index f85b9bb..c03462f 100644 | |
--- a/tests/test_dumper.py | |
+++ b/tests/test_dumper.py | |
@@ -69,11 +69,11 @@ def test_from_import_special_notation(): | |
def test_print_empty(): | |
- check_dumps("print") | |
+ check_dumps("print()") | |
def test_print(): | |
- check_dumps("print pouet") | |
+ check_dumps("print(pouet)") | |
def test_print_madness(): | |
@@ -157,10 +157,6 @@ def test_raw_string(): | |
check_dumps("r'pouet'") | |
-def test_unicode_raw_string(): | |
- check_dumps("ur'pouet'") | |
- | |
- | |
def test_binary_string(): | |
check_dumps("b'pouet'") | |
@@ -197,10 +193,6 @@ def test_try_except(): | |
check_dumps("try : pass\nexcept Exception : pass") | |
-def test_try_except_comma(): | |
- check_dumps("try : pass\nexcept Exception , d : pass") | |
- | |
- | |
def test_try_except_as(): | |
check_dumps("try : pass\nexcept Exception as d : pass") | |
@@ -325,10 +317,6 @@ def test_associative_parenthesis(): | |
check_dumps("( \n ( a ) + ( 1 * 4 )\n ) ") | |
-def test_fplist(): | |
- check_dumps("def a((b, c)): pass") | |
- | |
- | |
def test_break(): | |
check_dumps("break") | |
@@ -345,8 +333,8 @@ def test_continue(): | |
def test_raise(): | |
check_dumps("raise") | |
check_dumps("raise a") | |
- check_dumps("raise a , b") | |
- check_dumps("raise a , b , c") | |
+ check_dumps("raise a(b)") | |
+ check_dumps("raise a(b).with_traceback(tb)") | |
def test_del(): | |
@@ -426,7 +414,7 @@ def test_hexa(): | |
def test_octa(): | |
- check_dumps("0123") | |
+ check_dumps("0o123") | |
def test_binary(): | |
@@ -450,15 +438,15 @@ def test_semicolon(): | |
def test_exec(): | |
- check_dumps("exec a") | |
+ check_dumps("exec(a)") | |
def test_exec_globals(): | |
- check_dumps("exec a in b") | |
+ check_dumps("exec(a,b)") | |
def test_exec_globals_locals(): | |
- check_dumps("exec a in b , c") | |
+ check_dumps("exec(a,b,c)") | |
def test_global(): | |
@@ -470,7 +458,7 @@ def test_global_more(): | |
def test_ellipsis(): | |
- check_dumps("a[ . . .]") | |
+ check_dumps("a[...]") | |
def test_yield_atom_empty(): | |
@@ -482,7 +470,7 @@ def test_yield_atom(): | |
def test_repr(): | |
- check_dumps("` a `") | |
+ check_dumps("repr(a)") | |
def test_comment_special_case(): | |
@@ -498,7 +486,7 @@ def test_getitem_special_case(): | |
def test_print_tuple(): | |
- check_dumps("print(pouet, pouet)") | |
+ check_dumps("print((pouet, pouet))") | |
def test_raise_special(): | |
-- | |
1.8.3.4 | |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment