cpython: 0f0e9b7d4f1d (original) (raw)
Mercurial > cpython
changeset 89352:0f0e9b7d4f1d 2.7
Issue #9974: When untokenizing, use row info to insert backslash+newline. Original patches by A. Kuchling and G. Rees (#12691). [#9974]
author | Terry Jan Reedy <tjreedy@udel.edu> |
---|---|
date | Sun, 23 Feb 2014 23:32:59 -0500 |
parents | a9464e900705 |
children | 0e77dd295a88 fadde95c134e |
files | Lib/test/test_tokenize.py Lib/tokenize.py |
diffstat | 2 files changed, 21 insertions(+), 1 deletions(-)[+] [-] Lib/test/test_tokenize.py 16 Lib/tokenize.py 6 |
line wrap: on
line diff
--- a/Lib/test/test_tokenize.py
+++ b/Lib/test/test_tokenize.py
@@ -4,7 +4,7 @@ Tests for the tokenize module.

     >>> import glob, random, sys

 The tests can be really simple. Given a small fragment of source
-code, print out a table with tokens. The ENDMARK is omitted for
+code, print out a table with tokens. The ENDMARKER is omitted for
 brevity.

     >>> dump_tokens("1 + 1")
@@ -618,6 +618,7 @@ def decistmt(s):

 class UntokenizeTest(TestCase):

     def test_bad_input_order(self):
+        # raise if previous row
         u = Untokenizer()
         u.prev_row = 2
         u.prev_col = 2
@@ -625,8 +626,21 @@ class UntokenizeTest(TestCase):
             u.add_whitespace((1,3))
         self.assertEqual(cm.exception.args[0],
                 'start (1,3) precedes previous end (2,2)')
+        # raise if previous column in row
+        self.assertRaises(ValueError, u.add_whitespace, (2,1))
+    def test_backslash_continuation(self):
+        # The problem is that <whitespace>\<newline> leaves no token
+        u = Untokenizer()
+        u.prev_row = 1
+        u.prev_col = 1
+        u.tokens = []
+        u.add_whitespace((2, 0))
+        self.assertEqual(u.tokens, ['\\\n'])
+        u.prev_row = 2
+        u.add_whitespace((4, 4))
+        self.assertEqual(u.tokens, ['\\\n', '\\\n\\\n', '    '])
     def test_iter_compat(self):
         u = Untokenizer()
         token = (NAME, 'Hello')
--- a/Lib/tokenize.py
+++ b/Lib/tokenize.py
@@ -188,6 +188,10 @@ class Untokenizer:
         if row < self.prev_row or row == self.prev_row and col < self.prev_col:
             raise ValueError("start ({},{}) precedes previous end ({},{})"
                              .format(row, col, self.prev_row, self.prev_col))
+        row_offset = row - self.prev_row
+        if row_offset:
+            self.tokens.append("\\\n" * row_offset)
+            self.prev_col = 0
         col_offset = col - self.prev_col
         if col_offset:
             self.tokens.append(" " * col_offset)
@@ -199,6 +203,8 @@ class Untokenizer:
                 self.compat(t, it)
                 break
             tok_type, token, start, end, line = t
+            if tok_type == ENDMARKER:
+                break
             self.add_whitespace(start)
             self.tokens.append(token)
             self.prev_row, self.prev_col = end