@@ -638,7 +638,7 @@
 from test import support
 from tokenize import (tokenize, _tokenize, untokenize, NUMBER, NAME, OP,
                      STRING, ENDMARKER, ENCODING, tok_name, detect_encoding,
-                     open as tokenize_open)
+                     open as tokenize_open, Untokenizer)
 from io import BytesIO
 from unittest import TestCase
 import os, sys, glob
@@ -1153,6 +1153,19 @@ def test_pathological_trailing_whitespace(self):
         # See http://bugs.python.org/issue16152
         self.assertExactTypeEqual('@ ', token.AT)
 
+class UntokenizeTest(TestCase):
+
+    def test_bad_input_order(self):
+        u = Untokenizer()
+        u.prev_row = 2
+        u.prev_col = 2
+        with self.assertRaises(ValueError) as cm:
+            u.add_whitespace((1,3))
+        self.assertEqual(cm.exception.args[0],
+                'start (1,3) precedes previous end (2,2)')
+        self.assertRaises(ValueError, u.add_whitespace, (2,1))
+
+
 __test__ = {"doctests" : doctests, 'decistmt': decistmt}
 
 def test_main():
@@ -1162,6 +1175,7 @@ def test_main():
     support.run_unittest(Test_Tokenize)
     support.run_unittest(TestDetectEncoding)
     support.run_unittest(TestTokenize)
+    support.run_unittest(UntokenizeTest)
 
 if __name__ == "__main__":
     test_main()
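
Note: the message asserted in test_bad_input_order is raised by Untokenizer.add_whitespace when the requested start position lies before the previously emitted end. A minimal sketch of that bounds check (not the library source; it assumes only the prev_row/prev_col attributes the test sets directly, and uses a hypothetical class name):

# Hypothetical sketch of the check the new test exercises.
class _UntokenizerSketch:
    def __init__(self):
        self.prev_row = 1
        self.prev_col = 0

    def add_whitespace(self, start):
        # Reject a start position that precedes the previously emitted end.
        row, col = start
        if row < self.prev_row or row == self.prev_row and col < self.prev_col:
            raise ValueError("start ({},{}) precedes previous end ({},{})"
                             .format(row, col, self.prev_row, self.prev_col))
        # (The real method would then append the whitespace needed to advance
        # from (prev_row, prev_col) to start.)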