cpython: 80f6f77a7cc3
changeset   99257:80f6f77a7cc3
branch      3.5
Issue #25626: Change zlib to accept Py_ssize_t and cap to UINT_MAX

The underlying zlib library stores sizes in "unsigned int". The corresponding
Python parameters are all sizes of buffers filled in by zlib, so it is okay to
reduce higher values to the UINT_MAX internal cap. OverflowError is still
raised for sizes that do not fit in Py_ssize_t.

Sizes are now limited to Py_ssize_t rather than unsigned long, because Python
byte strings cannot be larger than Py_ssize_t. Previously this could result in
a SystemError on 32-bit platforms.

This resolves a regression in the gzip module when reading more than UINT_MAX
or LONG_MAX bytes in one call, introduced by revision 62723172412c.

[#25626]
author      Martin Panter <vadmium+py@gmail.com>
date        Fri, 20 Nov 2015 08:13:35 +0000
parents     c3cc5d70a6bf
children    afa1b6cd77a5 df11d58fce00
files       Doc/howto/clinic.rst Lib/test/support/__init__.py Lib/test/test_gzip.py Lib/test/test_zlib.py Misc/NEWS Modules/clinic/zlibmodule.c.h Modules/zlibmodule.c
diffstat    7 files changed, 120 insertions(+), 47 deletions(-)

 Doc/howto/clinic.rst          |  16
 Lib/test/support/__init__.py  |  13
 Lib/test/test_gzip.py         |   9
 Lib/test/test_zlib.py         |  56
 Misc/NEWS                     |   7
 Modules/clinic/zlibmodule.c.h |   8
 Modules/zlibmodule.c          |  58
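The user-visible effect, sketched for a 64-bit build (an illustration only,
not part of the changeset; it mirrors the new test_maxlen_large and
test_overflow tests below):

    import sys
    import zlib

    data = b'x' * 100000
    compressed = zlib.compress(data, 1)

    # max_length values up to sys.maxsize are now accepted; zlib's internal
    # UINT_MAX cap is applied transparently instead of raising OverflowError.
    dco = zlib.decompressobj()
    assert dco.decompress(compressed, sys.maxsize) == data

    # Sizes that do not fit in Py_ssize_t still raise OverflowError.
    try:
        zlib.decompress(compressed, 15, sys.maxsize + 1)
    except OverflowError as exc:
        print(exc)  # e.g. "int too large to convert"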
--- a/Doc/howto/clinic.rst
+++ b/Doc/howto/clinic.rst
@@ -1249,18 +1249,18 @@ Here's the simplest example of a custom
 
     /*[python input]
 
-    class uint_converter(CConverter):
+    class capped_uint_converter(CConverter):
         type = 'unsigned int'
-        converter = 'uint_converter'
+        converter = 'capped_uint_converter'
 
     [python start generated code]*/
 
-This block adds a converter to Argument Clinic named ``uint``.  Parameters
-declared as ``uint`` will be declared as type ``unsigned int``, and will
-be parsed by the ``'O&'`` format unit, which will call the ``uint_converter``
-converter function.
-``uint`` variables automatically support default values.
+This block adds a converter to Argument Clinic named ``capped_uint``.  Parameters
+declared as ``capped_uint`` will be declared as type ``unsigned int``, and will
+be parsed by the ``'O&'`` format unit, which will call the
+``capped_uint_converter`` converter function.  ``capped_uint`` variables
+automatically support default values.
 
 More sophisticated custom converters can insert custom C code to
 handle initialization and cleanup.
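To make the renamed example concrete: a parameter declared with this converter
accepts any object coercible to an integer. A short sketch of the resulting
Python-level behaviour (illustrative only; `Length` is a hypothetical class
mirroring the new test_maxlen_custom test below):

    import zlib

    class Length:
        def __int__(self):       # the converter coerces through nb_int
            return 100

    compressed = zlib.compress(b'a' * 1000, 1)
    dco = zlib.decompressobj()
    chunk = dco.decompress(compressed, Length())  # max_length coerced to 100
    print(len(chunk))                             # 100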
--- a/Lib/test/support/__init__.py
+++ b/Lib/test/support/__init__.py
@@ -1608,12 +1608,15 @@ class _MemoryWatchdog:
 def bigmemtest(size, memuse, dry_run=True):
     """Decorator for bigmem tests.
 
-    'minsize' is the minimum useful size for the test (in arbitrary,
-    test-interpreted units.) 'memuse' is the number of 'bytes per size' for
-    the test, or a good estimate of it.
+    'size' is a requested size for the test (in arbitrary, test-interpreted
+    units.) 'memuse' is the number of bytes per unit for the test, or a good
+    estimate of it. For example, a test that needs two byte buffers, of 4 GiB
+    each, could be decorated with @bigmemtest(size=_4G, memuse=2).
 
-    if 'dry_run' is False, it means the test doesn't support dummy runs
-    when -M is not specified.
+    The 'size' argument is normally passed to the decorated test method as an
+    extra argument. If 'dry_run' is true, the value passed to the test method
+    may be less than the requested value. If 'dry_run' is false, it means the
+    test doesn't support dummy runs when -M is not specified.
     """
     def decorator(f):
         def wrapper(self):
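A hypothetical test using the decorator as the revised docstring describes
(names are examples only, not part of the changeset):

    import unittest
    from test.support import bigmemtest, _4G

    class ExampleBigMemTest(unittest.TestCase):
        # Two byte buffers of 4 GiB each => memuse=2.  The harness passes
        # 'size' to the method; with dry_run=True it may be smaller than
        # requested when -M is not given.
        @bigmemtest(size=_4G, memuse=2)
        def test_swapcase(self, size):
            data = b'x' * size           # first buffer
            swapped = data.swapcase()    # second buffer of equal size
            self.assertEqual(len(swapped), size)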
--- a/Lib/test/test_gzip.py
+++ b/Lib/test/test_gzip.py
@@ -3,6 +3,7 @@
 
 import unittest
 from test import support
+from test.support import bigmemtest, _4G
 import os
 import io
 import struct
@@ -116,6 +117,14 @@ class TestGzip(BaseTest):
             self.assertEqual(f.tell(), nread)
         self.assertEqual(b''.join(blocks), data1 * 50)
 
+    @bigmemtest(size=_4G, memuse=1)
+    def test_read_large(self, size):
+        # Read chunk size over UINT_MAX should be supported, despite zlib's
+        # limitation per low-level call
+        compressed = gzip.compress(data1, compresslevel=1)
+        f = gzip.GzipFile(fileobj=io.BytesIO(compressed), mode='rb')
+        self.assertEqual(f.read(size), data1)
+
     def test_io_on_closed_object(self):
         # Test that I/O operations on closed GzipFile objects raise a
         # ValueError, just like the corresponding functions on file objects.
--- a/Lib/test/test_zlib.py
+++ b/Lib/test/test_zlib.py
@@ -122,11 +122,17 @@ class ExceptionTestCase(unittest.TestCas
         self.assertRaises(ValueError, zlib.decompressobj().flush, 0)
         self.assertRaises(ValueError, zlib.decompressobj().flush, -1)
 
+    @support.cpython_only
+    def test_overflow(self):
+        with self.assertRaisesRegex(OverflowError, 'int too large'):
+            zlib.decompress(b'', 15, sys.maxsize + 1)
+        with self.assertRaisesRegex(OverflowError, 'int too large'):
+            zlib.decompressobj().flush(sys.maxsize + 1)
+
 
 class BaseCompressTestCase(object):
     def check_big_compress_buffer(self, size, compress_func):
         _1M = 1024 * 1024
-        fmt = "%%0%dx" % (2 * _1M)
         # Generate 10MB worth of random, and expand it by repeating it.
         # The assumption is that zlib's memory is not big enough to exploit
         # such spread out redundancy.
@@ -196,6 +202,18 @@ class CompressTestCase(BaseCompressTestC
         finally:
             data = None
 
+    @bigmemtest(size=_4G, memuse=1)
+    def test_large_bufsize(self, size):
+        # Test decompress(bufsize) parameter greater than the internal limit
+        data = HAMLET_SCENE * 10
+        compressed = zlib.compress(data, 1)
+        self.assertEqual(zlib.decompress(compressed, 15, size), data)
+
+    def test_custom_bufsize(self):
+        data = HAMLET_SCENE * 10
+        compressed = zlib.compress(data, 1)
+        self.assertEqual(zlib.decompress(compressed, 15, CustomInt()), data)
+
 
 class CompressObjectTestCase(BaseCompressTestCase, unittest.TestCase):
     # Test compression object
@@ -364,6 +382,21 @@ class CompressObjectTestCase(BaseCompres
         self.assertRaises(ValueError, dco.decompress, b"", -1)
         self.assertEqual(b'', dco.unconsumed_tail)
 
+    def test_maxlen_large(self):
+        # Sizes up to sys.maxsize should be accepted, although zlib is
+        # internally limited to expressing sizes with unsigned int
+        data = HAMLET_SCENE * 10
+        self.assertGreater(len(data), zlib.DEF_BUF_SIZE)
+        compressed = zlib.compress(data, 1)
+        dco = zlib.decompressobj()
+        self.assertEqual(dco.decompress(compressed, sys.maxsize), data)
+
+    def test_maxlen_custom(self):
+        data = HAMLET_SCENE * 10
+        compressed = zlib.compress(data, 1)
+        dco = zlib.decompressobj()
+        self.assertEqual(dco.decompress(compressed, CustomInt()), data[:100])
+
     def test_clear_unconsumed_tail(self):
         # Issue #12050: calling decompress() without providing max_length
         # should clear the unconsumed_tail attribute.
@@ -537,6 +570,22 @@ class CompressObjectTestCase(BaseCompres
         data = zlib.compress(input2)
         self.assertEqual(dco.flush(), input1[1:])
 
+    @bigmemtest(size=_4G, memuse=1)
+    def test_flush_large_length(self, size):
+        # Test flush(length) parameter greater than internal limit UINT_MAX
+        input = HAMLET_SCENE * 10
+        data = zlib.compress(input, 1)
+        dco = zlib.decompressobj()
+        dco.decompress(data, 1)
+        self.assertEqual(dco.flush(size), input[1:])
+
+    def test_flush_custom_length(self):
+        input = HAMLET_SCENE * 10
+        data = zlib.compress(input, 1)
+        dco = zlib.decompressobj()
+        dco.decompress(data, 1)
+        self.assertEqual(dco.flush(CustomInt()), input[1:])
+
     @requires_Compress_copy
     def test_compresscopy(self):
         # Test copying a compression object
@@ -725,5 +774,10 @@ LAERTES
 """
 
+class CustomInt:
+    def __int__(self):
+        return 100
+
+
 if __name__ == "__main__":
     unittest.main()
--- a/Misc/NEWS
+++ b/Misc/NEWS
@@ -77,6 +77,13 @@ Core and Builtins
 Library
 -------
 
+- Issue #25626: Change three zlib functions to accept sizes that fit in
+  Py_ssize_t, but internally cap those sizes to UINT_MAX.  This resolves a
+  regression in 3.5 where GzipFile.read() failed to read chunks larger than 2
+  or 4 GiB.  The change affects the zlib.Decompress.decompress() max_length
+  parameter, the zlib.decompress() bufsize parameter, and the
+  zlib.Decompress.flush() length parameter.
+
 - Issue #25583: Avoid incorrect errors raised by os.makedirs(exist_ok=True)
   when the OS gives priority to errors such as EACCES over EEXIST.
--- a/Modules/clinic/zlibmodule.c.h
+++ b/Modules/clinic/zlibmodule.c.h
@@ -68,7 +68,7 @@ zlib_decompress(PyModuleDef *module, PyO
     unsigned int bufsize = DEF_BUF_SIZE;
 
     if (!PyArg_ParseTuple(args, "y*|iO&:decompress",
-        &data, &wbits, uint_converter, &bufsize))
+        &data, &wbits, capped_uint_converter, &bufsize))
         goto exit;
     return_value = zlib_decompress_impl(module, &data, wbits, bufsize);
 
@@ -242,7 +242,7 @@ zlib_Decompress_decompress(compobject *s
     unsigned int max_length = 0;
 
     if (!PyArg_ParseTuple(args, "y*|O&:decompress",
-        &data, uint_converter, &max_length))
+        &data, capped_uint_converter, &max_length))
         goto exit;
     return_value = zlib_Decompress_decompress_impl(self, &data, max_length);
 
@@ -353,7 +353,7 @@ zlib_Decompress_flush(compobject *self, 
     unsigned int length = DEF_BUF_SIZE;
 
     if (!PyArg_ParseTuple(args, "|O&:flush",
-        uint_converter, &length))
+        capped_uint_converter, &length))
         goto exit;
     return_value = zlib_Decompress_flush_impl(self, length);
 
@@ -438,4 +438,4 @@ exit:
 #ifndef ZLIB_COMPRESS_COPY_METHODDEF
     #define ZLIB_COMPRESS_COPY_METHODDEF
 #endif /* !defined(ZLIB_COMPRESS_COPY_METHODDEF) */
-/*[clinic end generated code: output=56ed1147bbbb4788 input=a9049054013a1b77]*/
+/*[clinic end generated code: output=7734aec079550bc8 input=a9049054013a1b77]*/
--- a/Modules/zlibmodule.c
+++ b/Modules/zlibmodule.c
@@ -226,42 +226,42 @@ zlib_compress_impl(PyModuleDef *module, 
 
 /*[python input]
 
-class uint_converter(CConverter):
+class capped_uint_converter(CConverter):
     type = 'unsigned int'
-    converter = 'uint_converter'
+    converter = 'capped_uint_converter'
    c_ignored_default = "0"
 
 [python start generated code]*/
-/*[python end generated code: output=da39a3ee5e6b4b0d input=22263855f7a3ebfd]*/
+/*[python end generated code: output=da39a3ee5e6b4b0d input=35521e4e733823c7]*/
 
 static int
-uint_converter(PyObject *obj, void *ptr)
+capped_uint_converter(PyObject *obj, void *ptr)
 {
-    long val;
-    unsigned long uval;
+    PyObject *long_obj;
+    Py_ssize_t val;
 
-    val = PyLong_AsLong(obj);
-    if (val == -1 && PyErr_Occurred()) {
-        uval = PyLong_AsUnsignedLong(obj);
-        if (uval == (unsigned long)-1 && PyErr_Occurred())
-            return 0;
+    long_obj = (PyObject *)_PyLong_FromNbInt(obj);
+    if (long_obj == NULL) {
+        return 0;
     }
-    else {
-        if (val < 0) {
-            PyErr_SetString(PyExc_ValueError,
-                            "value must be positive");
-            return 0;
-        }
-        uval = (unsigned long)val;
+    val = PyLong_AsSsize_t(long_obj);
+    Py_DECREF(long_obj);
+    if (val == -1 && PyErr_Occurred()) {
+        return 0;
     }
 
-    if (uval > UINT_MAX) {
-        PyErr_SetString(PyExc_OverflowError,
-                        "Python int too large for C unsigned int");
+    if (val < 0) {
+        PyErr_SetString(PyExc_ValueError,
+                        "value must be positive");
         return 0;
     }
 
-    *(unsigned int *)ptr = Py_SAFE_DOWNCAST(uval, unsigned long, unsigned int);
+    if ((size_t)val > UINT_MAX) {
+        *(unsigned int *)ptr = UINT_MAX;
+    }
+    else {
+        *(unsigned int *)ptr = Py_SAFE_DOWNCAST(val, Py_ssize_t,
+                                                unsigned int);
+    }
+
     return 1;
 }
 
@@ -272,7 +272,7 @@ zlib.decompress
         Compressed data.
     wbits: int(c_default="MAX_WBITS") = MAX_WBITS
         The window buffer size.
-    bufsize: uint(c_default="DEF_BUF_SIZE") = DEF_BUF_SIZE
+    bufsize: capped_uint(c_default="DEF_BUF_SIZE") = DEF_BUF_SIZE
         The initial output buffer size.
     /
 
@@ -282,7 +282,7 @@ Returns a bytes object containing the un
 static PyObject *
 zlib_decompress_impl(PyModuleDef *module, Py_buffer *data, int wbits,
                      unsigned int bufsize)
-/*[clinic end generated code: output=444d0987f3429574 input=0f4b9abb7103f50e]*/
+/*[clinic end generated code: output=444d0987f3429574 input=da095118b3243b27]*/
 {
     PyObject *result_str = NULL;
     Byte *input;
 
@@ -691,7 +691,7 @@ zlib.Decompress.decompress
     data: Py_buffer
         The binary data to decompress.
-    max_length: uint = 0
+    max_length: capped_uint = 0
         The maximum allowable length of the decompressed data.
         Unconsumed input data will be stored in
         the unconsumed_tail attribute.
 
@@ -707,7 +707,7 @@ Call the flush() method to clear these b
 static PyObject *
 zlib_Decompress_decompress_impl(compobject *self, Py_buffer *data,
                                 unsigned int max_length)
-/*[clinic end generated code: output=b82e2a2c19f5fe7b input=02cfc047377cec86]*/
+/*[clinic end generated code: output=b82e2a2c19f5fe7b input=68b6508ab07c2cf0]*/
 {
     int err;
     unsigned int old_length, length = DEF_BUF_SIZE;
 
@@ -1048,7 +1048,7 @@ error:
 /*[clinic input]
 zlib.Decompress.flush
 
-    length: uint(c_default="DEF_BUF_SIZE") = zlib.DEF_BUF_SIZE
+    length: capped_uint(c_default="DEF_BUF_SIZE") = zlib.DEF_BUF_SIZE
         the initial size of the output buffer.
     /
 
@@ -1057,7 +1057,7 @@ Return a bytes object containing any rem
 static PyObject *
 zlib_Decompress_flush_impl(compobject *self, unsigned int length)
-/*[clinic end generated code: output=db6fb753ab698e22 input=1580956505978993]*/
+/*[clinic end generated code: output=db6fb753ab698e22 input=1bb961eb21b62aa0]*/
 {
     int err;
     unsigned int new_length;
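For readers skimming the C, the new converter's behaviour can be modelled in a
few lines of Python (an illustrative sketch only; UINT_MAX is assumed to be
the usual 32-bit value):

    import sys

    UINT_MAX = 0xffffffff  # assumed 32-bit unsigned int, as on common platforms

    def capped_uint(obj):
        # Mirrors capped_uint_converter: coerce via __int__ (_PyLong_FromNbInt),
        # reject values outside Py_ssize_t (OverflowError, as raised by
        # PyLong_AsSsize_t) or below zero (ValueError), and cap the rest.
        val = int(obj)
        if not (-sys.maxsize - 1) <= val <= sys.maxsize:
            raise OverflowError('Python int too large to convert to C ssize_t')
        if val < 0:
            raise ValueError('value must be positive')
        return min(val, UINT_MAX)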