[3.6] bpo-26762, bpo-31019: Backport multiprocessing fixes from maste… · python/cpython@d0adfb2
@@ -32,11 +32,12 @@
 # without thread support.
 import threading
 
-import multiprocessing.dummy
 import multiprocessing.connection
-import multiprocessing.managers
+import multiprocessing.dummy
 import multiprocessing.heap
+import multiprocessing.managers
 import multiprocessing.pool
+import multiprocessing.queues
 
 from multiprocessing import util
 
@@ -64,6 +65,13 @@
 def latin(s):
     return s.encode('latin')
 
+
+def close_queue(queue):
+    if isinstance(queue, multiprocessing.queues.Queue):
+        queue.close()
+        queue.join_thread()
+
+
 #
 # Constants
 #
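close_queue() only acts on a real multiprocessing.queues.Queue, because the thread-based (dummy) and manager-proxy queues used by the other test mixins have no feeder thread to close and join. For context, a minimal stand-alone sketch of what the helper does (the drain_queue name is illustrative, not part of the patch):

```python
import multiprocessing
import multiprocessing.queues


def drain_queue(q):
    # Only a real multiprocessing.queues.Queue owns a background feeder
    # thread; dummy (thread-based) and manager-proxy queues do not, which is
    # why the patch guards close()/join_thread() with an isinstance() check.
    if isinstance(q, multiprocessing.queues.Queue):
        q.close()          # forbid further put(); flush buffered items
        q.join_thread()    # wait for the feeder thread to exit


if __name__ == '__main__':
    q = multiprocessing.Queue()
    q.put(42)
    print(q.get())
    drain_queue(q)   # without this, the feeder thread can still be alive at
                     # interpreter shutdown and be reported as dangling
```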
@@ -275,6 +283,7 @@ def test_process(self):
         self.assertEqual(p.exitcode, 0)
         self.assertEqual(p.is_alive(), False)
         self.assertNotIn(p, self.active_children())
+        close_queue(q)
 
     @classmethod
     def _test_terminate(cls):
@@ -414,6 +423,7 @@ def test_lose_target_ref(self):
         p.join()
         self.assertIs(wr(), None)
         self.assertEqual(q.get(), 5)
+        close_queue(q)
 
 
 #
@@ -600,6 +610,7 @@ def test_put(self):
         self.assertEqual(queue_full(queue, MAXSIZE), False)
 
         proc.join()
+        close_queue(queue)
 
     @classmethod
     def _test_get(cls, queue, child_can_start, parent_can_continue):
@@ -662,6 +673,7 @@ def test_get(self):
         self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3)
 
         proc.join()
+        close_queue(queue)
 
     @classmethod
     def _test_fork(cls, queue):
@@ -697,6 +709,7 @@ def test_fork(self):
         self.assertRaises(pyqueue.Empty, queue.get, False)
 
         p.join()
+        close_queue(queue)
 
     def test_qsize(self):
         q = self.Queue()
@@ -712,6 +725,7 @@ def test_qsize(self):
         self.assertEqual(q.qsize(), 1)
         q.get()
         self.assertEqual(q.qsize(), 0)
+        close_queue(q)
 
     @classmethod
     def _test_task_done(cls, q):
@@ -739,6 +753,7 @@ def test_task_done(self):
 
         for p in workers:
             p.join()
+        close_queue(queue)
 
     def test_no_import_lock_contention(self):
         with test.support.temp_cwd():
@@ -769,6 +784,7 @@ def test_timeout(self):
         # Tolerate a delta of 30 ms because of the bad clock resolution on
         # Windows (usually 15.6 ms)
         self.assertGreaterEqual(delta, 0.170)
+        close_queue(q)
 
     def test_queue_feeder_donot_stop_onexc(self):
         # bpo-30414: verify feeder handles exceptions correctly
@@ -782,7 +798,9 @@ def reduce(self):
         q = self.Queue()
         q.put(NotSerializable())
         q.put(True)
-        self.assertTrue(q.get(timeout=0.1))
+        # bpo-30595: use a timeout of 1 second for slow buildbots
+        self.assertTrue(q.get(timeout=1.0))
+        close_queue(q)
 
 #
 #
@@ -895,10 +913,12 @@ def test_notify(self):
         p = self.Process(target=self.f, args=(cond, sleeping, woken))
         p.daemon = True
         p.start()
+        self.addCleanup(p.join)
 
         p = threading.Thread(target=self.f, args=(cond, sleeping, woken))
         p.daemon = True
         p.start()
+        self.addCleanup(p.join)
 
         # wait for both children to start sleeping
         sleeping.acquire()
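Each child started by these tests is now registered with unittest's addCleanup(p.join), so it is joined after the test body whether the assertions pass or fail, which keeps the new dangling-process warnings quiet. A minimal, self-contained sketch of the pattern (class and function names are illustrative, not part of the patch):

```python
import multiprocessing
import time
import unittest


def _sleep_briefly():
    time.sleep(0.1)


class CleanupJoinExample(unittest.TestCase):
    def test_child_is_joined_even_if_the_test_fails(self):
        p = multiprocessing.Process(target=_sleep_briefly)
        p.daemon = True
        p.start()
        # Cleanups run after the test body, pass or fail, so the child is
        # always reaped and never shows up as a dangling process later.
        self.addCleanup(p.join)
        self.assertIsNotNone(p.pid)


if __name__ == '__main__':
    unittest.main()
```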
@@ -941,11 +961,13 @@ def test_notify_all(self):
                              args=(cond, sleeping, woken, TIMEOUT1))
             p.daemon = True
             p.start()
+            self.addCleanup(p.join)
 
             t = threading.Thread(target=self.f,
                                  args=(cond, sleeping, woken, TIMEOUT1))
             t.daemon = True
             t.start()
+            self.addCleanup(t.join)
 
         # wait for them all to sleep
         for i in range(6):
@@ -964,10 +986,12 @@ def test_notify_all(self):
             p = self.Process(target=self.f, args=(cond, sleeping, woken))
             p.daemon = True
             p.start()
+            self.addCleanup(p.join)
 
             t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
             t.daemon = True
             t.start()
+            self.addCleanup(t.join)
 
         # wait for them to all sleep
         for i in range(6):
@@ -1143,6 +1167,7 @@ def test_event(self):
         p.daemon = True
         p.start()
         self.assertEqual(wait(), True)
+        p.join()
 
 #
 # Tests for Barrier - adapted from tests in test/lock_tests.py
@@ -1318,6 +1343,7 @@ def test_wait_return(self):
         self.run_threads(self._test_wait_return_f, (self.barrier, queue))
         results = [queue.get() for i in range(self.N)]
         self.assertEqual(results.count(0), 1)
+        close_queue(queue)
 
     @classmethod
     def _test_action_f(cls, barrier, results):
@@ -1488,6 +1514,7 @@ def test_thousand(self):
             p = self.Process(target=self._test_thousand_f,
                              args=(self.barrier, passes, child_conn, lock))
             p.start()
+            self.addCleanup(p.join)
 
         for i in range(passes):
             for j in range(self.N):
@@ -2971,6 +2998,8 @@ def test_access(self):
         w.close()
         self.assertEqual(conn.recv(), 'foobar'*2)
 
+        p.join()
+
 #
 #
 #
@@ -3296,16 +3325,16 @@ def test_level(self):
 
         logger.setLevel(LEVEL1)
         p = self.Process(target=self._test_level, args=(writer,))
-        p.daemon = True
         p.start()
         self.assertEqual(LEVEL1, reader.recv())
+        p.join()
 
         logger.setLevel(logging.NOTSET)
         root_logger.setLevel(LEVEL2)
         p = self.Process(target=self._test_level, args=(writer,))
-        p.daemon = True
         p.start()
         self.assertEqual(LEVEL2, reader.recv())
+        p.join()
 
         root_logger.setLevel(root_level)
         logger.setLevel(level=LOG_LEVEL)
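In test_level the children are no longer daemonized; they are joined explicitly after their output is read, so nothing is left running when the dangling-process check fires. A small stand-alone sketch of the same pattern (names are illustrative, not part of the patch):

```python
import multiprocessing


def _child(conn):
    conn.send('done')
    conn.close()


if __name__ == '__main__':
    reader, writer = multiprocessing.Pipe(duplex=False)
    p = multiprocessing.Process(target=_child, args=(writer,))
    p.start()            # note: no p.daemon = True, the child will be joined
    print(reader.recv())
    p.join()             # reap the child so it cannot be reported as dangling
```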
@@ -3459,7 +3488,7 @@ def _this_sub_process(q):
     except pyqueue.Empty:
         pass
 
-def _test_process(q):
+def _test_process():
     queue = multiprocessing.Queue()
     subProc = multiprocessing.Process(target=_this_sub_process, args=(queue,))
     subProc.daemon = True
@@ -3499,8 +3528,7 @@ def flush(self):
 class TestStdinBadfiledescriptor(unittest.TestCase):
 
     def test_queue_in_process(self):
-        queue = multiprocessing.Queue()
-        proc = multiprocessing.Process(target=_test_process, args=(queue,))
+        proc = multiprocessing.Process(target=_test_process)
         proc.start()
         proc.join()
 
@@ -4108,7 +4136,32 @@ def test_empty(self):
 # Mixins
 #
 
-class ProcessesMixin(object):
+class BaseMixin(object):
+    @classmethod
+    def setUpClass(cls):
+        cls.dangling = (multiprocessing.process._dangling.copy(),
+                        threading._dangling.copy())
+
+    @classmethod
+    def tearDownClass(cls):
+        # bpo-26762: Some multiprocessing objects like Pool create reference
+        # cycles. Trigger a garbage collection to break these cycles.
+        test.support.gc_collect()
+
+        processes = set(multiprocessing.process._dangling) - set(cls.dangling[0])
+        if processes:
+            print('Warning -- Dangling processes: %s' % processes,
+                  file=sys.stderr)
+        processes = None
+
+        threads = set(threading._dangling) - set(cls.dangling[1])
+        if threads:
+            print('Warning -- Dangling threads: %s' % threads,
+                  file=sys.stderr)
+        threads = None
+
+
+class ProcessesMixin(BaseMixin):
     TYPE = 'processes'
     Process = multiprocessing.Process
     connection = multiprocessing.connection
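BaseMixin snapshots the private WeakSets multiprocessing.process._dangling and threading._dangling at class setup and diffs them at class teardown to report anything the tests left behind. A simplified, self-contained analogue of the same snapshot-and-diff idea, using the public threading.enumerate() instead of the private WeakSets (purely illustrative, not how the patch is implemented):

```python
import sys
import threading
import time

# Snapshot the live threads before the "tests" run...
before = set(threading.enumerate())

worker = threading.Thread(target=time.sleep, args=(0.2,))
worker.start()           # deliberately left running and not joined

# ...then report anything new that is still alive afterwards.
leaked = set(threading.enumerate()) - before
if leaked:
    print('Warning -- Dangling threads: %s' % leaked, file=sys.stderr)

worker.join()            # tidy up so the example itself exits cleanly
```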
@@ -4131,7 +4184,7 @@ class ProcessesMixin(object):
     RawArray = staticmethod(multiprocessing.RawArray)
 
 
-class ManagerMixin(object):
+class ManagerMixin(BaseMixin):
     TYPE = 'manager'
     Process = multiprocessing.Process
     Queue = property(operator.attrgetter('manager.Queue'))
@@ -4155,30 +4208,43 @@ def Pool(cls, *args, **kwds):
 
     @classmethod
     def setUpClass(cls):
+        super().setUpClass()
         cls.manager = multiprocessing.Manager()
 
     @classmethod
     def tearDownClass(cls):
         # only the manager process should be returned by active_children()
         # but this can take a bit on slow machines, so wait a few seconds
         # if there are other children too (see #17395)
+        start_time = time.monotonic()
         t = 0.01
-        while len(multiprocessing.active_children()) > 1 and t < 5:
+        while len(multiprocessing.active_children()) > 1:
             time.sleep(t)
             t *= 2
+            dt = time.monotonic() - start_time
+            if dt >= 5.0:
+                print("Warning -- multiprocessing.Manager still has %s active "
+                      "children after %s seconds"
+                      % (multiprocessing.active_children(), dt),
+                      file=sys.stderr)
+                break
+
         gc.collect()  # do garbage collection
         if cls.manager._number_of_objects() != 0:
             # This is not really an error since some tests do not
             # ensure that all processes which hold a reference to a
             # managed object have been joined.
-            print('Shared objects which still exist at manager shutdown:')
+            print('Warning -- Shared objects which still exist at manager '
+                  'shutdown:')
             print(cls.manager._debug_info())
         cls.manager.shutdown()
         cls.manager.join()
         cls.manager = None
 
+        super().tearDownClass()
+
 
-class ThreadsMixin(object):
+class ThreadsMixin(BaseMixin):
     TYPE = 'threads'
     Process = multiprocessing.dummy.Process
     connection = multiprocessing.dummy.connection
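The old ManagerMixin loop stopped once the doubling sleep interval reached 5, which bounds the number of iterations rather than the elapsed time; the rewrite measures a real deadline on the monotonic clock and warns when it is exceeded. Roughly the same pattern as a standalone helper (the function name and timeout parameter are illustrative, not part of the patch):

```python
import multiprocessing
import sys
import time


def wait_for_manager_children(timeout=5.0):
    # Poll with exponential backoff, but bound the wait by elapsed time on
    # the monotonic clock instead of by the size of the sleep interval.
    start_time = time.monotonic()
    t = 0.01
    while len(multiprocessing.active_children()) > 1:
        time.sleep(t)
        t *= 2
        dt = time.monotonic() - start_time
        if dt >= timeout:
            print("Warning -- still %s active children after %.1f seconds"
                  % (multiprocessing.active_children(), dt), file=sys.stderr)
            break
```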
@@ -4255,18 +4321,33 @@ def setUpModule():
         multiprocessing.get_logger().setLevel(LOG_LEVEL)
 
     def tearDownModule():
+        need_sleep = False
+
+        # bpo-26762: Some multiprocessing objects like Pool create reference
+        # cycles. Trigger a garbage collection to break these cycles.
+        test.support.gc_collect()
+
         multiprocessing.set_start_method(old_start_method[0], force=True)
         # pause a bit so we don't get warning about dangling threads/processes
-        time.sleep(0.5)
+        processes = set(multiprocessing.process._dangling) - set(dangling[0])
+        if processes:
+            need_sleep = True
+            print('Warning -- Dangling processes: %s' % processes,
+                  file=sys.stderr)
+        processes = None
+
+        threads = set(threading._dangling) - set(dangling[1])
+        if threads:
+            need_sleep = True
+            print('Warning -- Dangling threads: %s' % threads,
+                  file=sys.stderr)
+        threads = None
+
+        # Sleep 500 ms to give time to child processes to complete.
+        if need_sleep:
+            time.sleep(0.5)
         multiprocessing.process._cleanup()
-        gc.collect()
-        tmp = set(multiprocessing.process._dangling) - set(dangling[0])
-        if tmp:
-            print('Dangling processes:', tmp, file=sys.stderr)
-        del tmp
-        tmp = set(threading._dangling) - set(dangling[1])
-        if tmp:
-            print('Dangling threads:', tmp, file=sys.stderr)
+        test.support.gc_collect()
 
     remote_globs['setUpModule'] = setUpModule
     remote_globs['tearDownModule'] = tearDownModule