From 9d01b1132d890247b234662895cf28211b8585b1 Mon Sep 17 00:00:00 2001
From: Alex Orange
Date: Thu, 31 Dec 2020 20:06:24 -0700
Subject: [PATCH 1/2] Fix multiprocessing.queues.Queue not closing properly
 when nothing was put.

See PyPy issue #3372 and CPython issue bpo-42752 for context. Previously,
close() would not close the write pipe unless something had already been
put in the Queue.

--HG--
branch : py3_7_mp_queue_close_fix
---
 lib-python/3/multiprocessing/queues.py     | 125 +++++++++++++--------
 lib-python/3/test/_test_multiprocessing.py |  15 +++
 2 files changed, 91 insertions(+), 49 deletions(-)

diff --git a/lib-python/3/multiprocessing/queues.py b/lib-python/3/multiprocessing/queues.py
index 88f7d267bf..fbbe7ba19a 100644
--- a/lib-python/3/multiprocessing/queues.py
+++ b/lib-python/3/multiprocessing/queues.py
@@ -16,6 +16,7 @@ import collections
 import time
 import weakref
 import errno
+import contextlib
 
 from queue import Empty, Full
 
@@ -27,6 +28,19 @@ _ForkingPickler = context.reduction.ForkingPickler
 
 from .util import debug, info, Finalize, register_after_fork, is_exiting
 
+
+class CleanExchange:
+    def __init__(self, obj, attr):
+        self.obj = obj
+        self.attr = attr
+
+    def exchange(self):
+        result = getattr(self.obj, self.attr)
+        setattr(self.obj, self.attr, None)
+        self.obj = None
+        self.attr = None
+        return result
+
 #
 # Queue type using a pipe, buffer and thread
 #
@@ -72,7 +86,8 @@ class Queue(object):
         self._jointhread = None
         self._joincancelled = False
         self._closed = False
-        self._close = None
+        self._close = self._writer.close
+        self._sentinel_close = None
         self._send_bytes = self._writer.send_bytes
         self._recv_bytes = self._reader.recv_bytes
         self._poll = self._reader.poll
@@ -137,6 +152,10 @@ class Queue(object):
             if close:
                 self._close = None
                 close()
+            sentinel_close = self._sentinel_close
+            if sentinel_close:
+                self._sentinel_close = None
+                sentinel_close()
 
     def join_thread(self):
         debug('Queue.join_thread()')
@@ -160,8 +179,8 @@ class Queue(object):
         self._thread = threading.Thread(
             target=Queue._feed,
             args=(self._buffer, self._notempty, self._send_bytes,
-                  self._wlock, self._writer.close, self._ignore_epipe,
-                  self._on_queue_feeder_error, self._sem),
+                  self._wlock, CleanExchange(self, "_close"),
+                  self._ignore_epipe, self._on_queue_feeder_error, self._sem),
             name='QueueFeederThread'
         )
         self._thread.daemon = True
@@ -178,7 +197,7 @@ class Queue(object):
             )
 
         # Send sentinel to the thread queue object when garbage collected
-        self._close = Finalize(
+        self._sentinel_close = Finalize(
             self, Queue._finalize_close,
             [self._buffer, self._notempty],
             exitpriority=10
@@ -202,8 +221,8 @@ class Queue(object):
             notempty.notify()
 
     @staticmethod
-    def _feed(buffer, notempty, send_bytes, writelock, close, ignore_epipe,
-              onerror, queue_sem):
+    def _feed(buffer, notempty, send_bytes, writelock, close_exchanger,
+              ignore_epipe, onerror, queue_sem):
         debug('starting thread to feed data to pipe')
         nacquire = notempty.acquire
         nrelease = notempty.release
@@ -216,52 +235,60 @@ class Queue(object):
         else:
             wacquire = None
 
-        while 1:
-            try:
-                nacquire()
-                try:
-                    if not buffer:
-                        nwait()
-                finally:
-                    nrelease()
-                try:
-                    while 1:
-                        obj = bpopleft()
-                        if obj is sentinel:
-                            debug('feeder thread got sentinel -- exiting')
-                            close()
-                            return
-
-                        # serialize the data before acquiring the lock
-                        obj = _ForkingPickler.dumps(obj)
-                        if wacquire is None:
-                            send_bytes(obj)
-                        else:
-                            wacquire()
-                            try:
-                                send_bytes(obj)
-                            finally:
-                                wrelease()
-                except IndexError:
-                    pass
-            except Exception as e:
-                if ignore_epipe and getattr(e, 'errno', 0) == errno.EPIPE:
-                    return
-                # Since this runs in a daemon thread the resources it uses
-                # may be become unusable while the process is cleaning up.
-                # We ignore errors which happen after the process has
-                # started to cleanup.
-                if is_exiting():
-                    info('error in queue thread: %s', e)
-                    return
-                else:
-                    # Since the object has not been sent in the queue, we need
-                    # to decrease the size of the queue. The error acts as
-                    # if the object had been silently removed from the queue
-                    # and this step is necessary to have a properly working
-                    # queue.
-                    queue_sem.release()
-                    onerror(e, obj)
+        @contextlib.contextmanager
+        def manager(close_exchanger):
+            close = close_exchanger.exchange()
+            try:
+                yield close
+            finally:
+                close()
+
+        with manager(close_exchanger) as close:
+            while 1:
+                try:
+                    nacquire()
+                    try:
+                        if not buffer:
+                            nwait()
+                    finally:
+                        nrelease()
+                    try:
+                        while 1:
+                            obj = bpopleft()
+                            if obj is sentinel:
+                                debug('feeder thread got sentinel -- exiting')
+                                return
+
+                            # serialize the data before acquiring the lock
+                            obj = _ForkingPickler.dumps(obj)
+                            if wacquire is None:
+                                send_bytes(obj)
+                            else:
+                                wacquire()
+                                try:
+                                    send_bytes(obj)
+                                finally:
+                                    wrelease()
+                    except IndexError:
+                        pass
+                except Exception as e:
+                    if ignore_epipe and getattr(e, 'errno', 0) == errno.EPIPE:
+                        return
+                    # Since this runs in a daemon thread the resources it uses
+                    # may become unusable while the process is cleaning up.
+                    # We ignore errors which happen after the process has
+                    # started to cleanup.
+                    if is_exiting():
+                        info('error in queue thread: %s', e)
+                        return
+                    else:
+                        # Since the object has not been sent in the queue, we
+                        # need to decrease the size of the queue. The error
+                        # acts as if the object had been silently removed from
+                        # the queue and this step is necessary to have a
+                        # properly working queue.
+                        queue_sem.release()
+                        onerror(e, obj)
 
     @staticmethod
     def _on_queue_feeder_error(e, obj):
diff --git a/lib-python/3/test/_test_multiprocessing.py b/lib-python/3/test/_test_multiprocessing.py
index a324ae2a91..9492698e30 100644
--- a/lib-python/3/test/_test_multiprocessing.py
+++ b/lib-python/3/test/_test_multiprocessing.py
@@ -1117,6 +1117,21 @@ class _TestQueue(BaseTestCase):
         # Assert that the serialization and the hook have been called correctly
         self.assertTrue(not_serializable_obj.reduce_was_called)
         self.assertTrue(not_serializable_obj.on_queue_feeder_error_was_called)
+
+    def test_closed_queue_closes_both(self):
+        q = multiprocessing.Queue()
+        q.put(1)
+        q.get()
+        q.close()
+        q.join_thread()
+        self.assertTrue(q._reader.closed)
+        self.assertTrue(q._writer.closed)
+
+        q = multiprocessing.Queue()
+        q.close()
+        q.join_thread()
+        self.assertTrue(q._reader.closed)
+        self.assertTrue(q._writer.closed)
 
 #
 #
--
GitLab
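A note for readers on the CleanExchange pattern introduced above: the Queue now starts out owning the writer's close callback (self._close = self._writer.close), so close() works even when nothing was ever put. Once the feeder thread starts, it calls exchange() to take sole ownership of that callback, so exactly one party closes the write pipe. The sketch below is editorial and not part of the patch series; Owner and its list-based fake "pipe" are invented for illustration.

# Editorial sketch: the exactly-once hand-off that CleanExchange enables.
# Not part of the patch; Owner and the fake "pipe" are illustrative only.

class CleanExchange:
    """Hand a callback from its current owner to whoever calls exchange()."""
    def __init__(self, obj, attr):
        self.obj = obj
        self.attr = attr

    def exchange(self):
        result = getattr(self.obj, self.attr)
        setattr(self.obj, self.attr, None)  # strip the callback from the owner
        self.obj = None                     # drop references: single use
        self.attr = None
        return result

class Owner:
    def __init__(self):
        self.closed = []
        self._close = lambda: self.closed.append('write pipe')

    def close(self):
        close, self._close = self._close, None
        if close:        # only fires if no one has exchanged it away
            close()

# Case 1: no worker ever started -- the owner itself closes the pipe.
owner = Owner()
owner.close()
assert owner.closed == ['write pipe']

# Case 2: a worker takes ownership first (as Queue._feed now does).
owner = Owner()
close = CleanExchange(owner, '_close').exchange()
owner.close()    # no-op: the callback has been handed off
close()          # the worker closes the pipe, exactly once
assert owner.closed == ['write pipe']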
From bcee2515dd7ea35276cc461f901a22a969a90555 Mon Sep 17 00:00:00 2001
From: Alex Orange
Date: Sat, 13 Mar 2021 11:13:31 -0700
Subject: [PATCH 2/2] Add "PyPy added" comments tagging the fix.

--HG--
branch : py3_7_mp_queue_close_fix
---
 lib-python/3/multiprocessing/queues.py     | 1 +
 lib-python/3/test/_test_multiprocessing.py | 1 +
 2 files changed, 2 insertions(+)

diff --git a/lib-python/3/multiprocessing/queues.py b/lib-python/3/multiprocessing/queues.py
index fbbe7ba19a..09c3c76660 100644
--- a/lib-python/3/multiprocessing/queues.py
+++ b/lib-python/3/multiprocessing/queues.py
@@ -29,6 +29,7 @@ _ForkingPickler = context.reduction.ForkingPickler
 from .util import debug, info, Finalize, register_after_fork, is_exiting
 
 
+# PyPy added to fix issue bpo 42752
 class CleanExchange:
     def __init__(self, obj, attr):
         self.obj = obj
diff --git a/lib-python/3/test/_test_multiprocessing.py b/lib-python/3/test/_test_multiprocessing.py
index 9492698e30..0b6a6c5d1a 100644
--- a/lib-python/3/test/_test_multiprocessing.py
+++ b/lib-python/3/test/_test_multiprocessing.py
@@ -1118,6 +1118,7 @@ class _TestQueue(BaseTestCase):
         self.assertTrue(not_serializable_obj.reduce_was_called)
         self.assertTrue(not_serializable_obj.on_queue_feeder_error_was_called)
 
+    # PyPy added to fix issue bpo 42752
     def test_closed_queue_closes_both(self):
         q = multiprocessing.Queue()
         q.put(1)
--
GitLab
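For completeness, the behavior that test_closed_queue_closes_both pins down can be seen from the interpreter. This is an editorial reproducer, not part of the patch series, and like the test it peeks at the private _reader/_writer attributes:

import multiprocessing

# A queue that never sees a put(): before this fix, close() closed only
# the read end, because the write end was closed by the feeder thread,
# which never starts unless put() is called.
q = multiprocessing.Queue()
q.close()
q.join_thread()
print(q._reader.closed)  # True both before and after the fix
print(q._writer.closed)  # False before the fix, True with it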