# thread.py — concurrent.futures ThreadPoolExecutor implementation.
  1. # Copyright 2009 Brian Quinlan. All Rights Reserved.
  2. # Licensed to PSF under a Contributor Agreement.
  3. """Implements ThreadPoolExecutor."""
  4. __author__ = 'Brian Quinlan (brian@sweetapp.com)'
  5. from concurrent.futures import _base
  6. import itertools
  7. import queue
  8. import threading
  9. import types
  10. import weakref
  11. import os
# Maps each live worker thread to its work queue.  Keys are weak so a dead
# worker's entry vanishes automatically; consumed by _python_exit() to wake
# and join every remaining worker at interpreter shutdown.
_threads_queues = weakref.WeakKeyDictionary()
# Becomes True once the interpreter starts shutting down; read by workers
# and by ThreadPoolExecutor.submit() to refuse new work.
_shutdown = False
# Lock that ensures that new workers are not created while the interpreter is
# shutting down. Must be held while mutating _threads_queues and _shutdown.
_global_shutdown_lock = threading.Lock()
def _python_exit():
    """Interpreter-shutdown hook: stop accepting work, wake and join workers.

    Sets the module-wide ``_shutdown`` flag under ``_global_shutdown_lock``
    (so no new worker can be created concurrently), then puts one ``None``
    wake-up sentinel on every registered worker queue, and finally joins
    each worker thread.
    """
    global _shutdown
    with _global_shutdown_lock:
        _shutdown = True
    items = list(_threads_queues.items())
    # Wake all workers first so none stays blocked on work_queue.get() ...
    for t, q in items:
        q.put(None)
    # ... then wait for each of them to exit.
    for t, q in items:
        t.join()
# Register for `_python_exit()` to be called just before joining all
# non-daemon threads. This is used instead of `atexit.register()` for
# compatibility with subinterpreters, which no longer support daemon threads.
# See bpo-39812 for context.
threading._register_atexit(_python_exit)

# At fork, reinitialize the `_global_shutdown_lock` lock in the child process.
# Acquiring it before fork guarantees no thread holds it mid-mutation at the
# moment of fork; the parent releases its copy, while the child re-creates
# the lock (the inherited copy would otherwise stay locked forever there).
if hasattr(os, 'register_at_fork'):
    os.register_at_fork(before=_global_shutdown_lock.acquire,
                        after_in_child=_global_shutdown_lock._at_fork_reinit,
                        after_in_parent=_global_shutdown_lock.release)
  36. class _WorkItem(object):
  37. def __init__(self, future, fn, args, kwargs):
  38. self.future = future
  39. self.fn = fn
  40. self.args = args
  41. self.kwargs = kwargs
  42. def run(self):
  43. if not self.future.set_running_or_notify_cancel():
  44. return
  45. try:
  46. result = self.fn(*self.args, **self.kwargs)
  47. except BaseException as exc:
  48. self.future.set_exception(exc)
  49. # Break a reference cycle with the exception 'exc'
  50. self = None
  51. else:
  52. self.future.set_result(result)
  53. __class_getitem__ = classmethod(types.GenericAlias)
def _worker(executor_reference, work_queue, initializer, initargs):
    """Main loop run by each pool thread.

    Args:
        executor_reference: weakref.ref to the owning ThreadPoolExecutor
            (weak so the pool object can be collected while workers block).
        work_queue: queue of _WorkItem objects; ``None`` is the wake-up /
            shutdown sentinel.
        initializer: optional callable run once before processing any item.
        initargs: positional arguments for *initializer*.
    """
    if initializer is not None:
        try:
            initializer(*initargs)
        except BaseException:
            _base.LOGGER.critical('Exception in initializer:', exc_info=True)
            # Mark the pool broken (if it still exists) and bail out;
            # this worker never processes any work items.
            executor = executor_reference()
            if executor is not None:
                executor._initializer_failed()
            return
    try:
        while True:
            work_item = work_queue.get(block=True)
            if work_item is not None:
                work_item.run()
                # Delete references to object. See issue16284
                del work_item

                # attempt to increment idle count
                executor = executor_reference()
                if executor is not None:
                    executor._idle_semaphore.release()
                # Drop the strong reference so the executor stays collectible
                # while this thread blocks on the next get().
                del executor
                continue

            # Got the None sentinel: decide whether to exit or keep waiting.
            executor = executor_reference()
            # Exit if:
            #   - The interpreter is shutting down OR
            #   - The executor that owns the worker has been collected OR
            #   - The executor that owns the worker has been shutdown.
            if _shutdown or executor is None or executor._shutdown:
                # Flag the executor as shutting down as early as possible if it
                # is not gc-ed yet.
                if executor is not None:
                    executor._shutdown = True
                # Notice other workers
                work_queue.put(None)
                return
            del executor
    except BaseException:
        _base.LOGGER.critical('Exception in worker', exc_info=True)
  93. class BrokenThreadPool(_base.BrokenExecutor):
  94. """
  95. Raised when a worker thread in a ThreadPoolExecutor failed initializing.
  96. """
class ThreadPoolExecutor(_base.Executor):
    """Executor that runs submitted callables in a pool of worker threads."""

    # Used to assign unique thread names when thread_name_prefix is not supplied.
    _counter = itertools.count().__next__

    def __init__(self, max_workers=None, thread_name_prefix='',
                 initializer=None, initargs=()):
        """Initializes a new ThreadPoolExecutor instance.

        Args:
            max_workers: The maximum number of threads that can be used to
                execute the given calls.
            thread_name_prefix: An optional name prefix to give our threads.
            initializer: A callable used to initialize worker threads.
            initargs: A tuple of arguments to pass to the initializer.

        Raises:
            ValueError: if max_workers is not strictly positive.
            TypeError: if initializer is supplied but not callable.
        """
        if max_workers is None:
            # ThreadPoolExecutor is often used to:
            # * CPU bound task which releases GIL
            # * I/O bound task (which releases GIL, of course)
            #
            # We use cpu_count + 4 for both types of tasks.
            # But we limit it to 32 to avoid consuming surprisingly large resource
            # on many core machine.
            max_workers = min(32, (os.cpu_count() or 1) + 4)
        if max_workers <= 0:
            raise ValueError("max_workers must be greater than 0")

        if initializer is not None and not callable(initializer):
            raise TypeError("initializer must be a callable")

        self._max_workers = max_workers
        self._work_queue = queue.SimpleQueue()
        # Released once by each worker that becomes idle; lets
        # _adjust_thread_count() reuse an idle thread instead of spawning.
        self._idle_semaphore = threading.Semaphore(0)
        self._threads = set()
        # False, or a string describing why the pool is broken.
        self._broken = False
        self._shutdown = False
        self._shutdown_lock = threading.Lock()
        self._thread_name_prefix = (thread_name_prefix or
                                    ("ThreadPoolExecutor-%d" % self._counter()))
        self._initializer = initializer
        self._initargs = initargs

    def submit(self, fn, /, *args, **kwargs):
        # Both locks: _shutdown_lock guards this pool's state, the global
        # lock guards against interpreter shutdown racing with submission.
        with self._shutdown_lock, _global_shutdown_lock:
            if self._broken:
                raise BrokenThreadPool(self._broken)

            if self._shutdown:
                raise RuntimeError('cannot schedule new futures after shutdown')
            if _shutdown:
                raise RuntimeError('cannot schedule new futures after '
                                   'interpreter shutdown')

            f = _base.Future()
            w = _WorkItem(f, fn, args, kwargs)

            self._work_queue.put(w)
            self._adjust_thread_count()
            return f
    submit.__doc__ = _base.Executor.submit.__doc__

    def _adjust_thread_count(self):
        """Start a new worker thread unless an idle one is available."""
        # if idle threads are available, don't spin new threads
        if self._idle_semaphore.acquire(timeout=0):
            return

        # When the executor gets lost, the weakref callback will wake up
        # the worker threads.
        def weakref_cb(_, q=self._work_queue):
            q.put(None)

        num_threads = len(self._threads)
        if num_threads < self._max_workers:
            thread_name = '%s_%d' % (self._thread_name_prefix or self,
                                     num_threads)
            t = threading.Thread(name=thread_name, target=_worker,
                                 args=(weakref.ref(self, weakref_cb),
                                       self._work_queue,
                                       self._initializer,
                                       self._initargs))
            t.start()
            self._threads.add(t)
            # Register with the module-level registry so _python_exit()
            # can wake and join this worker at interpreter shutdown.
            _threads_queues[t] = self._work_queue

    def _initializer_failed(self):
        """Mark the pool broken and fail every pending future.

        Called from a worker thread whose initializer raised.
        """
        with self._shutdown_lock:
            self._broken = ('A thread initializer failed, the thread pool '
                            'is not usable anymore')
            # Drain work queue and mark pending futures failed
            while True:
                try:
                    work_item = self._work_queue.get_nowait()
                except queue.Empty:
                    break
                if work_item is not None:
                    work_item.future.set_exception(BrokenThreadPool(self._broken))

    def shutdown(self, wait=True, *, cancel_futures=False):
        with self._shutdown_lock:
            self._shutdown = True
            if cancel_futures:
                # Drain all work items from the queue, and then cancel their
                # associated futures.
                while True:
                    try:
                        work_item = self._work_queue.get_nowait()
                    except queue.Empty:
                        break
                    if work_item is not None:
                        work_item.future.cancel()

            # Send a wake-up to prevent threads calling
            # _work_queue.get(block=True) from permanently blocking.
            self._work_queue.put(None)
        if wait:
            for t in self._threads:
                t.join()
    shutdown.__doc__ = _base.Executor.shutdown.__doc__