multiproc_worker_utils.py

import asyncio
import multiprocessing
import os
import sys
import threading
import traceback
import uuid
from dataclasses import dataclass
from multiprocessing import Queue
from multiprocessing.connection import wait
from multiprocessing.process import BaseProcess
from typing import (Any, Callable, Dict, Generic, List, Optional, TextIO,
                    TypeVar, Union)

from loguru import logger

T = TypeVar('T')

_TERMINATE = "TERMINATE"  # sentinel

# ANSI color codes
CYAN = '\033[1;36m'
RESET = '\033[0;0m'

JOIN_TIMEOUT_S = 2

# Use a dedicated multiprocessing context for workers.
# Both spawn and fork work.
mp_method = os.getenv("APHRODITE_WORKER_MULTIPROC_METHOD", "fork")
mp = multiprocessing.get_context(mp_method)
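
# The start method can be overridden via the environment before launch,
# e.g. (hypothetical invocation):
#   APHRODITE_WORKER_MULTIPROC_METHOD=spawn python engine.py
# "spawn" is the safer choice when the parent process has already
# initialized CUDA, since forking such a process is unsafe.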


@dataclass
class Result(Generic[T]):
    """Result of task dispatched to worker"""

    task_id: uuid.UUID
    value: Optional[T] = None
    exception: Optional[BaseException] = None


class ResultFuture(threading.Event, Generic[T]):
    """Synchronous future for non-async case"""

    def __init__(self):
        super().__init__()
        self.result: Optional[Result[T]] = None

    def set_result(self, result: Result[T]):
        self.result = result
        self.set()

    def get(self) -> T:
        self.wait()
        assert self.result is not None
        if self.result.exception is not None:
            raise self.result.exception
        return self.result.value  # type: ignore[return-value]


def _set_future_result(future: Union[ResultFuture, asyncio.Future],
                       result: Result):
    if isinstance(future, ResultFuture):
        future.set_result(result)
        return
    # An asyncio.Future must be completed from its own event loop's thread
    loop = future.get_loop()
    if not loop.is_closed():
        if result.exception is not None:
            loop.call_soon_threadsafe(future.set_exception, result.exception)
        else:
            loop.call_soon_threadsafe(future.set_result, result.value)


class ResultHandler(threading.Thread):
    """Handle results from all workers (in background thread)"""

    def __init__(self) -> None:
        super().__init__(daemon=True)
        self.result_queue = mp.Queue()
        self.tasks: Dict[uuid.UUID, Union[ResultFuture, asyncio.Future]] = {}

    def run(self):
        for result in iter(self.result_queue.get, _TERMINATE):
            future = self.tasks.pop(result.task_id)
            _set_future_result(future, result)
        # Ensure that all waiters will receive an exception
        for task_id, future in self.tasks.items():
            _set_future_result(
                future,
                Result(task_id=task_id,
                       exception=ChildProcessError("worker died")))

    def close(self):
        self.result_queue.put(_TERMINATE)


class WorkerMonitor(threading.Thread):
    """Monitor worker status (in background thread)"""

    def __init__(self, workers: List['ProcessWorkerWrapper'],
                 result_handler: ResultHandler):
        super().__init__(daemon=True)
        self.workers = workers
        self.result_handler = result_handler
        self._close = False

    def run(self) -> None:
        # Blocks until any worker exits
        dead_sentinels = wait([w.process.sentinel for w in self.workers])
        if not self._close:
            self._close = True

            # Kill / cleanup all workers
            for worker in self.workers:
                process = worker.process
                if process.sentinel in dead_sentinels:
                    process.join(JOIN_TIMEOUT_S)
                if process.exitcode is not None and process.exitcode != 0:
                    logger.error(f"Worker {process.name} pid {process.pid} "
                                 f"died, exit code: {process.exitcode}")

            # Cleanup any remaining workers
            logger.info("Killing local Aphrodite worker processes")
            for worker in self.workers:
                worker.kill_worker()
            # Must be done after worker task queues are all closed
            self.result_handler.close()

        for worker in self.workers:
            worker.process.join(JOIN_TIMEOUT_S)

    def close(self):
        if self._close:
            return
        self._close = True
        logger.info("Terminating local Aphrodite worker processes")
        for worker in self.workers:
            worker.terminate_worker()
        # Must be done after worker task queues are all closed
        self.result_handler.close()


class ProcessWorkerWrapper:
    """Local process wrapper for aphrodite.task_handler.Worker,
    for handling single-node multi-GPU tensor parallel."""

    def __init__(self, result_handler: ResultHandler,
                 worker_factory: Callable[[], Any]) -> None:
        self._task_queue = mp.Queue()
        self.result_queue = result_handler.result_queue
        self.tasks = result_handler.tasks
        self.process: BaseProcess = mp.Process(  # type: ignore[attr-defined]
            target=_run_worker_process,
            name="AphroditeWorkerProcess",
            kwargs=dict(
                worker_factory=worker_factory,
                task_queue=self._task_queue,
                result_queue=self.result_queue,
            ),
            daemon=True)

        self.process.start()

    def _enqueue_task(self, future: Union[ResultFuture, asyncio.Future],
                      method: str, args, kwargs):
        task_id = uuid.uuid4()
        self.tasks[task_id] = future
        try:
            self._task_queue.put((task_id, method, args, kwargs))
        except BaseException as e:
            del self.tasks[task_id]
            raise ChildProcessError("worker died") from e

    def execute_method(self, method: str, *args, **kwargs):
        future: ResultFuture = ResultFuture()
        self._enqueue_task(future, method, args, kwargs)
        return future

    async def execute_method_async(self, method: str, *args, **kwargs):
        future = asyncio.get_running_loop().create_future()
        self._enqueue_task(future, method, args, kwargs)
        return await future

    def terminate_worker(self):
        try:
            self._task_queue.put(_TERMINATE)
        except ValueError:
            # Task queue is already closed; fall back to killing the process
            self.process.kill()
        self._task_queue.close()

    def kill_worker(self):
        self._task_queue.close()
        self.process.kill()


def _run_worker_process(
    worker_factory: Callable[[], Any],
    task_queue: Queue,
    result_queue: Queue,
) -> None:
    """Worker process event loop"""

    # Add process-specific prefix to stdout and stderr
    process_name = mp.current_process().name
    pid = os.getpid()
    _add_prefix(sys.stdout, process_name, pid)
    _add_prefix(sys.stderr, process_name, pid)

    # Initialize worker
    worker = worker_factory()
    del worker_factory

    # Accept tasks from the engine in task_queue
    # and return task output in result_queue
    logger.info("Worker ready; awaiting tasks")
    try:
        for items in iter(task_queue.get, _TERMINATE):
            output = None
            exception = None
            task_id, method, args, kwargs = items
            try:
                executor = getattr(worker, method)
                output = executor(*args, **kwargs)
            except BaseException as e:
                tb = traceback.format_exc()
                logger.error(f"Exception in worker {process_name} while "
                             f"processing method {method}: {e}, {tb}")
                exception = e
            result_queue.put(
                Result(task_id=task_id, value=output, exception=exception))
    except KeyboardInterrupt:
        pass
    except Exception:
        logger.exception("Worker failed")

    logger.info("Worker exiting")


def _add_prefix(file: TextIO, worker_name: str, pid: int) -> None:
    """Prepend each output line with process-specific prefix"""

    prefix = f"{CYAN}({worker_name} pid={pid}){RESET} "
    file_write = file.write

    def write_with_prefix(s: str):
        if not s:
            return
        if file.start_new_line:  # type: ignore[attr-defined]
            file_write(prefix)
        idx = 0
        while (next_idx := s.find('\n', idx)) != -1:
            next_idx += 1
            file_write(s[idx:next_idx])
            if next_idx == len(s):
                file.start_new_line = True  # type: ignore[attr-defined]
                return
            file_write(prefix)
            idx = next_idx
        file_write(s[idx:])
        file.start_new_line = False  # type: ignore[attr-defined]

    file.start_new_line = True  # type: ignore[attr-defined]
    file.write = write_with_prefix  # type: ignore[method-assign]
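

# ---------------------------------------------------------------------------
# Illustrative usage (an addition to this write-up, not part of the original
# module): a minimal end-to-end sketch wiring ResultHandler,
# ProcessWorkerWrapper, and WorkerMonitor together. _EchoWorker is a
# hypothetical stand-in for aphrodite.task_handler.Worker; everything else
# uses only the APIs defined above. Assumes the default "fork" start method.
# ---------------------------------------------------------------------------


class _EchoWorker:
    """Hypothetical worker used only by the example below."""

    def echo(self, msg: str) -> str:
        return msg


if __name__ == "__main__":
    result_handler = ResultHandler()
    worker = ProcessWorkerWrapper(result_handler, _EchoWorker)
    worker_monitor = WorkerMonitor([worker], result_handler)
    result_handler.start()
    worker_monitor.start()
    try:
        # Synchronous dispatch: execute_method returns a ResultFuture
        # whose get() blocks until the worker posts a Result.
        future = worker.execute_method("echo", "hello")
        print(future.get())
        # Async dispatch (from inside a running event loop) would instead be:
        #   await worker.execute_method_async("echo", "hello")
    finally:
        # Sends _TERMINATE to the worker and shuts down the result handler.
        worker_monitor.close()
        worker.process.join(JOIN_TIMEOUT_S)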