multi_step_model_runner.py 19 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466
  1. from dataclasses import dataclass, field
  2. from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
  3. try:
  4. from aphrodite.attention.backends.flash_attn import FlashAttentionMetadata
  5. except ModuleNotFoundError:
  6. # aphrodite_flash_attn is not installed, use the identical ROCm FA metadata
  7. from aphrodite.attention.backends.rocm_flash_attn import (
  8. ROCmFlashAttentionMetadata as FlashAttentionMetadata,
  9. )
  10. import torch
  11. from aphrodite import _custom_ops as ops
  12. from aphrodite.common.sequence import (CompletionSequenceGroupOutput,
  13. IntermediateTensors, Logprob,
  14. SamplerOutput, SequenceGroupMetadata,
  15. SequenceOutput)
  16. from aphrodite.distributed import get_pp_group
  17. from aphrodite.modeling.model_loader.tensorizer import TensorizerConfig
  18. from aphrodite.task_handler.model_runner import (
  19. GPUModelRunnerBase, ModelInputForGPUWithSamplingMetadata)
  20. from aphrodite.task_handler.model_runner_base import (
  21. BroadcastableModelInput, _init_attn_metadata_from_tensor_dict,
  22. _init_frozen_model_input_from_tensor_dict,
  23. _init_sampling_metadata_from_tensor_dict)
  24. if TYPE_CHECKING:
  25. from aphrodite.attention.backends.abstract import AttentionBackend
  26. @dataclass
  27. class ModelOutput:
  28. """The output of a single model forward pass.
  29. The sampler_output_ready_event is set when the tensors in
  30. sampler_output are ready (the model+sampler forward pass has
  31. completed). We use the event to synchronize the GPU->CPU transfer,
  32. which we want to only run when the data has been written to the
  33. GPU tensors. Until the event is ready, the tensors in sampler_output
  34. will have garbage data.
  35. There are two scenarios:
  36. 1. The output tensors are ready and we can pythonize them immediately.
  37. 2. The output tensors are not ready and we need to wait for the event to be
  38. ready.
  39. """
  40. sampler_output: SamplerOutput
  41. sampler_output_ready_event: torch.cuda.Event
  42. sampled_token_ids: Optional[torch.Tensor] = None
  43. pythonized: bool = False
  44. def pythonize(
  45. self,
  46. input_metadata: "StatefulModelInput",
  47. copy_stream: torch.cuda.Stream,
  48. pinned_sampled_token_buffer: torch.Tensor,
  49. ) -> None:
  50. """Pythonize the output. Blocking."""
  51. if not self.pythonized:
  52. self._pythonize_sampler_output(
  53. input_metadata, copy_stream, pinned_sampled_token_buffer, True
  54. )
  55. self.pythonized = True
  56. def maybe_pythonize(
  57. self,
  58. input_metadata: "StatefulModelInput",
  59. copy_stream: torch.cuda.Stream,
  60. pinned_sampled_token_buffer: torch.Tensor,
  61. ) -> None:
  62. """Pythonize the output if ready, else return None. Non-blocking."""
  63. if not self.pythonized:
  64. self.pythonized = self._pythonize_sampler_output(
  65. input_metadata, copy_stream, pinned_sampled_token_buffer, False
  66. )
  67. def _pythonize_sampler_output(
  68. self,
  69. input_metadata: "StatefulModelInput",
  70. copy_stream: torch.cuda.Stream,
  71. pinned_sampled_token_buffer: torch.Tensor,
  72. blocking: bool,
  73. ) -> bool:
  74. """
  75. If blocking is set, will block until the forward pass for the output is
  76. ready and pythonize the output.
  77. """
  78. assert self.sampled_token_ids is not None
  79. if not blocking and not self.sampler_output_ready_event.query():
  80. return False
  81. if blocking:
  82. self.sampler_output_ready_event.synchronize()
  83. with torch.cuda.stream(copy_stream):
  84. _pythonize_sampler_output(
  85. input_metadata,
  86. self.sampler_output,
  87. pinned_sampled_token_buffer,
  88. self.sampled_token_ids,
  89. )
  90. return True
  91. @dataclass(frozen=False)
  92. class StatefulModelInput(BroadcastableModelInput):
  93. # actual frozen model input dataclass passed to _base_model_runner
  94. frozen_model_input: Optional[ModelInputForGPUWithSamplingMetadata] = None
  95. # list of model outputs for each step, may not be all pythonized
  96. cached_outputs: List[ModelOutput] = field(default_factory=list)
  97. # used to pass sampled token ids from the last step to the current step for
  98. # TP workers. Used to append to end of outputs and used by advance_step
  99. last_sampled_token_ids: Optional[torch.Tensor] = None
  100. current_step: int = 0
  101. is_multi_step: bool = True
  102. is_last_step: bool = False
  103. is_first_multi_step: bool = False
  104. # ping-pong data structures for multi-step to wait on the previous step
  105. step_cuda_events: List[torch.cuda.Event] = field(
  106. default_factory=lambda: [torch.cuda.Event(blocking=True)] * 2
  107. )
  108. num_seqs: int = -1
  109. num_queries: int = -1
  110. def as_broadcastable_tensor_dict(self) -> Dict[str, Any]:
  111. assert self.frozen_model_input is not None
  112. tensor_dict = self.frozen_model_input.as_broadcastable_tensor_dict()
  113. new_tensor_dict = {
  114. "last_sampled_token_ids": self.last_sampled_token_ids,
  115. "current_step": self.current_step,
  116. "is_multi_step": self.is_multi_step,
  117. "is_last_step": self.is_last_step,
  118. "is_first_multi_step": self.is_first_multi_step,
  119. "num_seqs": self.num_seqs,
  120. "num_queries": self.num_queries,
  121. }
  122. tensor_dict.update(new_tensor_dict)
  123. return tensor_dict
  124. @classmethod
  125. def from_broadcasted_tensor_dict(
  126. cls,
  127. tensor_dict: Dict[str, Any],
  128. attn_backend: Optional["AttentionBackend"] = None,
  129. ) -> "StatefulModelInput":
  130. tensor_dict = _init_sampling_metadata_from_tensor_dict(tensor_dict)
  131. if attn_backend is not None:
  132. tensor_dict = _init_attn_metadata_from_tensor_dict(
  133. attn_backend, tensor_dict
  134. )
  135. tensor_dict = _init_frozen_model_input_from_tensor_dict(
  136. ModelInputForGPUWithSamplingMetadata, tensor_dict
  137. )
  138. return cls(**tensor_dict)
  139. def record_step_event(self, current_stream: torch.cuda.Stream):
  140. # record the event for the current step so that the next step can sync
  141. # on it. We modulo by 2 to keep the events in a circular buffer and
  142. # support any attn backends that may be supported in the future. ie
  143. # Flashinfer would want two DecodeWrappers to overlap the CPU and GPU.
  144. self.step_cuda_events[self.current_step & 1] = torch.cuda.Event(
  145. blocking=True
  146. )
  147. self.step_cuda_events[self.current_step & 1].record(current_stream)
  148. def wait_previous_step(self):
  149. # These cuda events are an explicit synchronization to ensure that
  150. # advance_step() (for other attn backends that may be supported in the
  151. # future) do not clobber any data structures that is also used by any
  152. # enqueued forwards steps. For distributed case, only a single event is
  153. # needed, but for single GPU case, since we can let the CPU run much
  154. # further ahead, two events allow us to overlap the advance_step with
  155. # the previous forward (ie using two DecodeWrappers for flashinfer
  156. # backend)
  157. self.step_cuda_events[(self.current_step + 1) & 1].wait()
  158. def add_sampler_output(
  159. self,
  160. sampler_output: SamplerOutput,
  161. sampled_token_ids: Optional[torch.Tensor] = None,
  162. ):
  163. self.cached_outputs.append(
  164. ModelOutput(
  165. sampler_output=sampler_output,
  166. sampler_output_ready_event=None,
  167. sampled_token_ids=sampled_token_ids,
  168. pythonized=False,
  169. )
  170. )
# MutableModelInputForGPUWithMultiStepMetadata is not subclass of
# ModelInputForGPU but it wraps the actual input dataclass and adds multi-step
# metadata
# mypy: disable-error-code=type-var
class MultiStepModelRunner(GPUModelRunnerBase[StatefulModelInput]):
    """Wraps a single-step GPU model runner and drives it for multiple
    decode steps per scheduler invocation, carrying state between steps in
    ``StatefulModelInput`` and pythonizing sampler outputs lazily so the
    CPU can run ahead of the GPU."""
    # mypy: enable-error-code=type-var

    def __init__(self, base_model_runner: GPUModelRunnerBase, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # uses the base model runner to execute the model and wraps it with
        # multi-step logic
        self._base_model_runner: GPUModelRunnerBase = base_model_runner

        self.is_multi_step = self.scheduler_config.is_multi_step
        # used to copy tensors from GPU to CPU asynchronously
        self._copy_stream = torch.cuda.Stream()
        # pinned CPU staging buffer for sampled token ids; allocated lazily
        # on the first driver-side execute_model call
        self.pinned_sampled_token_ids: Optional[torch.Tensor] = None

    def make_model_input_from_broadcasted_tensor_dict(
            self, tensor_dict: Dict[str, Any]) -> StatefulModelInput:
        """Rebuild a StatefulModelInput that was broadcast by the driver."""
        model_input = StatefulModelInput.from_broadcasted_tensor_dict(
            tensor_dict,
            attn_backend=self.attn_backend,
        )
        return model_input

    def prepare_model_input(
        self,
        seq_group_metadata_list: List[SequenceGroupMetadata],
        virtual_engine: int = 0,
        finished_requests_ids: Optional[List[str]] = None,
    ) -> StatefulModelInput:
        """Build the frozen single-step input via the base runner and wrap
        it with fresh multi-step state (seq/query counts for advance_step)."""
        frozen_model_input = self._base_model_runner.prepare_model_input(
            seq_group_metadata_list, virtual_engine, finished_requests_ids)

        model_input = StatefulModelInput(
            frozen_model_input=frozen_model_input,
            num_seqs=len(frozen_model_input.seq_lens),
            num_queries=len(frozen_model_input.query_lens),
        )
        return model_input

    @torch.inference_mode()
    def execute_model(
        self,
        model_input: StatefulModelInput,
        kv_caches: List[torch.Tensor],
        intermediate_tensors: Optional[IntermediateTensors] = None,
        num_steps: int = 1,
    ) -> Optional[Union[List[SamplerOutput], IntermediateTensors]]:
        """
        Execute the model for a single step and update multi-step metadata.

        Returns IntermediateTensors on non-last PP ranks, [] on non-driver
        workers, the full list of pythonized SamplerOutputs on the last
        step, and the raw (single-element) base-runner output otherwise.
        """
        assert num_steps == 1, "MultiStepModelRunner only supports num_steps=1"
        frozen_model_input = model_input.frozen_model_input
        assert frozen_model_input is not None

        # path for warm up runs
        if not model_input.is_multi_step:
            return self._base_model_runner.execute_model(
                frozen_model_input, kv_caches, intermediate_tensors, num_steps)

        # make sure we skip the sampler on the last rank and only pythonize
        # if CPU is ahead.
        if self.is_driver_worker and get_pp_group().is_last_rank:
            if self.pinned_sampled_token_ids is None:
                # one slot per possible sequence; pinned so D2H copies can be
                # issued asynchronously
                self.pinned_sampled_token_ids = torch.zeros(
                    (self.scheduler_config.max_num_seqs, 1),
                    dtype=torch.long,
                    device="cpu",
                    pin_memory=True)

            # keep sampled ids on the GPU (consumed by advance_step) and
            # defer the CPU-side materialization of the sampler output
            self._base_model_runner.model.sampler.include_gpu_probs_tensor = (
                True)
            if frozen_model_input.sampling_metadata:
                frozen_model_input.sampling_metadata.skip_sampler_cpu_output = (
                    True)

        # some pre-execute model logic for multi-step:
        #   - if it's the first step, we need to reset the sampling tensors
        #   - if it's not the first step, we need to advance the step using
        #     the appended sampler output from last iteration
        #   - also maybe pythonize if CPU is ahead of GPU
        current_stream = torch.cuda.current_stream()
        if not model_input.is_first_multi_step:
            # Explicitly block on the previous step's forward to make sure we
            # don't clobber any GPU tensors still in use.
            # This is not needed for flashattn backend, but for other attn
            # backends such as flashinfer that performs extra CPU operations
            # on input metadata we may need to synchronize any CPU operations
            # that might clobber enqueued forwards. (prevents CPU from
            # running too far ahead if needed)
            model_input.wait_previous_step()
            model_input = self._advance_step(
                model_input, model_input.cached_outputs[-1].sampler_output)

        # Execute the model
        output = self._base_model_runner.execute_model(
            frozen_model_input, kv_caches, intermediate_tensors, num_steps=1)

        # record the event for the current step so that the next step can sync
        model_input.record_step_event(current_stream)

        if get_pp_group().is_last_rank and self.is_driver_worker:
            assert len(
                output) == 1, "MultiStepModelRunner requires single-step base_models"

            # event for the pythonization so that we only pythonize if the
            # tensors are ready. May be able to be combined with the step
            # event
            output_ready_event = torch.cuda.Event()
            output_ready_event.record(current_stream)
            if self.parallel_config.pipeline_parallel_size > 1:
                # PP needs a CPU copy to forward across ranks
                output[0].sampled_token_ids_cpu = output[
                    0].sampled_token_ids.cpu()
            model_input.cached_outputs.append(
                ModelOutput(output[0], output_ready_event,
                            output[0].sampled_token_ids, False))
            # make sure we dont try to serialize any GPU tensors
            output[0].sampled_token_ids = None
            output[0].sampled_token_probs = None
            output[0].logprobs = None
            # Pythonize the output if CPU is ahead and the previous step is
            # ready.
            for model_output in model_input.cached_outputs:
                model_output.maybe_pythonize(model_input, self._copy_stream,
                                             self.pinned_sampled_token_ids)

        model_input.current_step += 1

        if not get_pp_group().is_last_rank:
            # Should be IntermediateTensors
            assert isinstance(output, IntermediateTensors)
            return output
        if not self.is_driver_worker:
            return []

        # Pythonize the output and block if needed since it is the last step
        if model_input.is_last_step:
            outputs = []
            for output in model_input.cached_outputs:
                output.pythonize(model_input, self._copy_stream,
                                 self.pinned_sampled_token_ids)
                outputs.append(output.sampler_output)
            return outputs

        # should be [SamplerOutput]
        return output

    def _update_sampling_metadata(self, sampling_metadata, num_seqs: int,
                                  num_queries: int):
        """Sanity-check that sampling metadata matches the decode-only
        multi-step assumptions (no prompts, one sample index per query)."""
        assert sampling_metadata.num_prompts == 0
        assert len(sampling_metadata.seq_groups) == num_queries
        assert sampling_metadata.selected_token_indices.shape == (
            num_queries, )
        # assert sampling_metadata.categorized_sample_indices == TODO: Add if needed # noqa: E501

        # Verify that all sequences are decodes
        for i in range(num_queries):
            seq_group = sampling_metadata.seq_groups[i]

            assert seq_group.is_prompt is False  # No prompt
            assert seq_group.prompt_logprob_indices == []  # No prompt
            assert seq_group.sample_indices == [i]  # Simple
            assert seq_group.seq_len is None  # Decode
            assert seq_group.query_len is None  # Decode

    def _advance_step(self, model_input: StatefulModelInput,
                      out: SamplerOutput) -> StatefulModelInput:
        """Advance attention metadata and GPU input tensors in place for the
        next decode step, using the last step's sampled token ids."""
        frozen_model_input = model_input.frozen_model_input
        assert frozen_model_input is not None
        assert frozen_model_input.attn_metadata is not None

        num_seqs = model_input.num_seqs
        num_queries = model_input.num_queries
        assert num_seqs > 0
        assert num_queries > 0
        assert num_seqs >= num_queries

        attn_metadata = frozen_model_input.attn_metadata
        # only flash-attn style metadata supports in-place advance here
        assert isinstance(attn_metadata, FlashAttentionMetadata)
        attn_metadata.advance_step(num_seqs, num_queries)

        # Update GPU tensors
        ops.advance_step(
            num_seqs=num_seqs,
            num_queries=num_queries,
            block_size=self.block_size,
            input_tokens=frozen_model_input.input_tokens,
            sampled_token_ids=model_input.cached_outputs[-1].sampled_token_ids,
            input_positions=frozen_model_input.input_positions,
            seq_lens=attn_metadata.seq_lens_tensor,
            slot_mapping=attn_metadata.slot_mapping,
            block_tables=attn_metadata.block_tables)

        # keep the CPU-side seq_lens list consistent with the GPU update
        if frozen_model_input.seq_lens is not None:
            for i in range(num_queries):
                frozen_model_input.seq_lens[i] = attn_metadata.seq_lens[i]

        return model_input

    def load_model(self) -> None:
        # delegate to the wrapped single-step runner
        return self._base_model_runner.load_model()

    def save_sharded_state(
        self,
        path: str,
        pattern: Optional[str] = None,
        max_size: Optional[int] = None,
    ) -> None:
        # delegate to the wrapped single-step runner
        return self._base_model_runner.save_sharded_state(
            path, pattern, max_size)

    def save_tensorized_model(self,
                              tensorizer_config: TensorizerConfig) -> None:
        # delegate to the wrapped single-step runner
        return self._base_model_runner.save_tensorized_model(tensorizer_config)

    def profile_run(self) -> None:
        # delegate to the wrapped single-step runner
        return self._base_model_runner.profile_run()

    def remove_all_loras(self):
        # delegate to the wrapped single-step runner
        return self._base_model_runner.remove_all_loras()

    def capture_model(self, kv_caches: List[List]) -> None:
        # delegate to the wrapped single-step runner
        return self._base_model_runner.capture_model(kv_caches)

    @property
    def vocab_size(self) -> int:
        return self._base_model_runner.vocab_size
  391. def _pythonize_sampler_output(
  392. model_input: StatefulModelInput,
  393. output: SamplerOutput,
  394. pinned_sampled_token_buffer: torch.Tensor,
  395. sampled_token_ids: torch.Tensor,
  396. ) -> None:
  397. """This function is only called when the output tensors are ready.
  398. See ModelOutput
  399. """
  400. assert model_input.frozen_model_input is not None
  401. frozen_model_input = model_input.frozen_model_input
  402. assert frozen_model_input.sampling_metadata is not None
  403. # samples generation should have been skipped
  404. assert not output.outputs
  405. pinned_buffer = pinned_sampled_token_buffer[: model_input.num_queries]
  406. # CPU GPU sync
  407. pinned_buffer = pinned_buffer.copy_(sampled_token_ids, non_blocking=False)
  408. # this will not block as the tensors are already on CPU
  409. samples_list = pinned_buffer.tolist()
  410. sampling_metadata = frozen_model_input.sampling_metadata
  411. for seq_group, sample_result in zip(
  412. sampling_metadata.seq_groups, samples_list
  413. ):
  414. seq_ids = seq_group.seq_ids
  415. next_token_ids = sample_result
  416. parent_ids = [0]
  417. seq_outputs: List[SequenceOutput] = []
  418. if seq_group.sampling_params.logits_processors:
  419. assert (
  420. len(seq_group.sampling_params.logits_processors) == 0
  421. ), "Logits Processors are not supported in multi-step decoding"
  422. for parent_id, next_token_id in zip(parent_ids, next_token_ids):
  423. # TODO(will): support logprobs
  424. # Hard coded logprob
  425. seq_outputs.append(
  426. SequenceOutput(
  427. seq_ids[parent_id],
  428. next_token_id,
  429. {next_token_id: Logprob(logprob=-1)},
  430. )
  431. )
  432. output.outputs.append(CompletionSequenceGroupOutput(seq_outputs, None))
  433. assert len(output.outputs) > 0