mlp_speculator_worker.py

from typing import List, Optional, Set, Tuple

import torch

from aphrodite.common.sequence import (ExecuteModelRequest,
                                       SequenceGroupMetadata)
from aphrodite.modeling.layers.sampler import SamplerOutput
from aphrodite.modeling.sampling_metadata import SamplingMetadata
from aphrodite.spec_decode.multi_step_worker import MultiStepWorker
from aphrodite.spec_decode.proposer_worker_base import NonLLMProposerWorkerBase


class MLPSpeculatorWorker(NonLLMProposerWorkerBase, MultiStepWorker):
    """Worker for MLPSpeculator models.

    Not currently compatible with LoRA or chunked prefill.
    """

    @torch.inference_mode()
    def sampler_output(
        self,
        execute_model_req: ExecuteModelRequest,
        sample_len: int,
        # Unused parameter. MLPSpeculatorWorker does not use the KV Cache and
        # therefore does not need this parameter.
        seq_ids_with_bonus_token_in_last_step: Set[int],
    ) -> Tuple[List[SamplerOutput], bool]:
  22. """Run the model forward pass to generate sample_len future tokens.
  23. Returns the list of sampler output, one per layer, along with indicator
  24. of whether torch tensor in sampler output need to be transposed in
  25. latter sampler_output_to_torch logic.
  26. For mlp spec worker, this indicator shall be True.
  27. """
        self._raise_if_unsupported(execute_model_req)

        seq_group_metadata_list = execute_model_req.seq_group_metadata_list

        (input_tokens, seq_lens,
         query_lens) = self._prepare_input_tensors(seq_group_metadata_list)

        generators = self.model_runner.get_generators(
            execute_model_req.finished_requests_ids)
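
        # Rebuild sampling metadata for the flattened speculative batch; the
        # per-request generators keep seeded sampling reproducible across
        # proposal steps.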
        sampling_metadata = SamplingMetadata.prepare(
            seq_group_metadata_list, seq_lens, query_lens, self.device,
            self.model_runner.pin_memory, generators)
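
        # The MLP speculator conditions its proposals on the base model's
        # hidden states from the previous step (carried on the request)
        # rather than on a KV cache.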
        model_outputs = self.model_runner.model.generate_proposals(
            input_ids=input_tokens,
            previous_hidden_states=execute_model_req.previous_hidden_states.
            hidden_states,
            num_predict_tokens=sample_len,
            sampling_metadata=sampling_metadata)

        assert len(model_outputs) == sample_len
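
        # Each of the sample_len outputs covers the whole batch, so the
        # stacked proposal tensors are effectively [sample_len, batch]; the
        # True flag tells the later sampler_output_to_torch step to transpose
        # them to [batch, sample_len].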
        return model_outputs, True

    def _prepare_input_tensors(
        self,
        seq_group_metadata_list: Optional[List[SequenceGroupMetadata]],
    ) -> Tuple[torch.Tensor, List[int], List[int]]:
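        """Flatten the batch into the flat inputs generate_proposals expects.

        Returns the token ids of every sequence concatenated into a single
        1-D tensor, together with per-sequence sequence lengths and query
        lengths (the number of tokens each sequence contributes this step).
        """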
        if not seq_group_metadata_list:
            return torch.empty(0, device=self.device), [], []

        input_tokens: List[int] = []
        seq_lens: List[int] = []
        query_lens: List[int] = []

        for seq_group_metadata in seq_group_metadata_list:
            is_prompt = seq_group_metadata.is_prompt

            for seq_data in seq_group_metadata.seq_data.values():
                seq_data_len = seq_data.get_len()
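                # Prompt sequences contribute their not-yet-computed prompt
                # tokens (capped at token_chunk_size); decode sequences only
                # need their last sampled token.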
                if is_prompt:
                    context_len = seq_data.get_num_computed_tokens()
                    seq_len = min(
                        seq_data_len,
                        context_len + seq_group_metadata.token_chunk_size)
                    tokens = seq_data.get_token_ids()[context_len:seq_len]
                    seq_lens.append(seq_len)
                    input_tokens.extend(tokens)
                    query_lens.append(seq_len - context_len)
                else:
                    seq_lens.append(seq_data_len)
                    input_tokens.append(seq_data.get_last_token_id())
                    query_lens.append(1)

        input_tokens_tensor = torch.tensor(input_tokens,
                                           dtype=torch.long,
                                           device=self.device)
        return input_tokens_tensor, seq_lens, query_lens
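

# Example of the flattened layout produced by _prepare_input_tensors
# (illustrative values only): for a batch containing one prompt sequence with
# 4 uncomputed prompt tokens and one decode sequence, input_tokens is a
# 5-element tensor (the 4 prompt ids followed by the decode sequence's last
# token id), seq_lens == [4, <decode sequence length>] and
# query_lens == [4, 1].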