target_model_runner.py

from typing import List, Optional

from aphrodite.common.config import (CacheConfig, DeviceConfig, LoadConfig,
                                     LoRAConfig, ModelConfig, ParallelConfig,
                                     PromptAdapterConfig, SchedulerConfig)
from aphrodite.common.sequence import SequenceGroupMetadata
from aphrodite.task_handler.model_runner import (
    ModelInputForGPUWithSamplingMetadata, ModelRunner)


class TargetModelRunner(ModelRunner):
    """Specialized model runner for the speculative decoding target model.

    In speculative decoding, the log probabilities that are ultimately
    selected may not be the same ones sampled by the target model. Since log
    probabilities are computed after deciding which tokens are accepted, the
    time the target model spends computing them is wasted, so disabling log
    probabilities in the target model makes decoding faster. The model
    runner sets the SamplingMetadata parameters according to whether log
    probabilities are requested or not.
    """

    def __init__(self,
                 model_config: ModelConfig,
                 parallel_config: ParallelConfig,
                 scheduler_config: SchedulerConfig,
                 device_config: DeviceConfig,
                 cache_config: CacheConfig,
                 load_config: LoadConfig,
                 lora_config: Optional[LoRAConfig],
                 kv_cache_dtype: Optional[str] = "auto",
                 is_driver_worker: bool = False,
                 prompt_adapter_config: Optional[PromptAdapterConfig] = None,
                 return_hidden_states: bool = False,
                 **kwargs):
        # Internal boolean flag indicating whether token log probabilities
        # are needed or not.
        self.disable_logprobs = True
        super().__init__(
            model_config=model_config,
            parallel_config=parallel_config,
            scheduler_config=scheduler_config,
            device_config=device_config,
            cache_config=cache_config,
            load_config=load_config,
            lora_config=lora_config,
            kv_cache_dtype=kv_cache_dtype,
            is_driver_worker=is_driver_worker,
            prompt_adapter_config=prompt_adapter_config,
            return_hidden_states=return_hidden_states,
            **kwargs,
        )

    def prepare_model_input(
        self,
        seq_group_metadata_list: List[SequenceGroupMetadata],
        virtual_engine: int = 0,
        finished_requests_ids: Optional[List[str]] = None,
    ) -> ModelInputForGPUWithSamplingMetadata:
        model_input: ModelInputForGPUWithSamplingMetadata = (
            super().prepare_model_input(seq_group_metadata_list,
                                        virtual_engine,
                                        finished_requests_ids))
        # If token log probabilities are disabled, skip generating the
        # sampler CPU output; the GPU sampled_token_id tensors are
        # serialized directly as needed. If log probabilities are enabled,
        # synchronize all the sampling-related tensors, which includes the
        # logprobs tensors.
        model_input.sampling_metadata.skip_sampler_cpu_output = (
            self.disable_logprobs)
        return model_input
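
Constructing a real ModelRunner requires the full set of engine configs, so as a minimal, library-free sketch of the pattern this file uses, the stand-ins below (every Fake* name is hypothetical, not part of aphrodite) show how overriding prepare_model_input lets a subclass flip skip_sampler_cpu_output per call:

from dataclasses import dataclass, field
from typing import List


@dataclass
class FakeSamplingMetadata:
    # Stand-in for aphrodite's SamplingMetadata; only the one flag we need.
    skip_sampler_cpu_output: bool = False


@dataclass
class FakeModelInput:
    # Stand-in for ModelInputForGPUWithSamplingMetadata.
    tokens: List[int]
    sampling_metadata: FakeSamplingMetadata = field(
        default_factory=FakeSamplingMetadata)


class FakeModelRunner:
    # Stand-in base runner: builds a model input from request metadata.
    def prepare_model_input(self, tokens: List[int]) -> FakeModelInput:
        return FakeModelInput(tokens=tokens)


class FakeTargetModelRunner(FakeModelRunner):
    # Mirrors TargetModelRunner: delegate to the base class, then flip one
    # flag on the prepared input.
    def __init__(self) -> None:
        # Logprobs are assumed unneeded until a caller says otherwise,
        # matching the `disable_logprobs = True` default above.
        self.disable_logprobs = True

    def prepare_model_input(self, tokens: List[int]) -> FakeModelInput:
        model_input = super().prepare_model_input(tokens)
        # Skipping the sampler's CPU output avoids a GPU-to-CPU sync when
        # the logprobs would be thrown away anyway.
        model_input.sampling_metadata.skip_sampler_cpu_output = (
            self.disable_logprobs)
        return model_input


runner = FakeTargetModelRunner()
assert runner.prepare_model_input(
    [1, 2, 3]).sampling_metadata.skip_sampler_cpu_output

runner.disable_logprobs = False  # a caller asked for logprobs after all
assert not runner.prepare_model_input(
    [4]).sampling_metadata.skip_sampler_cpu_output

In the full engine, whichever component drives speculative decoding would presumably set disable_logprobs back to False when a request actually asks for log probabilities; that wiring lives outside this file.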