proposer_worker_base.py

from abc import ABC, abstractmethod
from typing import List, Optional, Set, Tuple

from aphrodite.common.sequence import ExecuteModelRequest
from aphrodite.modeling.layers.sampler import SamplerOutput
from aphrodite.spec_decode.interfaces import SpeculativeProposer
from aphrodite.worker.worker_base import LoraNotSupportedWorkerBase


class ProposerWorkerBase(LoraNotSupportedWorkerBase, SpeculativeProposer):
    """Interface for proposer workers"""

    @abstractmethod
    def sampler_output(
        self,
        execute_model_req: ExecuteModelRequest,
        sample_len: int,
        # A set containing all sequence IDs that were assigned bonus tokens
        # in their last forward pass. This set is used to backfill the KV cache
        # with the key-value pairs of the penultimate token in the sequences.
        # This parameter is only used by the MultiStepWorker, which relies on
        # the KV cache for token generation. It is not used by workers that
        # do not utilize the KV cache.
        seq_ids_with_bonus_token_in_last_step: Set[int]
    ) -> Tuple[Optional[List[SamplerOutput]], bool]:
        raise NotImplementedError

    def set_include_gpu_probs_tensor(self) -> None:
        """Implementation optional"""
        pass

    def set_should_modify_greedy_probs_inplace(self) -> None:
        """Implementation optional"""
        pass
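

# --- Illustrative sketch (not part of this module) --------------------------
# A minimal hypothetical subclass showing the sampler_output contract. The
# class name `_NoOpProposerWorker` and its behavior are invented purely for
# illustration; real proposers such as the MultiStepWorker run a draft model
# for `sample_len` steps and, per the parameter comment above, consult
# `seq_ids_with_bonus_token_in_last_step` to backfill their KV cache. The
# other abstract worker/proposer methods are omitted for brevity, so this
# sketch is not instantiable as-is.
class _NoOpProposerWorker(ProposerWorkerBase):

    def sampler_output(
        self,
        execute_model_req: ExecuteModelRequest,
        sample_len: int,
        seq_ids_with_bonus_token_in_last_step: Set[int],
    ) -> Tuple[Optional[List[SamplerOutput]], bool]:
        # Propose nothing: no per-step SamplerOutputs are returned, and the
        # boolean flag (whose exact meaning is defined by the consuming
        # proposer logic) is left as False.
        return None, False
# -----------------------------------------------------------------------------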
class NonLLMProposerWorkerBase(ProposerWorkerBase, ABC):
    """Proposer worker which does not use a model with kvcache"""

    def execute_model(
        self,
        execute_model_req: Optional[ExecuteModelRequest] = None
    ) -> List[SamplerOutput]:
        """get_spec_proposals is used to get the proposals"""
        return []

    def determine_num_available_blocks(self) -> Tuple[int, int]:
        """This is never called on the proposer, only the target model"""
        raise NotImplementedError

    def initialize_cache(self, num_gpu_blocks: int,
                         num_cpu_blocks: int) -> None:
        pass

    def get_cache_block_size_bytes(self) -> int:
        return 0
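

# --- Illustrative sketch (not part of this module) --------------------------
# A hypothetical KV-cache-free proposer built on NonLLMProposerWorkerBase.
# Because this base already provides trivial execute_model / cache methods,
# such a worker only needs to supply sampler_output (plus the proposal-side
# methods from SpeculativeProposer, omitted here). The class name and behavior
# are invented for illustration; a real lookup-style proposer would construct
# SamplerOutputs from the prompt/context tokens instead of returning None.
class _LookupProposerWorker(NonLLMProposerWorkerBase):

    def sampler_output(
        self,
        execute_model_req: ExecuteModelRequest,
        sample_len: int,
        seq_ids_with_bonus_token_in_last_step: Set[int],
    ) -> Tuple[Optional[List[SamplerOutput]], bool]:
        # No KV cache is involved, so the bonus-token bookkeeping set can be
        # ignored; returning None signals that no proposals were generated
        # for this batch.
        return None, False
# -----------------------------------------------------------------------------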