# proposer_worker_base.py
  1. from abc import ABC, abstractmethod
  2. from typing import List, Optional, Set, Tuple
  3. from aphrodite.common.sequence import ExecuteModelRequest, SamplerOutput
  4. from aphrodite.lora.request import LoRARequest
  5. from aphrodite.spec_decode.interfaces import SpeculativeProposer
  6. from aphrodite.task_handler.worker_base import LoraNotSupportedWorkerBase
  7. class ProposerWorkerBase(LoraNotSupportedWorkerBase, SpeculativeProposer):
  8. """Interface for proposer workers"""
  9. @abstractmethod
  10. def sampler_output(
  11. self,
  12. execute_model_req: ExecuteModelRequest,
  13. sample_len: int,
  14. # A set containing all sequence IDs that were assigned bonus tokens
  15. # in their last forward pass. This set is used to backfill the KV cache
  16. # with the key-value pairs of the penultimate token in the sequences.
  17. # This parameter is only used by the MultiStepWorker, which relies on
  18. # the KV cache for token generation. It is not used by workers that
  19. # do not utilize the KV cache.
  20. seq_ids_with_bonus_token_in_last_step: Set[int]
  21. ) -> Tuple[Optional[List[SamplerOutput]], bool]:
  22. raise NotImplementedError
  23. def set_include_gpu_probs_tensor(self) -> None:
  24. """Implementation optional"""
  25. pass
  26. def add_lora(self, lora_request: LoRARequest) -> bool:
  27. raise ValueError(f"{type(self)} does not support LoRA")
  28. def remove_lora(self, lora_id: int) -> bool:
  29. raise ValueError(f"{type(self)} does not support LoRA")
  30. def list_loras(self) -> Set[int]:
  31. raise ValueError(f"{type(self)} does not support LoRA")
  32. class NonLLMProposerWorkerBase(ProposerWorkerBase, ABC):
  33. """Proposer worker which does not use a model with kvcache"""
  34. def execute_model(
  35. self,
  36. execute_model_req: Optional[ExecuteModelRequest] = None
  37. ) -> List[SamplerOutput]:
  38. """get_spec_proposals is used to get the proposals"""
  39. return []
  40. def determine_num_available_blocks(self) -> Tuple[int, int]:
  41. """This is never called on the proposer, only the target model"""
  42. raise NotImplementedError
  43. def initialize_cache(self, num_gpu_blocks: int,
  44. num_cpu_blocks: int) -> None:
  45. pass
  46. def get_cache_block_size_bytes(self) -> int:
  47. return 0
  48. def add_lora(self, lora_request: LoRARequest) -> bool:
  49. raise ValueError(f"{type(self)} does not support LoRA")
  50. def remove_lora(self, lora_id: int) -> bool:
  51. raise ValueError(f"{type(self)} does not support LoRA")
  52. def list_loras(self) -> Set[int]:
  53. raise ValueError(f"{type(self)} does not support LoRA")