xpu_executor.py

from typing import List, Optional

import torch
from loguru import logger

from aphrodite.common.config import (CacheConfig, DeviceConfig, LoadConfig,
                                     LoRAConfig, ModelConfig, MultiModalConfig,
                                     ParallelConfig, PromptAdapterConfig,
                                     SchedulerConfig, SpeculativeConfig)
from aphrodite.common.sequence import ExecuteModelRequest, SamplerOutput
from aphrodite.common.utils import make_async
from aphrodite.executor.executor_base import ExecutorAsyncBase
from aphrodite.executor.gpu_executor import GPUExecutor
from aphrodite.task_handler.worker_base import WorkerWrapperBase


class XPUExecutor(GPUExecutor):
    """Executor that runs the model on a single Intel XPU device."""

    uses_ray: bool = False

    def __init__(
        self,
        model_config: ModelConfig,
        cache_config: CacheConfig,
        parallel_config: ParallelConfig,
        scheduler_config: SchedulerConfig,
        device_config: DeviceConfig,
        load_config: LoadConfig,
        lora_config: Optional[LoRAConfig],
        multimodal_config: Optional[MultiModalConfig],
        prompt_adapter_config: Optional[PromptAdapterConfig],
        speculative_config: Optional[SpeculativeConfig],
    ) -> None:
        assert device_config.device_type == "xpu"
        assert (not speculative_config
                ), "Speculative decoding not yet supported for XPU backend"

        model_config = _verify_and_get_model_config(model_config)

        self.model_config = model_config
        self.cache_config = cache_config
        self.load_config = load_config
        self.lora_config = lora_config
        self.parallel_config = parallel_config
        self.scheduler_config = scheduler_config
        self.device_config = device_config
        self.multimodal_config = multimodal_config
        self.prompt_adapter_config = prompt_adapter_config
        self.speculative_config = None

        # Instantiate the worker and load the model onto the XPU device.
        self._init_executor()

    def _create_worker(self,
                       local_rank: int = 0,
                       rank: int = 0,
                       distributed_init_method: Optional[str] = None):
        if self.speculative_config is None:
            worker_module_name = "aphrodite.task_handler.xpu_worker"
            worker_class_name = "XPUWorker"
        else:
            raise NotImplementedError(
                "XPU does not support speculative decoding")

        wrapper = WorkerWrapperBase(
            worker_module_name=worker_module_name,
            worker_class_name=worker_class_name,
        )
        wrapper.init_worker(**self._get_worker_kwargs(local_rank, rank,
                                                      distributed_init_method))
        return wrapper.worker
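
    # Implementation note: WorkerWrapperBase resolves the worker class by
    # module/class name instead of importing XPUWorker at the top of this
    # file, so XPU-only dependencies load lazily when the worker is built.
    # A rough sketch of that lazy-import pattern (an assumption here;
    # `worker_kwargs` is a hypothetical name -- see
    # aphrodite.task_handler.worker_base for the actual code):
    #
    #     module = importlib.import_module(worker_module_name)
    #     worker_class = getattr(module, worker_class_name)
    #     worker = worker_class(**worker_kwargs)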

    def execute_model(
            self,
            execute_model_req: ExecuteModelRequest) -> List[SamplerOutput]:
        output = self.driver_worker.execute_model(execute_model_req)
        return output


class XPUExecutorAsync(XPUExecutor, ExecutorAsyncBase):

    async def execute_model_async(
        self,
        execute_model_req: ExecuteModelRequest,
    ) -> List[SamplerOutput]:
        output = await make_async(self.driver_worker.execute_model
                                  )(execute_model_req=execute_model_req)
        return output
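
    # make_async offloads the blocking execute_model call to a thread pool so
    # the event loop is not stalled while the model runs. A rough sketch of
    # the pattern (an assumption about aphrodite.common.utils.make_async, not
    # the verified implementation):
    #
    #     def make_async(func):
    #         def _wrapper(*args, **kwargs):
    #             loop = asyncio.get_event_loop()
    #             return loop.run_in_executor(
    #                 None, functools.partial(func, *args, **kwargs))
    #         return _wrapper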


def _verify_and_get_model_config(config: ModelConfig) -> ModelConfig:
    """Downgrade settings the XPU backend cannot honor: bfloat16 is cast to
    float16, and eager mode is forced since CUDA graphs are unavailable."""
    if config.dtype == torch.bfloat16:
        logger.warning(
            "bfloat16 is not fully supported on XPU, casting to float16.")
        config.dtype = torch.float16
    if not config.enforce_eager:
        logger.warning(
            "CUDA graph is not supported on XPU, falling back to eager "
            "mode.")
        config.enforce_eager = True
    return config
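

# --- Usage sketch (illustrative, not part of the executor API) ---
# A minimal demonstration of the config adjustments above, using a
# SimpleNamespace stand-in for ModelConfig (an assumption for illustration;
# the real executor receives a fully built ModelConfig instance).
if __name__ == "__main__":
    from types import SimpleNamespace

    # Stand-in config with the two attributes the helper touches.
    fake_config = SimpleNamespace(dtype=torch.bfloat16, enforce_eager=False)
    adjusted = _verify_and_get_model_config(fake_config)  # type: ignore[arg-type]
    assert adjusted.dtype == torch.float16  # bfloat16 was downcast
    assert adjusted.enforce_eager is True   # CUDA graphs disabled on XPU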