# image.py
from functools import lru_cache

import torch
from loguru import logger
from PIL import Image

from aphrodite.common.config import ModelConfig
from aphrodite.common.utils import is_list_of
from aphrodite.inputs.registry import InputContext
from aphrodite.transformers_utils.image_processor import get_image_processor

from .base import MultiModalData, MultiModalInputs, MultiModalPlugin
  10. cached_get_image_processor = lru_cache(get_image_processor)
  11. class ImagePlugin(MultiModalPlugin):
  12. """Plugin for image data."""
  13. def get_data_key(self) -> str:
  14. return "image"
  15. def _get_hf_image_processor(self, model_config: ModelConfig):
  16. return cached_get_image_processor(
  17. model_config.model,
  18. trust_remote_code=model_config.trust_remote_code)
  19. def _default_input_mapper(
  20. self,
  21. ctx: InputContext,
  22. data: MultiModalData[object],
  23. ) -> MultiModalInputs:
  24. model_config = ctx.model_config
  25. # PIL image
  26. if isinstance(data, Image.Image) or is_list_of(data, Image.Image):
  27. image_processor = self._get_hf_image_processor(model_config)
  28. if image_processor is None:
  29. raise RuntimeError("No HuggingFace processor is available "
  30. "to process the image object")
  31. try:
  32. batch_data = image_processor \
  33. .preprocess(data, return_tensors="pt") \
  34. .data
  35. except Exception:
  36. logger.error(f"Failed to process image ({data})")
  37. raise
  38. return MultiModalInputs(batch_data)
  39. # Image embedding
  40. elif isinstance(data, torch.Tensor) or is_list_of(data, torch.Tensor):
  41. return MultiModalInputs({"image_embeds": data})
  42. raise TypeError(f"Invalid image type: {type(data)}")
  43. def _default_max_multimodal_tokens(self, ctx: InputContext) -> int:
  44. return 3000