# image.py
from functools import lru_cache

import torch
from loguru import logger
from PIL import Image

from aphrodite.common.config import ModelConfig
from aphrodite.common.utils import is_list_of
from aphrodite.inputs.registry import InputContext
from aphrodite.transformers_utils.image_processor import get_image_processor

from .base import MultiModalInputs, MultiModalPlugin

# Module-level memoized wrapper around get_image_processor: repeated calls
# with the same (model, trust_remote_code) arguments reuse the already
# constructed HF image processor instead of rebuilding it.
# NOTE(review): lru_cache's default maxsize (128) bounds this cache.
cached_get_image_processor = lru_cache(get_image_processor)
  11. class ImagePlugin(MultiModalPlugin):
  12. def get_data_key(self) -> str:
  13. return "image"
  14. def _get_hf_image_processor(self, model_config: ModelConfig):
  15. return cached_get_image_processor(
  16. model_config.model,
  17. trust_remote_code=model_config.trust_remote_code)
  18. def _default_input_mapper(self, ctx: InputContext,
  19. data: object) -> MultiModalInputs:
  20. model_config = ctx.model_config
  21. if isinstance(data, Image.Image) or is_list_of(data, Image.Image):
  22. image_processor = self._get_hf_image_processor(model_config)
  23. if image_processor is None:
  24. raise RuntimeError("No HuggingFace processor is available"
  25. "to process the image object")
  26. try:
  27. batch_data = image_processor \
  28. .preprocess(data, return_tensors="pt") \
  29. .data
  30. except Exception:
  31. logger.error(f"Failed to process image ({data})")
  32. raise
  33. return MultiModalInputs(batch_data)
  34. elif isinstance(data, torch.Tensor) or is_list_of(data, torch.Tensor):
  35. return MultiModalInputs({"image_embeds": data})
  36. raise TypeError(f"Invalid image type: {type(data)}")
  37. def _default_max_multimodal_tokens(self, ctx: InputContext) -> int:
  38. return 3000