# utils.py

import base64
from functools import lru_cache
from io import BytesIO
from typing import Any, List, Optional, Tuple, TypeVar, Union

import librosa
import numpy as np
import numpy.typing as npt
import soundfile
from loguru import logger
from PIL import Image

from aphrodite.common.envs import (APHRODITE_AUDIO_FETCH_TIMEOUT,
                                   APHRODITE_IMAGE_FETCH_TIMEOUT)
from aphrodite.connections import global_http_connection
from aphrodite.multimodal.base import MultiModalDataDict
from aphrodite.transformers_utils.tokenizer import AnyTokenizer, get_tokenizer

# Cache tokenizer lookups so repeated calls with the same arguments do not
# re-load the tokenizer from disk.
cached_get_tokenizer = lru_cache(get_tokenizer)


def _load_image_from_bytes(b: bytes):
    image = Image.open(BytesIO(b))
    # Force the image data to be fully decoded so the BytesIO buffer can be
    # safely discarded afterwards.
    image.load()
    return image


def _load_image_from_data_url(image_url: str):
    # Only split once and assume the second part is the base64 encoded image
    _, image_base64 = image_url.split(",", 1)
    return load_image_from_base64(image_base64)


def fetch_image(image_url: str, *, image_mode: str = "RGB") -> Image.Image:
    """
    Load a PIL image from an HTTP or base64 data URL.

    By default, the image is converted into RGB format.
    """
    if image_url.startswith('http'):
        image_raw = global_http_connection.get_bytes(
            image_url, timeout=APHRODITE_IMAGE_FETCH_TIMEOUT)
        image = _load_image_from_bytes(image_raw)
    elif image_url.startswith('data:image'):
        image = _load_image_from_data_url(image_url)
    else:
        raise ValueError("Invalid 'image_url': A valid 'image_url' must start "
                         "with either 'data:image' or 'http'.")

    return image.convert(image_mode)
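
# A minimal usage sketch (illustrative, not part of the module API): the URL
# is a placeholder and network access is assumed.
#
#     >>> image = fetch_image("https://example.com/cat.jpg")
#     >>> image.mode
#     'RGB'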


async def async_fetch_image(image_url: str,
                            *,
                            image_mode: str = "RGB") -> Image.Image:
    """
    Asynchronously load a PIL image from an HTTP or base64 data URL.

    By default, the image is converted into RGB format.
    """
    if image_url.startswith('http'):
        image_raw = await global_http_connection.async_get_bytes(
            image_url, timeout=APHRODITE_IMAGE_FETCH_TIMEOUT)
        image = _load_image_from_bytes(image_raw)
    elif image_url.startswith('data:image'):
        image = _load_image_from_data_url(image_url)
    else:
        raise ValueError("Invalid 'image_url': A valid 'image_url' must start "
                         "with either 'data:image' or 'http'.")

    return image.convert(image_mode)
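
# A hedged sketch of driving the async variant from synchronous code; the URL
# is again a placeholder.
#
#     >>> import asyncio
#     >>> image = asyncio.run(async_fetch_image("https://example.com/cat.jpg"))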


def fetch_audio(audio_url: str) -> Tuple[np.ndarray, Union[int, float]]:
    """
    Load audio from an HTTP or base64 data URL.

    Returns a tuple of the audio waveform and its sampling rate.
    """
    if audio_url.startswith("http"):
        audio_bytes = global_http_connection.get_bytes(
            audio_url, timeout=APHRODITE_AUDIO_FETCH_TIMEOUT)
    elif audio_url.startswith("data:audio"):
        _, audio_base64 = audio_url.split(",", 1)
        audio_bytes = base64.b64decode(audio_base64)
    else:
        raise ValueError("Invalid 'audio_url': A valid 'audio_url' must start "
                         "with either 'data:audio' or 'http'.")

    return librosa.load(BytesIO(audio_bytes), sr=None)
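
# A data-URL sketch (payload elided): `sr=None` tells librosa to keep the
# file's native sampling rate rather than resampling.
#
#     >>> audio, sr = fetch_audio("data:audio/wav;base64,...")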


async def async_fetch_audio(
        audio_url: str) -> Tuple[np.ndarray, Union[int, float]]:
    """
    Asynchronously load audio from an HTTP or base64 data URL.

    Returns a tuple of the audio waveform and its sampling rate.
    """
    if audio_url.startswith("http"):
        audio_bytes = await global_http_connection.async_get_bytes(
            audio_url, timeout=APHRODITE_AUDIO_FETCH_TIMEOUT)
    elif audio_url.startswith("data:audio"):
        _, audio_base64 = audio_url.split(",", 1)
        audio_bytes = base64.b64decode(audio_base64)
    else:
        raise ValueError("Invalid 'audio_url': A valid 'audio_url' must start "
                         "with either 'data:audio' or 'http'.")

    return librosa.load(BytesIO(audio_bytes), sr=None)


def get_and_parse_audio(audio_url: str) -> MultiModalDataDict:
    """Fetch audio and wrap it in a multi-modal data dict."""
    audio, sr = fetch_audio(audio_url)
    return {"audio": (audio, sr)}


def get_and_parse_image(image_url: str) -> MultiModalDataDict:
    """Fetch an image and wrap it in a multi-modal data dict."""
    image = fetch_image(image_url)
    return {"image": image}


async def async_get_and_parse_audio(audio_url: str) -> MultiModalDataDict:
    """Asynchronously fetch audio and wrap it in a multi-modal data dict."""
    audio, sr = await async_fetch_audio(audio_url)
    return {"audio": (audio, sr)}


async def async_get_and_parse_image(image_url: str) -> MultiModalDataDict:
    """Asynchronously fetch an image and wrap it in a multi-modal data dict."""
    image = await async_fetch_image(image_url)
    return {"image": image}


def encode_audio_base64(
    audio: np.ndarray,
    sampling_rate: int,
) -> str:
    """Encode audio as base64-encoded WAV data."""
    buffered = BytesIO()
    soundfile.write(buffered, audio, sampling_rate, format="WAV")

    return base64.b64encode(buffered.getvalue()).decode('utf-8')
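
# A round-trip sketch (illustrative only): encode a synthetic tone to base64,
# then decode it back through `fetch_audio` via a data URL.
#
#     >>> sr = 16000
#     >>> tone = np.sin(2 * np.pi * 440 * np.arange(sr) / sr).astype(np.float32)
#     >>> b64 = encode_audio_base64(tone, sr)
#     >>> decoded, decoded_sr = fetch_audio("data:audio/wav;base64," + b64)
#     >>> int(decoded_sr)
#     16000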


def encode_image_base64(
    image: Image.Image,
    *,
    image_mode: str = "RGB",
    format: str = "JPEG",
) -> str:
    """
    Encode a pillow image to base64 format.

    By default, the image is converted into RGB format before being encoded.
    """
    buffered = BytesIO()
    image = image.convert(image_mode)
    image.save(buffered, format)
    return base64.b64encode(buffered.getvalue()).decode('utf-8')


def load_image_from_base64(image: Union[bytes, str]) -> Image.Image:
    """Load image from base64 format."""
    return _load_image_from_bytes(base64.b64decode(image))
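
# A round-trip sketch (illustrative only): JPEG encoding is lossy, but the
# image dimensions survive the trip.
#
#     >>> img = Image.new("RGB", (32, 32))
#     >>> load_image_from_base64(encode_image_base64(img)).size
#     (32, 32)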


def rescale_image_size(image: Image.Image,
                       size_factor: float,
                       transpose: int = -1) -> Image.Image:
    """
    Rescale the dimensions of an image by a constant factor.

    If `transpose` is non-negative, it is interpreted as a
    `PIL.Image.Transpose` value and applied after resizing.
    """
    new_width = int(image.width * size_factor)
    new_height = int(image.height * size_factor)
    image = image.resize((new_width, new_height))
    if transpose >= 0:
        image = image.transpose(Image.Transpose(transpose))
    return image
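
# A quick sketch of the scaling behavior (dimensions are truncated to ints):
#
#     >>> rescale_image_size(Image.new("RGB", (640, 480)), 0.5).size
#     (320, 240)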


def try_import_video_packages() -> Any:
    try:
        import cv2
    except ImportError:
        raise ImportError(
            "Please install opencv-python for video support.") from None
    return cv2


def resize_video(frames: npt.NDArray, size: Tuple[int, int]) -> npt.NDArray:
    cv2 = try_import_video_packages()

    num_frames, _, _, channels = frames.shape
    # Note: `size` is (height, width), while cv2.resize expects (width, height)
    new_height, new_width = size
    resized_frames = np.empty((num_frames, new_height, new_width, channels),
                              dtype=frames.dtype)
    for i, frame in enumerate(frames):
        resized_frame = cv2.resize(frame, (new_width, new_height))
        resized_frames[i] = resized_frame
    return resized_frames


def rescale_video_size(frames: npt.NDArray, size_factor: float) -> npt.NDArray:
    _, height, width, _ = frames.shape
    new_height = int(height * size_factor)
    new_width = int(width * size_factor)

    return resize_video(frames, (new_height, new_width))
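
# A shape-level sketch (requires opencv-python): frames are laid out as
# (num_frames, height, width, channels).
#
#     >>> frames = np.zeros((8, 480, 640, 3), dtype=np.uint8)
#     >>> rescale_video_size(frames, 0.5).shape
#     (8, 240, 320, 3)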


def sample_frames_from_video(frames: npt.NDArray,
                             num_frames: int) -> npt.NDArray:
    total_frames = frames.shape[0]
    if num_frames == -1:
        return frames

    # Take `num_frames` evenly-spaced frame indices across the video
    frame_indices = np.linspace(0, total_frames - 1, num_frames, dtype=int)
    sampled_frames = frames[frame_indices, ...]
    return sampled_frames
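
# Sampling sketch: 5 evenly-spaced frames out of 100.
#
#     >>> frames = np.zeros((100, 4, 4, 3), dtype=np.uint8)
#     >>> sample_frames_from_video(frames, 5).shape
#     (5, 4, 4, 3)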


# Utilities for input processors
_T = TypeVar("_T", str, int)


def repeat_and_pad_token(
    token: _T,
    *,
    repeat_count: int = 1,
    pad_token_left: Optional[_T] = None,
    pad_token_right: Optional[_T] = None,
) -> List[_T]:
    replacement = [token] * repeat_count
    if pad_token_left is not None:
        replacement = [pad_token_left] + replacement
    if pad_token_right is not None:
        replacement = replacement + [pad_token_right]

    return replacement
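
# Behavior sketch: repeat a token and pad it on both sides.
#
#     >>> repeat_and_pad_token(9, repeat_count=3, pad_token_left=1,
#     ...                      pad_token_right=2)
#     [1, 9, 9, 9, 2]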


def repeat_and_pad_placeholder_tokens(
    tokenizer: AnyTokenizer,
    prompt: Optional[str],
    prompt_token_ids: List[int],
    *,
    placeholder_token_id: int,
    repeat_count: Union[int, List[int]],
    pad_token_left: Optional[int] = None,
    pad_token_right: Optional[int] = None,
) -> Tuple[Optional[str], List[int]]:
    if isinstance(repeat_count, int):
        repeat_count = [repeat_count]

    if prompt is None:
        new_prompt = None
    else:
        placeholder_token_str = tokenizer.decode(placeholder_token_id)
        pad_token_str_left = (None if pad_token_left is None else
                              tokenizer.decode(pad_token_left))
        pad_token_str_right = (None if pad_token_right is None else
                               tokenizer.decode(pad_token_right))

        placeholder_token_count = prompt.count(placeholder_token_str)
        # This is an arbitrary number to distinguish between the two cases
        if placeholder_token_count > 16:
            logger.warning(
                "Please follow the prompt format that is "
                "documented on HuggingFace, which does not involve "
                f"repeating {placeholder_token_str} tokens.")
        if placeholder_token_count < len(repeat_count):
            logger.warning(
                "The number of multi-modal placeholder tokens in the prompt "
                "is less than the number of multi-modal inputs. Extra "
                "placeholder tokens will be treated as plain text.")
            repeat_count = repeat_count[:placeholder_token_count]

        prompt_parts = prompt.split(placeholder_token_str,
                                    maxsplit=len(repeat_count))
        new_prompt = ""
        for i, repeat_count_item in enumerate(repeat_count):
            replacement_str = "".join(
                repeat_and_pad_token(
                    placeholder_token_str,
                    repeat_count=repeat_count_item,
                    pad_token_left=pad_token_str_left,
                    pad_token_right=pad_token_str_right,
                ))
            # The image tokens are removed to be consistent with HuggingFace
            new_prompt += prompt_parts[i] + replacement_str
        new_prompt += prompt_parts[-1]

    new_token_ids: List[int] = []
    placeholder_token_idx = 0
    for i, token in enumerate(prompt_token_ids):
        if token == placeholder_token_id:
            replacement_ids = repeat_and_pad_token(
                placeholder_token_id,
                repeat_count=repeat_count[placeholder_token_idx],
                pad_token_left=pad_token_left,
                pad_token_right=pad_token_right,
            )
            new_token_ids.extend(replacement_ids)
            placeholder_token_idx += 1

            # No need to further scan the list since we replaced all tokens
            if placeholder_token_idx >= len(repeat_count):
                new_token_ids.extend(prompt_token_ids[i + 1:])
                break
        else:
            new_token_ids.append(token)

    return new_prompt, new_token_ids
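
# A minimal behavior sketch (token-ID path only; `tokenizer` is unused when
# `prompt` is None, so None is passed here purely for illustration):
#
#     >>> repeat_and_pad_placeholder_tokens(
#     ...     None,
#     ...     prompt=None,
#     ...     prompt_token_ids=[1, 99, 2],
#     ...     placeholder_token_id=99,
#     ...     repeat_count=3,
#     ... )
#     (None, [1, 99, 99, 99, 2])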