@@ -3,7 +3,8 @@ import tempfile
 from dataclasses import dataclass
 from functools import lru_cache
 from pathlib import Path
-from typing import Any, Awaitable, Iterable, List, Optional, Tuple, Union, cast
+from typing import (Any, Awaitable, Iterable, List, Literal, Optional, Tuple,
+                    Union, cast)

 import requests
 from loguru import logger
@@ -23,10 +24,25 @@ from typing_extensions import Required, TypedDict

 from aphrodite.common.config import ModelConfig
 from aphrodite.multimodal import MultiModalDataDict
-from aphrodite.multimodal.utils import async_get_and_parse_image
+from aphrodite.multimodal.utils import (async_get_and_parse_audio,
+                                        async_get_and_parse_image)
 from aphrodite.transformers_utils.tokenizer import AnyTokenizer


+class AudioURL(TypedDict, total=False):
+    url: Required[str]
+    """
+    Either a URL of the audio or a data URL with base64 encoded audio data.
+    """
+
+
+class ChatCompletionContentPartAudioParam(TypedDict, total=False):
+    audio_url: Required[AudioURL]
+
+    type: Required[Literal["audio_url"]]
+    """The type of the content part."""
+
+
 class CustomChatCompletionContentPartParam(TypedDict, total=False):
     __pydantic_config__ = ConfigDict(extra="allow")  # type: ignore

@@ -35,6 +51,7 @@ class CustomChatCompletionContentPartParam(TypedDict, total=False):


 ChatCompletionContentPartParam = Union[OpenAIChatCompletionContentPartParam,
+                                       ChatCompletionContentPartAudioParam,
                                        CustomChatCompletionContentPartParam]

@@ -103,35 +120,41 @@ def load_chat_template(


 @lru_cache(maxsize=None)
-def _image_token_str(model_config: ModelConfig,
-                     tokenizer: PreTrainedTokenizer) -> Optional[str]:
+def _mm_token_str(model_config: ModelConfig, tokenizer: PreTrainedTokenizer,
+                  modality: Literal["image", "audio"]) -> Optional[str]:
     # TODO: Let user specify how to insert image tokens into prompt
     # (similar to chat template)
-    model_type = model_config.hf_config.model_type
-    if model_type == "phi3_v":
-        # Workaround since this token is not defined in the tokenizer
-        return "<|image_1|>"
-    if model_type == "minicpmv":
-        return "(<image>./</image>)"
-    if model_type in ("blip-2", "chatglm", "fuyu", "paligemma"):
-        # These models do not use image tokens in the prompt
-        return None
-    if model_type.startswith("llava"):
-        return tokenizer.decode(model_config.hf_config.image_token_index)
-    if model_type in ("chameleon", "internvl_chat"):
-        return "<image>"
-
-    raise TypeError(f"Unknown model type: {model_type}")
-
-
-# TODO: Let user specify how to insert image tokens into prompt
+    if modality == "image":
+        model_type = model_config.hf_config.model_type
+        if model_type == "phi3_v":
+            # Workaround since this token is not defined in the tokenizer
+            return "<|image_1|>"
+        if model_type == "minicpmv":
+            return "(<image>./</image>)"
+        if model_type in ("blip-2", "chatglm", "fuyu", "paligemma"):
+            # These models do not use image tokens in the prompt
+            return None
+        if model_type.startswith("llava"):
+            return tokenizer.decode(model_config.hf_config.image_token_index)
+        if model_type in ("chameleon", "internvl_chat"):
+            return "<image>"
+
+        raise TypeError(f"Unknown model type: {model_type}")
+    elif modality == "audio":
+        raise TypeError("No audio models are supported yet.")
+    else:
+        raise TypeError(f"Unknown modality: {modality}")
+
+
+# TODO: Let user specify how to insert multimodal tokens into prompt
 # (similar to chat template)
-def _get_full_image_text_prompt(image_token_str: str, text_prompt: str) -> str:
-    """Combine image and text prompts for vision language model"""
+def _get_full_multimodal_text_prompt(placeholder_token_str: str,
+                                     text_prompt: str) -> str:
+    """Combine multimodal prompts for a multimodal language model"""

     # NOTE: For now we assume all model architectures use the same
-    # image + text prompt format. This may change in the future.
-    return f"{image_token_str}\n{text_prompt}"
+    # placeholder + text prompt format. This may change in the future.
+    return f"{placeholder_token_str}\n{text_prompt}"


 def _parse_chat_message_content_parts(
@@ -142,6 +165,7 @@ def _parse_chat_message_content_parts(
 ) -> ChatMessageParseResult:
     texts: List[str] = []
     mm_futures: List[Awaitable[MultiModalDataDict]] = []
+    modality: Literal["image", "audio"] = "image"

     for part in parts:
         part_type = part["type"]
@@ -149,9 +173,10 @@ def _parse_chat_message_content_parts(
             text = cast(ChatCompletionContentPartTextParam, part)["text"]
             texts.append(text)
         elif part_type == "image_url":
+            modality = "image"
             if len(mm_futures) > 0:
                 raise NotImplementedError(
-                    "Multiple 'image_url' input is currently not supported.")
+                    "Multiple multimodal inputs are currently not supported.")

             image_url = cast(ChatCompletionContentPartImageParam,
                              part)["image_url"]
@@ -163,21 +188,32 @@ def _parse_chat_message_content_parts(

             image_future = async_get_and_parse_image(image_url["url"])
             mm_futures.append(image_future)
+        elif part_type == "audio_url":
+            modality = "audio"
+            if len(mm_futures) > 0:
+                raise NotImplementedError(
+                    "Multiple multimodal inputs are currently not supported.")
+
+            audio_url = cast(ChatCompletionContentPartAudioParam,
+                             part)["audio_url"]
+            audio_future = async_get_and_parse_audio(audio_url["url"])
+            mm_futures.append(audio_future)
         else:
             raise NotImplementedError(f"Unknown part type: {part_type}")

     text_prompt = "\n".join(texts)

     if mm_futures:
-        image_token_str = _image_token_str(model_config, tokenizer)
-        if image_token_str is not None:
-            if image_token_str in text_prompt:
+        placeholder_token_str = _mm_token_str(model_config, tokenizer,
+                                              modality)
+        if placeholder_token_str is not None:
+            if placeholder_token_str in text_prompt:
                 logger.warning(
-                    "Detected image token string in the text prompt. "
+                    "Detected multi-modal token string in the text prompt. "
                     "Skipping prompt formatting.")
             else:
-                text_prompt = _get_full_image_text_prompt(
-                    image_token_str=image_token_str,
+                text_prompt = _get_full_multimodal_text_prompt(
+                    placeholder_token_str=placeholder_token_str,
                     text_prompt=text_prompt,
                 )
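
For reference, a minimal sketch of the request shape this diff starts accepting, mirroring the new ChatCompletionContentPartAudioParam / AudioURL TypedDicts above. The server address, model name, and audio URL below are illustrative placeholders, not values from this change; and since _mm_token_str still raises TypeError for the "audio" modality, such a request cannot succeed until an audio model is actually supported:

import requests

# Hypothetical request to the OpenAI-compatible chat completions route,
# carrying one text part and one "audio_url" content part.
payload = {
    "model": "some-audio-capable-model",  # placeholder; none supported yet
    "messages": [{
        "role": "user",
        "content": [
            {"type": "text", "text": "What is said in this clip?"},
            {
                "type": "audio_url",
                # Per AudioURL, "url" is either a plain URL or a
                # base64-encoded data URL.
                "audio_url": {"url": "https://example.com/sample.wav"},
            },
        ],
    }],
}

# Host/port are placeholders for wherever the API server is running.
response = requests.post("http://localhost:2242/v1/chat/completions",
                         json=payload)
print(response.json())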