# base.py

import sys
from abc import ABC, abstractmethod
from collections import UserDict, defaultdict
from typing import (Any, Callable, Dict, List, Optional, Type, TypedDict,
                    TypeVar, Union)

import torch
import torch.types
from loguru import logger
from PIL import Image
from torch import nn

from aphrodite.common.config import ModelConfig
from aphrodite.inputs import InputContext

BatchedTensors = Union[torch.Tensor, List[torch.Tensor]]
"""
If each input tensor in the batch has the same size, this is a single batched
tensor; otherwise, this is a list of tensors with one element per batch.
"""

if sys.version_info < (3, 9):
    # UserDict cannot be subscripted
    class _MultiModalInputsBase(UserDict):
        pass
else:
    class _MultiModalInputsBase(UserDict[str, torch.Tensor]):
        pass


class MultiModalInputs(_MultiModalInputsBase):
    """
    A dictionary that represents the keyword arguments to
    :meth:`~torch.nn.Module.forward`.
    """

    @staticmethod
    def try_concat(
        tensors: List[torch.Tensor],
        *,
        device: torch.types.Device,
    ) -> BatchedTensors:
        # Avoid initializing CUDA too early
        import torch

        unbatched_shape = tensors[0].shape[1:]

        for tensor in tensors:
            if tensor.shape[1:] != unbatched_shape:
                return [
                    tensor.squeeze(0).to(device=device) for tensor in tensors
                ]

        return torch.cat(tensors, dim=0).to(device=device)

    @staticmethod
    def batch(
        inputs_list: List["MultiModalInputs"],
        device: torch.types.Device,
    ) -> Dict[str, BatchedTensors]:
        """Batch multiple inputs together into a dictionary."""
        if len(inputs_list) == 0:
            return {}

        keys = inputs_list[0].keys()

        item_lists: Dict[str, List[torch.Tensor]] = defaultdict(list)

        for inputs in inputs_list:
            if inputs.keys() != keys:
                msg = f"Inputs do not share the same keys ({keys})"
                raise ValueError(msg)

            for k, v in inputs.items():
                item_lists[k].append(v)

        return {
            k: MultiModalInputs.try_concat(item_list, device=device)
            for k, item_list in item_lists.items()
        }
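
# Illustrative usage sketch for ``MultiModalInputs.batch``; the shapes and
# ``device="cpu"`` below are hypothetical, chosen only to show the two
# batching behaviors of ``try_concat``:
#
#     a = MultiModalInputs({"pixel_values": torch.zeros(1, 3, 224, 224)})
#     b = MultiModalInputs({"pixel_values": torch.zeros(1, 3, 224, 224)})
#     batched = MultiModalInputs.batch([a, b], device="cpu")
#     # Matching per-item shapes -> one stacked tensor of shape
#     # (2, 3, 224, 224).
#
#     c = MultiModalInputs({"pixel_values": torch.zeros(1, 3, 336, 336)})
#     ragged = MultiModalInputs.batch([a, c], device="cpu")
#     # Mismatched shapes -> a list of squeezed tensors, one per input.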


class MultiModalDataBuiltins(TypedDict, total=False):
    image: Image.Image


MultiModalDataDict = Union[MultiModalDataBuiltins, Dict[str, Any]]
"""
A dictionary containing an item for each modality type to input.

The data belonging to each modality is converted into keyword arguments
to the model by the corresponding mapper. By default, the mapper of
the corresponding plugin with the same modality key is applied.
"""
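
# Illustrative sketch of a multi-modal data dict; ``"example.jpg"`` is a
# placeholder path. Keys other than the builtin ``image`` key are dispatched
# to whichever plugin is registered under the same name:
#
#     mm_data: MultiModalDataDict = {"image": Image.open("example.jpg")}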

MultiModalInputMapper = Callable[[InputContext, object], MultiModalInputs]
"""
Return a dictionary to be passed as keyword arguments to
:meth:`~torch.nn.Module.forward`. This is similar in concept to tokenizers
and processors in HuggingFace Transformers.

If the data is not supported, throw :exc:`TypeError`.
"""

MultiModalTokensCalc = Union[int, Callable[[InputContext], int]]
"""
Calculate the maximum number of multimodal tokens input to the language
model. This does not include tokens that correspond to the input text.
"""

N = TypeVar("N", bound=Type[nn.Module])


class MultiModalPlugin(ABC):
    """
    Base class that defines data processing logic for a specific modality.

    In particular, we adopt a registry pattern to dispatch data processing
    according to the model being used (considering that different models may
    process the same data differently). This registry is in turn used by
    :class:`~MultiModalRegistry` which acts at a higher level
    (i.e., the modality of the data).
    """

    def __init__(self) -> None:
        self._input_mappers: Dict[Type[nn.Module], MultiModalInputMapper] = {}
        self._max_mm_tokens: Dict[Type[nn.Module], MultiModalTokensCalc] = {}

    @abstractmethod
    def get_data_key(self) -> str:
        """
        Get the data key corresponding to the modality.
        """
        raise NotImplementedError

    @abstractmethod
    def _default_input_mapper(self, ctx: InputContext,
                              data: object) -> MultiModalInputs:
        """
        Return a dictionary to be passed as keyword arguments to
        :meth:`~torch.nn.Module.forward`. This is similar in concept to
        tokenizers and processors in HuggingFace Transformers.

        If the data is not supported, throw :exc:`TypeError`.
        """
        raise NotImplementedError

    def register_input_mapper(
        self,
        mapper: Optional[MultiModalInputMapper] = None,
    ):
        """
        Register an input mapper to a model class.

        When the model receives input data that matches the modality served by
        this plugin (see :meth:`get_data_key`), the provided function is
        invoked to transform the data into a dictionary of model inputs.

        If `None` is provided, then the default input mapper is used instead.

        See also:
            :ref:`input_processing_pipeline`
            :ref:`adding_a_new_multimodal_model`
        """

        def wrapper(model_cls: N) -> N:
            if model_cls in self._input_mappers:
                logger.warning(
                    f"Model class {model_cls} already has an input mapper "
                    f"registered to {self}. It is overwritten by the new one.")

            self._input_mappers[model_cls] = mapper \
                or self._default_input_mapper

            return model_cls

        return wrapper
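
    # Illustrative usage sketch: ``register_input_mapper`` is a decorator
    # factory applied to a model class. ``image_plugin``, ``MyVisionLM``,
    # ``OtherVisionLM`` and ``my_mapper`` are hypothetical names:
    #
    #     @image_plugin.register_input_mapper()  # default mapper
    #     class MyVisionLM(nn.Module):
    #         ...
    #
    #     def my_mapper(ctx: InputContext, data: object) -> MultiModalInputs:
    #         ...
    #
    #     @image_plugin.register_input_mapper(my_mapper)  # custom mapper
    #     class OtherVisionLM(nn.Module):
    #         ...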

    def map_input(self, model_config: ModelConfig,
                  data: object) -> MultiModalInputs:
        """
        Apply an input mapper to the data passed to the model,
        transforming the data into a dictionary of model inputs.

        If the data is not something that the mapper expects,
        :exc:`TypeError` is raised.

        The model is identified by ``model_config``.

        See also:
            :ref:`adding_a_new_multimodal_model`
        """
        # Avoid circular import
        from aphrodite.modeling.model_loader import get_model_architecture

        model_cls, _ = get_model_architecture(model_config)

        mapper = self._input_mappers.get(model_cls)
        if mapper is None:
            raise KeyError(f"No input mapper in {self} is registered for "
                           f"model class {model_cls.__name__}.")

        return mapper(InputContext(model_config), data)

    @abstractmethod
    def _default_max_multimodal_tokens(self, ctx: InputContext) -> int:
        """
        Calculate the maximum number of multimodal tokens input to the
        language model. This does not include tokens that correspond to
        the input text.
        """
        raise NotImplementedError

    def _validate_max_multimodal_tokens(self, max_mm_tokens: int):
        if max_mm_tokens < 1:
            raise ValueError("You should set the number of tokens to a "
                             f"positive integer. Found: {max_mm_tokens}")

    def register_max_multimodal_tokens(
        self,
        max_mm_tokens: Optional[MultiModalTokensCalc] = None,
    ):
        """
        Register the maximum number of multi-modal tokens input to the
        language model for a model class.

        If `None` is provided, then the default calculation is used instead.

        See also:
            :ref:`adding_a_new_multimodal_model`
        """

        def wrapper(model_cls: N) -> N:
            if model_cls in self._max_mm_tokens:
                logger.warning(
                    f"Model class {model_cls} already calculates maximum "
                    f"number of tokens in {self}. It is overwritten by the "
                    "new one.")

            if isinstance(max_mm_tokens, int):
                self._validate_max_multimodal_tokens(max_mm_tokens)

            self._max_mm_tokens[model_cls] = max_mm_tokens \
                or self._default_max_multimodal_tokens

            return model_cls

        return wrapper
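
    # Illustrative usage sketch: the token budget may be registered as a
    # fixed integer or as a callable evaluated against the model config.
    # ``image_plugin``, ``MyVisionLM``, ``OtherVisionLM``, 576 and
    # ``get_max_image_tokens`` are hypothetical:
    #
    #     @image_plugin.register_max_multimodal_tokens(576)
    #     class MyVisionLM(nn.Module):
    #         ...
    #
    #     def get_max_image_tokens(ctx: InputContext) -> int:
    #         ...  # e.g. derived from the vision encoder's patch grid
    #
    #     @image_plugin.register_max_multimodal_tokens(get_max_image_tokens)
    #     class OtherVisionLM(nn.Module):
    #         ...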

    def get_max_multimodal_tokens(self, model_config: ModelConfig) -> int:
        """
        Get the maximum number of multi-modal tokens
        for profiling the memory usage of a model.

        If this registry is not applicable to the model, `0` is returned.

        The model is identified by ``model_config``.

        See also:
            :ref:`adding_a_new_multimodal_model`
        """
        # Avoid circular import
        from aphrodite.modeling.model_loader import get_model_architecture

        model_cls, _ = get_model_architecture(model_config)

        if model_cls not in self._input_mappers:
            return 0

        max_mm_tokens = self._max_mm_tokens.get(model_cls)
        if max_mm_tokens is None:
            raise KeyError(f"No maximum number of multi-modal tokens is given "
                           f"for model class {model_cls.__name__} in {self}.")

        if callable(max_mm_tokens):
            max_mm_tokens = max_mm_tokens(InputContext(model_config))

        self._validate_max_multimodal_tokens(max_mm_tokens)

        return max_mm_tokens
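
# Minimal sketch of a concrete plugin subclass, assuming a hypothetical image
# modality; the names and the token count are placeholders, not the project's
# actual image plugin:
#
#     class MyImagePlugin(MultiModalPlugin):
#
#         def get_data_key(self) -> str:
#             return "image"
#
#         def _default_input_mapper(self, ctx: InputContext,
#                                   data: object) -> MultiModalInputs:
#             if not isinstance(data, Image.Image):
#                 raise TypeError(f"Unsupported data type: {type(data)}")
#             # A real mapper would run the model's image processor here and
#             # return its tensors as MultiModalInputs({"pixel_values": ...}).
#             ...
#
#         def _default_max_multimodal_tokens(self, ctx: InputContext) -> int:
#             return 576  # placeholder budget for illustration only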