# registry.py
  1. import functools
  2. from collections import UserDict
  3. from typing import Dict, Mapping, Optional, Sequence
  4. from loguru import logger
  5. from aphrodite.common.config import ModelConfig
  6. from .audio import AudioPlugin
  7. from .base import (MultiModalDataDict, MultiModalInputMapper, MultiModalInputs,
  8. MultiModalPlugin, MultiModalTokensCalc, NestedTensors)
  9. from .image import ImagePlugin
  10. class _MultiModalLimits(UserDict):
  11. """
  12. Wraps `_limits_by_model` for a more informative error message
  13. when attempting to access a model that does not exist.
  14. """
  15. def __getitem__(self, key: ModelConfig) -> Dict[str, int]:
  16. try:
  17. return super().__getitem__(key)
  18. except KeyError as exc:
  19. msg = (f"Cannot find `mm_limits` for model={key.model}. Did you "
  20. "forget to call `init_mm_limits_per_prompt`?")
  21. raise KeyError(msg) from exc
class MultiModalRegistry:
    """
    A registry to dispatch data processing
    according to its modality and the target model.

    The registry handles both external and internal data input.
    """

    # NOTE: These plugin instances are created once at class-definition time
    # and are therefore shared by every registry built with the default.
    DEFAULT_PLUGINS = (ImagePlugin(), AudioPlugin())

    def __init__(
            self,
            *,
            plugins: Sequence[MultiModalPlugin] = DEFAULT_PLUGINS) -> None:
        # Dispatch table: plugin data key (e.g. "image", "audio") -> plugin.
        self._plugins = {p.get_data_key(): p for p in plugins}

        # This is used for non-multimodal models
        # (a limit of 0 for every plugin rejects all multi-modal input).
        self._disabled_limits_per_plugin = {k: 0 for k in self._plugins}

        # Per-model limits; populated by :meth:`init_mm_limits_per_prompt`.
        self._limits_by_model = _MultiModalLimits()

    def register_plugin(self, plugin: MultiModalPlugin) -> None:
        """
        Register a multi-modal plugin under its data key, overwriting (with
        a warning) any plugin previously registered for the same data type.
        """
        data_type_key = plugin.get_data_key()

        if data_type_key in self._plugins:
            logger.warning(
                "A plugin is already registered for data type "
                f"{data_type_key}, "
                f"and will be overwritten by the new plugin {plugin}.")

        self._plugins[data_type_key] = plugin

    def _get_plugin(self, data_type_key: str):
        """
        Look up the plugin registered for ``data_type_key``.

        Raises:
            NotImplementedError: If no plugin handles the given data type.
        """
        plugin = self._plugins.get(data_type_key)
        if plugin is not None:
            return plugin

        msg = f"Unknown multi-modal data type: {data_type_key}"
        raise NotImplementedError(msg)

    def register_input_mapper(
        self,
        data_type_key: str,
        mapper: Optional[MultiModalInputMapper] = None,
    ):
        """
        Register an input mapper for a specific modality to a model class.

        See :meth:`MultiModalPlugin.register_input_mapper` for more details.
        """
        return self._get_plugin(data_type_key).register_input_mapper(mapper)

    def register_image_input_mapper(
        self,
        mapper: Optional[MultiModalInputMapper] = None,
    ):
        """
        Register an input mapper for image data to a model class.

        See :meth:`MultiModalPlugin.register_input_mapper` for more details.
        """
        return self.register_input_mapper("image", mapper)

    def map_input(self, model_config: ModelConfig,
                  data: MultiModalDataDict) -> MultiModalInputs:
        """
        Apply an input mapper to the data passed to the model.

        The data belonging to each modality is passed to the corresponding
        plugin which in turn converts the data into keyword arguments
        via the input mapper registered for that model.

        See :meth:`MultiModalPlugin.map_input` for more details.

        Raises:
            ValueError: If a modality has more items than its per-prompt
                limit, or if two plugins produce the same keyword argument.

        Note:
            This should be called after :meth:`init_mm_limits_per_prompt`.
        """
        merged_dict: Dict[str, NestedTensors] = {}

        for data_key, data_value in data.items():
            plugin = self._get_plugin(data_key)

            # A bare (non-list) value counts as a single item.
            num_items = len(data_value) if isinstance(data_value, list) else 1
            max_items = self._limits_by_model[model_config][data_key]
            if num_items > max_items:
                raise ValueError(
                    f"You set {data_key}={max_items} (or defaulted to 1) in "
                    f"`--limit-mm-per-prompt`, but found {num_items} items "
                    "in the same prompt.")

            input_dict = plugin.map_input(model_config, data_value)
            for input_key, input_tensor in input_dict.items():
                # Two plugins must not emit the same kwarg for `forward()`.
                if input_key in merged_dict:
                    raise ValueError(f"The input mappers (keys={set(data)}) "
                                     f"resulted in a conflicting keyword "
                                     f"argument to `forward()`: {input_key}")

                merged_dict[input_key] = input_tensor

        return MultiModalInputs(merged_dict)

    def create_input_mapper(self, model_config: ModelConfig):
        """
        Create an input mapper (see :meth:`map_input`) for a specific model.
        """
        return functools.partial(self.map_input, model_config)

    def register_max_multimodal_tokens(
        self,
        data_type_key: str,
        max_mm_tokens: Optional[MultiModalTokensCalc] = None,
    ):
        """
        Register the maximum number of tokens, corresponding to a single
        instance of multimodal data belonging to a specific modality, that are
        passed to the language model for a model class.
        """
        return self._get_plugin(data_type_key) \
            .register_max_multimodal_tokens(max_mm_tokens)

    def register_max_image_tokens(
        self,
        max_mm_tokens: Optional[MultiModalTokensCalc] = None,
    ):
        """
        Register the maximum number of image tokens, corresponding to a single
        image, that are passed to the language model for a model class.
        """
        return self.register_max_multimodal_tokens("image", max_mm_tokens)

    def get_max_multimodal_tokens(self, model_config: ModelConfig) -> int:
        """
        Get the maximum number of multi-modal tokens
        for profiling the memory usage of a model.

        See :meth:`MultiModalPlugin.get_max_multimodal_tokens` for more
        details.

        Note:
            This should be called after :meth:`init_mm_limits_per_prompt`.
        """
        limits_per_plugin = self._limits_by_model[model_config]

        # Worst case: every plugin is used up to its per-prompt limit.
        return sum((limits_per_plugin[key] *
                    plugin.get_max_multimodal_tokens(model_config))
                   for key, plugin in self._plugins.items())

    def init_mm_limits_per_prompt(
        self,
        model_config: ModelConfig,
    ) -> None:
        """
        Initialize the maximum number of multi-modal input instances for each
        modality that are allowed per prompt for a model class.

        Re-initializing an already-configured model overwrites the previous
        limits (with a warning).
        """
        if model_config in self._limits_by_model:
            logger.warning(
                f"`mm_limits` has already been set for model="
                f"{model_config.model}, and will be overwritten by the "
                "new values.")

        multimodal_config = model_config.multimodal_config
        if multimodal_config is None:
            # Non-multimodal model: every plugin's limit is 0.
            limits_per_plugin = self._disabled_limits_per_plugin
        else:
            config_limits_per_plugin = multimodal_config.limit_per_prompt

            # Warn about (and drop) limit keys with no matching plugin.
            extra_keys = config_limits_per_plugin.keys() - self._plugins.keys()
            if extra_keys:
                logger.warning(
                    "Detected extra keys in `--limit-mm-per-prompt` which "
                    f"are not registered as multi-modal plugins: {extra_keys}."
                    " They will be ignored.")

            # NOTE: Currently the default is set to 1 for each plugin
            # TODO: Automatically determine the limits based on budget
            # once more models support multi-image inputs
            limits_per_plugin = {
                key: config_limits_per_plugin.get(key, 1)
                for key in self._plugins
            }

        self._limits_by_model[model_config] = limits_per_plugin

    def get_mm_limits_per_prompt(
        self,
        model_config: ModelConfig,
    ) -> Mapping[str, int]:
        """
        Get the maximum number of multi-modal input instances for each modality
        that are allowed per prompt for a model class.

        Note:
            This should be called after :meth:`init_mm_limits_per_prompt`.
        """
        return self._limits_by_model[model_config]