# Adapted from https://github.com/fixie-ai/ultravox/blob/ecd58c4041030bae2ad15aa6bcf04ab43199ea02/ultravox/model/ultravox_model.py
"""PyTorch Ultravox model."""
import itertools
import math
from array import array
from functools import lru_cache
from typing import (Iterable, List, Literal, Mapping, Optional, Tuple,
                    TypedDict, Union, cast)

import librosa
import numpy as np
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import functional as F
from transformers.models.whisper import WhisperFeatureExtractor
from transformers.models.whisper.modeling_whisper import WhisperEncoder

from aphrodite.attention import AttentionMetadata
from aphrodite.common.config import CacheConfig, MultiModalConfig
from aphrodite.common.sequence import (APHRODITE_TOKEN_ID_ARRAY_TYPE,
                                       SequenceData)
from aphrodite.inputs import INPUT_REGISTRY
from aphrodite.inputs.data import LLMInputs
from aphrodite.inputs.registry import InputContext
from aphrodite.modeling.layers.activation import SiluAndMul, get_act_fn
from aphrodite.modeling.layers.layernorm import RMSNorm
from aphrodite.modeling.layers.sampler import SamplerOutput
from aphrodite.modeling.model_loader.weight_utils import default_weight_loader
from aphrodite.modeling.models.interfaces import SupportsMultiModal
from aphrodite.modeling.models.utils import (filter_weights,
                                             init_aphrodite_registered_model,
                                             merge_multimodal_embeddings)
from aphrodite.modeling.sampling_metadata import SamplingMetadata
from aphrodite.multimodal import MULTIMODAL_REGISTRY
from aphrodite.multimodal.base import MultiModalInputs
from aphrodite.multimodal.utils import (cached_get_tokenizer,
                                        repeat_and_pad_placeholder_tokens)
from aphrodite.quantization.base_config import QuantizationConfig
from aphrodite.transformers_utils.configs.ultravox import UltravoxConfig

_AUDIO_PLACEHOLDER_TOKEN = 128002
_AUDIO_TOKENS_PER_SECOND = 6.25
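
# NOTE: 6.25 audio tokens per second is a property of the default pipeline
# rather than a universal constant: the Whisper encoder emits 50 frames per
# second of audio, and the projector's StackAudioFrames reduces that by the
# default stack_factor of 8 (50 / 8 = 6.25).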


class UltravoxAudioFeatureInputs(TypedDict):
    type: Literal["audio_features"]
    data: Union[torch.Tensor, List[torch.Tensor]]
    """Shape: `(batch_size * num_audios, 80, M)`"""


class UltravoxAudioEmbeddingInputs(TypedDict):
    type: Literal["audio_embeds"]
    data: torch.Tensor


UltravoxAudioInputs = Union[UltravoxAudioFeatureInputs,
                            UltravoxAudioEmbeddingInputs]


@lru_cache
def cached_feature_extractor(model_id: str) -> WhisperFeatureExtractor:
    return WhisperFeatureExtractor.from_pretrained(model_id)


def whisper_feature_extractor(ctx: InputContext) -> WhisperFeatureExtractor:
    return cached_feature_extractor(
        ctx.get_hf_config(UltravoxConfig).audio_model_id)


def get_ultravox_max_audio_tokens(ctx: InputContext):
    feature_extractor = whisper_feature_extractor(ctx)
    return math.ceil(feature_extractor.chunk_length * _AUDIO_TOKENS_PER_SECOND)
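
# Example (illustrative, assuming Whisper's default 30 s chunk_length): the
# cap above works out to ceil(30 * 6.25) = 188 audio tokens per clip.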


def dummy_data_for_ultravox(
    ctx: InputContext,
    seq_len: int,
    mm_counts: Mapping[str, int],
):
    feature_extractor = whisper_feature_extractor(ctx)

    audio_count = mm_counts["audio"]

    audio_token_ids = array(APHRODITE_TOKEN_ID_ARRAY_TYPE, [
        _AUDIO_PLACEHOLDER_TOKEN
    ]) * get_ultravox_max_audio_tokens(ctx) * audio_count
    other_token_ids = array(APHRODITE_TOKEN_ID_ARRAY_TYPE,
                            [0]) * (seq_len - len(audio_token_ids))

    audio_and_sr = (np.array([0.0] * feature_extractor.chunk_length), 1)
    mm_dict = {
        "audio":
        audio_and_sr if audio_count == 1 else [audio_and_sr] * audio_count
    }

    return (SequenceData(audio_token_ids + other_token_ids), mm_dict)


def input_mapper_for_ultravox(ctx: InputContext, data: object):
    if isinstance(data, tuple):
        (audio, sr) = cast(Tuple[np.ndarray, Union[float, int]], data)
        feature_extractor = whisper_feature_extractor(ctx)

        if sr != feature_extractor.sampling_rate:
            audio = librosa.resample(audio,
                                     orig_sr=sr,
                                     target_sr=feature_extractor.sampling_rate)
            sr = feature_extractor.sampling_rate

        minimum_audio_length = feature_extractor.n_fft // 2 + 1
        if len(audio) < minimum_audio_length:
            # Not enough audio; pad it.
            audio = np.pad(audio, (0, minimum_audio_length - len(audio)))

        return MultiModalInputs({
            "audio_features":
            feature_extractor(audio,
                              sampling_rate=sr,
                              padding="longest",
                              return_tensors="pt")["input_features"]
        })

    raise NotImplementedError(f"Unsupported data type: {type(data)}")
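
# NOTE: for an (audio, sampling_rate) tuple, the mapper above returns a
# MultiModalInputs dict whose "audio_features" entry is the log-mel
# spectrogram produced by WhisperFeatureExtractor, i.e. a tensor of shape
# (1, 80, M) with M mel frames for the (possibly resampled and padded) clip.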


def input_processor_for_ultravox(ctx: InputContext, llm_inputs: LLMInputs):
    multi_modal_data = llm_inputs.get("multi_modal_data")
    if multi_modal_data is None or "audio" not in multi_modal_data:
        return llm_inputs

    feature_extractor = whisper_feature_extractor(ctx)
    audio_data, sample_rate = multi_modal_data["audio"]

    audio_length = audio_data.shape[0]
    if sample_rate != feature_extractor.sampling_rate:
        # Account for resampling.
        adjustment = feature_extractor.sampling_rate / sample_rate
        audio_length = math.ceil(adjustment * audio_length)

    feature_extractor_output_length = math.ceil(
        (audio_length -
         (feature_extractor.hop_length - 1)) / feature_extractor.hop_length)

    uv_config = ctx.get_hf_config(UltravoxConfig)
    audio_num_tokens = min(
        max(
            1,
            math.ceil(feature_extractor_output_length /
                      (uv_config.stack_factor * 2))),
        get_ultravox_max_audio_tokens(ctx))
    tokenizer = cached_get_tokenizer(ctx.model_config.tokenizer)

    new_prompt, new_token_ids = repeat_and_pad_placeholder_tokens(
        tokenizer,
        llm_inputs.get("prompt"),
        llm_inputs["prompt_token_ids"],
        placeholder_token_id=_AUDIO_PLACEHOLDER_TOKEN,
        repeat_count=audio_num_tokens,
    )

    # NOTE: Create a defensive copy of the original inputs
    return LLMInputs(prompt_token_ids=new_token_ids,
                     prompt=new_prompt,
                     multi_modal_data=multi_modal_data)
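
# Worked example (illustrative, assuming Whisper's defaults of a 16 kHz
# sampling rate and hop_length=160, plus the default stack_factor of 8):
# a 10 s clip has 160000 samples, giving
# ceil((160000 - 159) / 160) = 1000 mel frames and
# audio_num_tokens = min(max(1, ceil(1000 / 16)), 188) = 63,
# i.e. roughly the 6.25 tokens per second noted above.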


class StackAudioFrames(nn.Module):
    """
    Stack the audio embedding frames to reduce the sequence length by a factor
    of `stack_factor`.
    """

    def __init__(self, stack_factor: int = 8):
        super().__init__()
        self.stack_factor = stack_factor

    def forward(self, audio_embeds: torch.Tensor) -> torch.Tensor:
        B, T, C = audio_embeds.shape
        T_pad = (T + self.stack_factor -
                 1) // self.stack_factor * self.stack_factor
        audio_embeds = F.pad(audio_embeds, (0, 0, 0, T_pad - T))
        B, T, C = audio_embeds.shape
        audio_embeds = audio_embeds.view(B, T // self.stack_factor,
                                         C * self.stack_factor)
        return audio_embeds
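
# Shape sketch (illustrative numbers): with stack_factor=8, an input of shape
# (B, T, C) = (1, 13, 1280) is zero-padded along T to (1, 16, 1280) and then
# reshaped to (1, 2, 10240), i.e. every 8 consecutive frames are concatenated
# along the channel dimension.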


class FlippedSiluAndMul(SiluAndMul):
    """Ultravox is trained with SwiGLU with flipped halves."""

    def forward(self, x: torch.Tensor):
        a, b = x.chunk(2, dim=-1)
        flipped = torch.cat((b, a), dim=-1)
        return super().forward(flipped)
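
# NOTE: SiluAndMul applies SiLU to the first half of the last dimension and
# multiplies by the second half, so feeding it the flipped tensor (b, a)
# computes silu(b) * a -- the half ordering the Ultravox checkpoint was
# trained with.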


class UltravoxProjector(nn.Module):

    def __init__(self, config: UltravoxConfig):
        super().__init__()
        self.hidden_dim = config.hidden_size
        self._pad_and_stack = StackAudioFrames(config.stack_factor)
        dim = config.audio_config.hidden_size * config.stack_factor
        self.ln_pre = RMSNorm(dim)
        self.linear_1 = nn.Linear(dim, self.hidden_dim, bias=False)
        dim = self.hidden_dim

        if config.projector_act == "swiglu":
            self.act = FlippedSiluAndMul()
            dim = dim // 2
        else:
            self.act = get_act_fn(config.projector_act)

        self.linear_2 = nn.Linear(dim,
                                  config.text_config.hidden_size,
                                  bias=False)
        self.ln_post = RMSNorm(config.text_config.hidden_size)

    def forward(self, audio_features: torch.Tensor) -> torch.Tensor:
        audio_features = self._pad_and_stack(audio_features)
        audio_features = self.ln_pre(audio_features)
        hidden_states = self.linear_1(audio_features)
        hidden_states = self.act(hidden_states)
        hidden_states = self.linear_2(hidden_states)
        hidden_states = self.ln_post(hidden_states)
        return hidden_states
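
# Dimension walk-through for the projector (illustrative, assuming a
# whisper-large-style audio encoder with hidden_size 1280 and the default
# stack_factor of 8): ln_pre and linear_1 see 1280 * 8 = 10240 features,
# linear_1 projects to config.hidden_size, the "swiglu" activation halves
# that width, and linear_2 maps the result to the text model's hidden size.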


class ModifiedWhisperEncoder(WhisperEncoder):
    """
    Encoder portion of OpenAI's Whisper model.

    This implementation is a slightly modified version of HF Transformers'
    Whisper Encoder, with only a few fixes:
    1. base_model_prefix updated to allow for doing `.from_pretrained`
       directly on the encoder
    2. allow less than 30 seconds of audio padding to be passed in:
        - relaxed ValueError check for `input_features` length to be less
          than or equal to `expected_seq_length` instead of strictly equal
        - embed_pos is now sliced to match the length of `inputs_embeds`

    Original: https://github.com/huggingface/transformers/blob/main/src/transformers/models/whisper/modeling_whisper.py
    See commentary: https://github.com/huggingface/transformers/issues/25744
    """

    base_model_prefix = "model.encoder"

    def forward(
        self,
        input_features,
    ):
        expected_seq_length = (self.config.max_source_positions *
                               self.conv1.stride[0] * self.conv2.stride[0])
        if input_features.shape[-1] > expected_seq_length:
            raise ValueError(
                f"Whisper expects the mel input features to be of length "
                f"{expected_seq_length} or less, but found "
                f"{input_features.shape[-1]}. Make sure to pad the input mel "
                f"features to {expected_seq_length}.")

        inputs_embeds = nn.functional.gelu(self.conv1(input_features))
        inputs_embeds = nn.functional.gelu(self.conv2(inputs_embeds))
        inputs_embeds = inputs_embeds.permute(0, 2, 1)
        embed_pos = self.embed_positions.weight[:inputs_embeds.size(-2)]

        hidden_states = inputs_embeds + embed_pos
        hidden_states = nn.functional.dropout(hidden_states,
                                              p=self.dropout,
                                              training=self.training)

        for encoder_layer in self.layers:
            layer_outputs = encoder_layer(
                hidden_states,
                None,
                layer_head_mask=None,
            )
            hidden_states = layer_outputs[0]

        hidden_states = self.layer_norm(hidden_states)
        return hidden_states
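
# NOTE: with Whisper's defaults (max_source_positions=1500 and conv strides
# of 1 and 2), expected_seq_length is 3000 mel frames, i.e. 30 s of audio.
# Because embed_pos is sliced to the actual input length, shorter clips can
# be encoded without first padding them out to the full 30 s window.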


@MULTIMODAL_REGISTRY.register_input_mapper("audio", input_mapper_for_ultravox)
@MULTIMODAL_REGISTRY.register_max_multimodal_tokens(
    "audio", get_ultravox_max_audio_tokens)
@INPUT_REGISTRY.register_dummy_data(dummy_data_for_ultravox)
@INPUT_REGISTRY.register_input_processor(input_processor_for_ultravox)
class UltravoxModel(nn.Module, SupportsMultiModal):

    def __init__(self,
                 config: UltravoxConfig,
                 multimodal_config: MultiModalConfig,
                 cache_config: Optional[CacheConfig] = None,
                 quant_config: Optional["QuantizationConfig"] = None):
        super().__init__()
        self.config = config
        self.multi_modal_config = multimodal_config
        assert self.multi_modal_config

        if config.audio_model_id is not None:
            self.audio_tower = ModifiedWhisperEncoder.from_pretrained(
                config.audio_model_id)
        else:
            self.audio_tower = ModifiedWhisperEncoder(config.audio_config)
        self.multi_modal_projector = UltravoxProjector(config)
        self.language_model = init_aphrodite_registered_model(
            config.text_config, cache_config, quant_config)

    def _audio_features_to_embeddings(
            self, input_features: torch.Tensor) -> torch.Tensor:
        audio_input = input_features.to(self.audio_tower.dtype)
        audio_features = self.audio_tower(audio_input)
        audio_features = audio_features.to(self.audio_tower.dtype)
        audio_embeddings = self.multi_modal_projector(audio_features)
        return audio_embeddings

    def _parse_and_validate_audio_input(
            self, **kwargs: object) -> Optional[UltravoxAudioInputs]:
        audio_features = kwargs.pop("audio_features", None)
        audio_embeds = kwargs.pop("audio_embeds", None)

        if audio_features is None and audio_embeds is None:
            return None

        if audio_features is not None:
            if not isinstance(audio_features, (torch.Tensor, list)):
                raise ValueError("Incorrect type of audio features. "
                                 f"Got type: {type(audio_features)}")

            # Remove the N dimension until multiple audios are supported.
            if isinstance(audio_features, torch.Tensor):
                audio_features = audio_features.squeeze(1)
            else:
                audio_features = [t.squeeze(0) for t in audio_features]

            return UltravoxAudioFeatureInputs(type="audio_features",
                                              data=audio_features)

        if audio_embeds is not None:
            if not isinstance(audio_embeds, torch.Tensor):
                raise ValueError("Incorrect type of audio embeds. "
                                 f"Got type: {type(audio_embeds)}")

            # Remove the N dimension until multiple audios are supported.
            audio_embeds = audio_embeds.squeeze(1)

            return UltravoxAudioEmbeddingInputs(type="audio_embeds",
                                                data=audio_embeds)

        raise AssertionError("This line should be unreachable.")

    def _process_audio_input(
        self, audio_input: UltravoxAudioInputs
    ) -> Union[torch.Tensor, List[torch.Tensor]]:
        if audio_input["type"] == "audio_embeds":
            return audio_input["data"]

        audio_features = audio_input["data"]
        if isinstance(audio_features, list):
            # TODO: Batch these through the encoder/projector instead of
            # serializing them.
            return [
                self._audio_features_to_embeddings(
                    features.unsqueeze(0)).squeeze(0)
                for features in audio_features
            ]
        else:
            return self._audio_features_to_embeddings(audio_features)

    def forward(self, input_ids: torch.Tensor, positions: torch.Tensor,
                kv_caches: List[torch.Tensor],
                attn_metadata: AttentionMetadata,
                intermediate_tensors: Optional[torch.Tensor],
                **kwargs) -> SamplerOutput:
        """Run the forward pass for Ultravox.

        One key thing to understand is that `input_ids` already accounts for
        the positions of the to-be-inserted audio embeddings. The
        to-be-inserted audio has a size that is essentially 6.25 tokens per
        second of audio.

        This way, the `positions` and `attn_metadata` are consistent with the
        `input_ids`.

        Args:
            input_features: A batch of audio inputs, [1, 80, M].
        """
        audio_input = self._parse_and_validate_audio_input(**kwargs)
        if audio_input is not None:
            audio_embeddings = self._process_audio_input(audio_input)
            inputs_embeds = self.language_model.model.get_input_embeddings(
                input_ids)

            inputs_embeds = merge_multimodal_embeddings(
                input_ids, inputs_embeds, audio_embeddings,
                _AUDIO_PLACEHOLDER_TOKEN)

            input_ids = None
        else:
            inputs_embeds = None

        hidden_states = self.language_model.model(
            input_ids=input_ids,
            positions=positions,
            kv_caches=kv_caches,
            attn_metadata=attn_metadata,
            intermediate_tensors=intermediate_tensors,
            inputs_embeds=inputs_embeds)
        return hidden_states
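
    # NOTE: merge_multimodal_embeddings replaces the embedding rows at the
    # positions where `input_ids` equals _AUDIO_PLACEHOLDER_TOKEN with the
    # projected audio embeddings, so the number of placeholder tokens inserted
    # by input_processor_for_ultravox must match the number of audio
    # embeddings produced here.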

    def compute_logits(self, hidden_states: torch.Tensor,
                       sampling_metadata: SamplingMetadata) -> torch.Tensor:
        return self.language_model.compute_logits(hidden_states,
                                                  sampling_metadata)

    def sample(
        self,
        logits: torch.Tensor,
        sampling_metadata: SamplingMetadata,
    ) -> Optional[SamplerOutput]:
        return self.language_model.sample(logits, sampling_metadata)

    def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]):
        # prepare weight iterators for components
        projector_weights, llm_weights = itertools.tee(weights, 2)

        # load projector weights
        projector_weights = filter_weights(projector_weights,
                                           "multi_modal_projector")
        projector_params_dict = dict(
            self.multi_modal_projector.named_parameters())
        for name, loaded_weight in projector_weights:
            param = projector_params_dict[name]
            weight_loader = getattr(param, "weight_loader",
                                    default_weight_loader)
            weight_loader(param, loaded_weight)

        # load llm backbone
        llm_weights = filter_weights(llm_weights, "language_model")
        self.language_model.load_weights(llm_weights)
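
    # NOTE (assumption about filter_weights): it is assumed to yield only the
    # checkpoint entries under the given prefix with that prefix stripped,
    # e.g. "multi_modal_projector.linear_1.weight" would be loaded into the
    # projector's "linear_1.weight" parameter, while "language_model.*"
    # entries are handed (minus the prefix) to the language model's own
    # load_weights.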