audio_example.py

  1. """
  2. This example shows how to use vLLM for running offline inference
  3. with the correct prompt format on vision language models.
  4. For most models, the prompt format should follow corresponding examples
  5. on HuggingFace model repository.
  6. """
import os

import librosa
from transformers import AutoTokenizer

from aphrodite import LLM, SamplingParams
from aphrodite.common.utils import FlexibleArgumentParser

# Input audio and question
audio_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                          "mary_had_lamb.ogg")
audio_and_sample_rate = librosa.load(audio_path, sr=None)
question = "What is recited in the audio?"


# Ultravox 0.3
def run_ultravox(question):
    model_name = "fixie-ai/ultravox-v0_3"

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    messages = [{
        'role': 'user',
        'content': f"<|reserved_special_token_0|>\n{question}"
    }]
    prompt = tokenizer.apply_chat_template(messages,
                                           tokenize=False,
                                           add_generation_prompt=True)

    llm = LLM(model=model_name)
    stop_token_ids = None
    return llm, prompt, stop_token_ids


model_example_map = {
    "ultravox": run_ultravox,
}


def main(args):
    model = args.model_type
    if model not in model_example_map:
        raise ValueError(f"Model type {model} is not supported.")

    llm, prompt, stop_token_ids = model_example_map[model](question)

    # We set temperature to 0.2 so that outputs can be different
    # even when all prompts are identical when running batch inference.
    sampling_params = SamplingParams(temperature=0.2,
                                     max_tokens=64,
                                     stop_token_ids=stop_token_ids)

    assert args.num_prompts > 0
    if args.num_prompts == 1:
        # Single inference
        inputs = {
            "prompt": prompt,
            "multi_modal_data": {
                "audio": audio_and_sample_rate
            },
        }
    else:
        # Batch inference
        inputs = [{
            "prompt": prompt,
            "multi_modal_data": {
                "audio": audio_and_sample_rate
            },
        } for _ in range(args.num_prompts)]

    outputs = llm.generate(inputs, sampling_params=sampling_params)

    for o in outputs:
        generated_text = o.outputs[0].text
        print(generated_text)


if __name__ == "__main__":
    parser = FlexibleArgumentParser(
        description='Demo on using Aphrodite for offline inference with '
        'audio language models')
    parser.add_argument('--model-type',
                        '-m',
                        type=str,
                        default="ultravox",
                        choices=model_example_map.keys(),
                        help='Huggingface "model_type".')
    parser.add_argument('--num-prompts',
                        type=int,
                        default=1,
                        help='Number of prompts to run.')

    args = parser.parse_args()
    main(args)