phi3v_example.py

import os

from PIL import Image

from aphrodite import LLM, SamplingParams


def run_phi3v():
    model_path = "microsoft/Phi-3-vision-128k-instruct"

    # Note: The default settings of max_num_seqs (256) and
    # max_model_len (128k) for this model may cause OOM.
    # In this example, we override max_num_seqs to 5 while
    # keeping the original context length of 128k.
    # You may lower either max_num_seqs or max_model_len
    # to run this example on a machine with limited memory.
    llm = LLM(
        model=model_path,
        trust_remote_code=True,
        max_num_seqs=5,
    )
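
    # A minimal alternative sketch (assumption: Aphrodite's LLM constructor
    # accepts max_model_len, as in vLLM, from which it is derived). If memory
    # is still tight, cap the context window instead of keeping the full
    # 128k, e.g.:
    #
    # llm = LLM(
    #     model=model_path,
    #     trust_remote_code=True,
    #     max_num_seqs=5,
    #     max_model_len=4096,
    # )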

    image_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                              "burg.jpg")
    image = Image.open(image_path)
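    # Assumption: if the source image carries an alpha channel or palette
    # mode, normalizing it with image = image.convert("RGB") before
    # inference may avoid processor errors; a plain RGB JPEG like burg.jpg
    # needs no conversion.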

    # Single-image prompt: <|image_1|> marks where the first (and only)
    # image passed via multi_modal_data is inserted.
    prompt = "<|user|>\n<|image_1|>\nWhat is shown in this image?<|end|>\n<|assistant|>\n"  # noqa: E501

    # min_p filters out tokens whose probability falls below min_p times
    # that of the most likely token.
    sampling_params = SamplingParams(temperature=1.1,
                                     min_p=0.06,
                                     max_tokens=512)

    outputs = llm.generate(
        {
            "prompt": prompt,
            "multi_modal_data": {
                "image": image
            },
        },
        sampling_params=sampling_params)
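
    # A batching sketch (assumption: generate() also accepts a list of such
    # prompt dicts, as in vLLM; other_prompt and other_image are
    # hypothetical):
    #
    # outputs = llm.generate(
    #     [{"prompt": prompt, "multi_modal_data": {"image": image}},
    #      {"prompt": other_prompt,
    #       "multi_modal_data": {"image": other_image}}],
    #     sampling_params=sampling_params)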

    for o in outputs:
        generated_text = o.outputs[0].text
        print(generated_text)


if __name__ == "__main__":
    run_phi3v()
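
# Usage sketch (assumption: Aphrodite and the model's dependencies are
# installed, and burg.jpg sits next to this script):
#   python phi3v_example.py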