llava_next_example.py

import os

from PIL import Image

from aphrodite import LLM, SamplingParams


def run_llava_next():
    # Load the LLaVA-NeXT (v1.6) Mistral-7B multimodal model.
    llm = LLM(model="llava-hf/llava-v1.6-mistral-7b-hf", max_model_len=4096)

    # Mistral-style instruction prompt; the "<image>" placeholder marks where
    # the image embeddings are inserted.
    prompt = "[INST] <image>\nWhat is shown in this image? [/INST]"

    # Load the example image from the same directory as this script.
    image_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                              "burg.jpg")
    image = Image.open(image_path)

    sampling_params = SamplingParams(temperature=1.1,
                                     min_p=0.06,
                                     max_tokens=512)

    # Pass the image alongside the prompt via multi_modal_data.
    outputs = llm.generate(
        {
            "prompt": prompt,
            "multi_modal_data": {
                "image": image
            },
        },
        sampling_params=sampling_params)

    # Collect the generated text from each request output.
    generated_text = ""
    for o in outputs:
        generated_text += o.outputs[0].text
    print(f"LLM output: {generated_text}")


if __name__ == "__main__":
    run_llava_next()
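
# A minimal way to try this example (assuming Aphrodite Engine and Pillow are
# installed, a CUDA-capable GPU is available, and an image named "burg.jpg"
# sits next to this script):
#
#     python llava_next_example.py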