offline_inference.py

from aphrodite import LLM, SamplingParams

# Sample prompts.
prompts = [
    "Once upon a time,",
    "In a galaxy far, far away,",
    "The quick brown fox jumps over the lazy dog.",
    "The meaning of life is",
]

# Create a sampling params object.
sampling_params = SamplingParams(temperature=1.15, min_p=0.06)

# Create an LLM. Additional arguments, such as `quantization`,
# can be passed here.
llm = LLM(model="NousResearch/Meta-Llama-3.1-8B-Instruct")

# Generate texts from the prompts. The output is a list of RequestOutput
# objects that contain the prompt, generated text, and other information.
outputs = llm.generate(prompts, sampling_params)

# Print the outputs.
for output in outputs:
    prompt = output.prompt
    generated_text = output.outputs[0].text
    print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")
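
As a minimal sketch of passing such additional arguments, a quantized checkpoint could be loaded via the `quantization` keyword; the model repository name and quantization method below are illustrative and must match how the checkpoint was actually quantized:

# A sketch: loading a quantized model. Both the repo name and
# `quantization="gptq"` are hypothetical examples; substitute values
# that match your checkpoint.
llm = LLM(
    model="some-org/Meta-Llama-3.1-8B-Instruct-GPTQ",  # hypothetical repo
    quantization="gptq",
)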