arctic_inference.py
  1. from aphrodite import LLM, SamplingParams
  2. # Sample prompts.
  3. prompts = [
  4. "Once upon a time,",
  5. "In a galaxy far, far away,",
  6. "The quick brown fox jumps over the lazy dog.",
  7. "The meaning of life is",
  8. ]
  9. # Create a sampling params object.
  10. sampling_params = SamplingParams(temperature=1.15, min_p=0.06)
  11. # Create an LLM.
  12. llm = LLM(model="snowflake/snowflake-arctic-instruct",
  13. quantization="deepspeedfp",
  14. tensor_parallel_size=8,
  15. trust_remote_code=True)
  16. # Generate texts from the prompts. The output is a list of RequestOutput objects
  17. # that contain the prompt, generated text, and other information.
  18. outputs = llm.generate(prompts, sampling_params)
  19. # Print the outputs.
  20. for output in outputs:
  21. prompt = output.prompt
  22. generated_text = output.outputs[0].text
  23. print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")