offline_inference.py

from aphrodite import LLM, SamplingParams

# Sample prompts.
prompts = [
    "<|system|>Enter chat mode.<|user|>Hello!<|model|>",
    "<|system|>Enter RP mode.<|model|>Hello!<|user|>What are you doing?<|model|>",
    "<|system|>Enter chat mode.<|user|>What is the meaning of life?<|model|>",
    "<|system|>Enter QA mode.<|user|>What is a man?<|model|>A miserable",
]
# Create a sampling params object.
sampling_params = SamplingParams(temperature=0.8, top_p=0.95)
# Create an LLM.
llm = LLM(model="PygmalionAI/pygmalion-2-7b")  # pass additional arguments here, such as `quantization`
# Generate texts from the prompts. The output is a list of RequestOutput objects
# that contain the prompt, generated text, and other information.
outputs = llm.generate(prompts, sampling_params)
# Print the outputs.
for output in outputs:
    prompt = output.prompt
    generated_text = output.outputs[0].text
    print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")
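
The comment on the `LLM` constructor above mentions passing additional arguments such as `quantization`. As a minimal sketch of what that looks like (the AWQ repository name below is illustrative, not an official checkpoint, and `max_tokens` is an optional cap on generation length):

from aphrodite import LLM, SamplingParams

# Minimal sketch: load a quantized checkpoint instead of the full-precision model.
# The repo name is a placeholder; substitute a real AWQ-quantized model you have access to.
llm = LLM(
    model="your-org/pygmalion-2-7b-awq",  # assumed AWQ checkpoint
    quantization="awq",                   # tell the engine how the weights are quantized
)
sampling_params = SamplingParams(temperature=0.8, top_p=0.95, max_tokens=128)

outputs = llm.generate(
    ["<|system|>Enter chat mode.<|user|>Hello!<|model|>"],
    sampling_params,
)
print(outputs[0].outputs[0].text)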