offline_inference.py

from aphrodite import LLM, SamplingParams

# Sample prompts.
prompts = [
    "<|system|>Enter chat mode.<|user|>Hello!<|model|>",
    "<|system|>Enter RP mode.<|model|>Hello!<|user|>What are you doing?",
    "<|system|>Enter chat mode.<|user|>What is the meaning of life?<|model|>",
    "<|system|>Enter QA mode.<|user|>What is a man?<|model|>A miserable",
]

# Create a sampling params object.
sampling_params = SamplingParams(temperature=0.8, top_p=0.95)

# Create an LLM. Additional arguments, such as `quantization`, can be
# passed here.
llm = LLM(model="PygmalionAI/pygmalion-2-7b")

# Generate texts from the prompts. The output is a list of RequestOutput
# objects that contain the prompt, generated text, and other information.
outputs = llm.generate(prompts, sampling_params)

# Print the outputs.
for output in outputs:
    prompt = output.prompt
    generated_text = output.outputs[0].text
    print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")