tpu_inference.py

from aphrodite import LLM, SamplingParams

prompts = [
    "A robot may not injure a human being",
    "It is only with the heart that one can see rightly;",
    "The greatest glory in living lies not in never falling,",
]
answers = [
    " or, through inaction, allow a human being to come to harm.",
    " what is essential is invisible to the eye.",
    " but in rising every time we fall.",
]
N = 1
# Currently, top-p sampling is disabled. `top_p` should be 1.0.
sampling_params = SamplingParams(temperature=0.7,
                                 top_p=1.0,
                                 n=N,
                                 max_tokens=16)

# Set `enforce_eager=True` to avoid ahead-of-time compilation.
# In real workloads, `enforce_eager` should be `False`.
llm = LLM(model="google/gemma-2b", enforce_eager=True)
outputs = llm.generate(prompts, sampling_params)
for output, answer in zip(outputs, answers):
    prompt = output.prompt
    generated_text = output.outputs[0].text
    print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")
    assert generated_text.startswith(answer)
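
# A minimal sketch (assumption, not part of the upstream example): with N > 1,
# each RequestOutput carries N candidate completions, so every entry in
# `output.outputs` would be inspected rather than only index 0. With N = 1 the
# guard below keeps this branch inert.
if N > 1:
    for output in outputs:
        for candidate in output.outputs:
            print(f"Prompt: {output.prompt!r}, Candidate: {candidate.text!r}")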