test_bloom.py

"""Prompt-adapter smoke test: a prompt-tuned bloomz-560m should label the
sample tweets as 'complaint' / 'no complaint'."""
import pytest

import aphrodite
from aphrodite.prompt_adapter.request import PromptAdapterRequest

MODEL_PATH = "bigscience/bloomz-560m"
PA_PATH = 'stevhliu/bloomz-560m_PROMPT_TUNING_CAUSAL_LM'


def do_sample(llm, pa_name: str, pa_id: int):
    prompts = [
        "Tweet text : @nationalgridus I have no water and the bill is \
        current and paid. Can you do something about this? Label : ",
        "Tweet text : @nationalgridus Looks good thanks! Label : "
    ]
    sampling_params = aphrodite.SamplingParams(temperature=0.0,
                                               max_tokens=3,
                                               stop_token_ids=[3])
    # Attach the prompt adapter only when a non-zero adapter id is given;
    # the positional arguments are (name, id, path, num_virtual_tokens).
    outputs = llm.generate(prompts,
                           sampling_params,
                           prompt_adapter_request=PromptAdapterRequest(
                               pa_name, pa_id, PA_PATH, 8) if pa_id else None)
    # Collect and print the outputs.
    generated_texts = []
    for output in outputs:
        prompt = output.prompt
        generated_text = output.outputs[0].text.strip()
        generated_texts.append(generated_text)
        print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")
    return generated_texts


@pytest.mark.parametrize("enforce_eager", [True, False])
def test_twitter_prompt_adapter(enforce_eager: bool):
    llm = aphrodite.LLM(MODEL_PATH,
                        enforce_eager=enforce_eager,
                        enable_prompt_adapter=True,
                        max_prompt_adapter_token=8)
    expected_output = ['complaint', 'no complaint']
    assert do_sample(llm, "twitter_pa", pa_id=1) == expected_output
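

# A minimal sketch for exercising the same flow outside pytest, e.g. while
# debugging adapter loading. It reuses do_sample from above and assumes the
# model and adapter weights are reachable (locally or via the Hub);
# enforce_eager=True is an arbitrary choice to skip graph compilation.
if __name__ == "__main__":
    llm = aphrodite.LLM(MODEL_PATH,
                        enforce_eager=True,
                        enable_prompt_adapter=True,
                        max_prompt_adapter_token=8)
    # Expected to print ['complaint', 'no complaint'] per the test above.
    print(do_sample(llm, "twitter_pa", pa_id=1))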