test_mixtral.py

"""LoRA correctness test for Mixtral-8x7B-Instruct served by Aphrodite with
tensor parallelism: the same adapter must produce identical deterministic
outputs regardless of which LoRA slot (lora_id) it is loaded into."""
from typing import List

import pytest
import torch

import aphrodite
from aphrodite.lora.request import LoRARequest

MODEL_PATH = "mistralai/Mixtral-8x7B-Instruct-v0.1"

def do_sample(llm: aphrodite.LLM, lora_path: str, lora_id: int) -> List[str]:
    """Run greedy decoding on the evaluation prompts, optionally with the
    given LoRA adapter, and return the stripped generations."""
    prompts = [
        "[system] Given a target sentence construct the underlying meaning representation\nof the input sentence as a single function with attributes and attribute\nvalues. This function should describe the target string accurately and the\nfunction must be one of the following ['inform', 'request', 'give_opinion',\n'confirm', 'verify_attribute', 'suggest', 'request_explanation',\n'recommend', 'request_attribute'].\n\nThe attributes must be one of the following:\n['name', 'exp_release_date', 'release_year', 'developer', 'esrb', 'rating',\n'genres', 'player_perspective', 'has_multiplayer', 'platforms',\n'available_on_steam', 'has_linux_release', 'has_mac_release', 'specifier'] [/system] [user] Here is the target sentence:\nSpellForce 3 is a pretty bad game. The developer Grimlore Games is clearly a bunch of no-talent hacks, and 2017 was a terrible year for games anyway. [/user] [assistant]",  # noqa: E501
        "[system] Given a target sentence construct the underlying meaning representation\nof the input sentence as a single function with attributes and attribute\nvalues. This function should describe the target string accurately and the\nfunction must be one of the following ['inform', 'request', 'give_opinion',\n'confirm', 'verify_attribute', 'suggest', 'request_explanation',\n'recommend', 'request_attribute'].\n\nThe attributes must be one of the following:\n['name', 'exp_release_date', 'release_year', 'developer', 'esrb', 'rating',\n'genres', 'player_perspective', 'has_multiplayer', 'platforms',\n'available_on_steam', 'has_linux_release', 'has_mac_release', 'specifier'] [/system] [user] Here is the target sentence:\nI wanted to like Grimlore Games' 2017 entry, but in SpellForce 3 they just didn't get anything right. [/user] [assistant]",  # noqa: E501
        "[system] Given a target sentence construct the underlying meaning representation\nof the input sentence as a single function with attributes and attribute\nvalues. This function should describe the target string accurately and the\nfunction must be one of the following ['inform', 'request', 'give_opinion',\n'confirm', 'verify_attribute', 'suggest', 'request_explanation',\n'recommend', 'request_attribute'].\n\nThe attributes must be one of the following:\n['name', 'exp_release_date', 'release_year', 'developer', 'esrb', 'rating',\n'genres', 'player_perspective', 'has_multiplayer', 'platforms',\n'available_on_steam', 'has_linux_release', 'has_mac_release', 'specifier'] [/system] [user] Here is the target sentence:\nBioShock is a good role-playing, action-adventure, shooter that released for PlayStation, Xbox, and PC in 2007. It is available on Steam, and it has a Mac release but not a Linux release. [/user] [assistant]",  # noqa: E501
    ]
    sampling_params = aphrodite.SamplingParams(temperature=0, max_tokens=256)
    # Attach the LoRA adapter only when a non-zero lora_id is given;
    # lora_id=0 runs the base model.
    outputs = llm.generate(
        prompts,
        sampling_params,
        lora_request=LoRARequest(str(lora_id), lora_id, lora_path)
        if lora_id else None)
    # Print the outputs.
    generated_texts: List[str] = []
    for output in outputs:
        prompt = output.prompt
        generated_text = output.outputs[0].text.strip()
        generated_texts.append(generated_text)
        print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")
    return generated_texts
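
# Note: `mixtral_lora_files` below is a pytest fixture; it is assumed to be
# provided by a conftest.py in the surrounding test suite (typically the
# local path of a downloaded Mixtral LoRA adapter).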

@pytest.mark.parametrize("tp_size", [4])
def test_mixtral_lora(mixtral_lora_files, tp_size):
    """The same adapter must yield identical outputs whether it is loaded
    as LoRA id 1 or LoRA id 2."""
    if torch.cuda.device_count() < tp_size:
        pytest.skip(f"Not enough GPUs for tensor parallelism {tp_size}")

    llm = aphrodite.LLM(MODEL_PATH,
                        enable_lora=True,
                        max_num_seqs=16,
                        max_loras=4,
                        distributed_executor_backend="ray",
                        tensor_parallel_size=tp_size)

    expected_lora_output = [
        "give_opinion(name[SpellForce 3], release_year[2017], developer[Grimlore Games], rating[poor])",  # noqa: E501
        "give_opinion(name[SpellForce 3], developer[Grimlore Games], release_year[2017], rating[poor])",  # noqa: E501
        "inform(name[BioShock], release_year[2007], rating[good], genres[action-adventure, role-playing, shooter], platforms[PlayStation, Xbox, PC], available_on_steam[yes], has_linux_release[no], has_mac_release[yes])",  # noqa: E501
    ]
    assert do_sample(llm, mixtral_lora_files,
                     lora_id=1) == expected_lora_output
    assert do_sample(llm, mixtral_lora_files,
                     lora_id=2) == expected_lora_output
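
# A minimal standalone sketch (not part of the upstream test): exercise
# do_sample directly without pytest. The engine arguments mirror the test
# above; "path/to/mixtral-lora" is a placeholder assumption, point it at any
# locally downloaded Mixtral-compatible LoRA adapter.
if __name__ == "__main__":
    _llm = aphrodite.LLM(MODEL_PATH,
                         enable_lora=True,
                         max_num_seqs=16,
                         max_loras=4,
                         distributed_executor_backend="ray",
                         tensor_parallel_size=4)
    do_sample(_llm, "path/to/mixtral-lora", lora_id=1)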