'''Tests whether bitsandbytes computation is enabled correctly.
Run `pytest tests/quantization/test_bitsandbytes.py`.
'''
import pytest
import torch

from aphrodite import SamplingParams
from tests.quantization.utils import is_quant_method_supported
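
# Each entry is (model_name, description): the first exercises on-the-fly
# quantization of a full-precision checkpoint, the second loads a checkpoint
# whose weights are already stored in bitsandbytes 4-bit format.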
models_to_test = [
    ('huggyllama/llama-7b', 'quantize model inflight'),
    ('lllyasviel/omost-llama-3-8b-4bits', 'read pre-quantized model'),
]


@pytest.mark.skipif(not is_quant_method_supported("bitsandbytes"),
                    reason='bitsandbytes is not supported on this GPU type.')
@pytest.mark.parametrize("model_name, description", models_to_test)
def test_load_bnb_model(aphrodite_runner, model_name, description) -> None:
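    # quantization + load_format select the bitsandbytes loading path;
    # enforce_eager disables CUDA graph capture so the model runs in plain
    # eager mode.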
    with aphrodite_runner(model_name,
                          quantization='bitsandbytes',
                          load_format='bitsandbytes',
                          enforce_eager=True) as llm:
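
        # Reach through the engine internals to the underlying torch module
        # so the per-layer weights can be inspected directly.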
        model = llm.model.llm_engine.model_executor.driver_worker.model_runner.model  # noqa: E501
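
        # bitsandbytes packs 4-bit quantized values into torch.uint8 storage,
        # so a uint8 qweight dtype indicates the layer was actually quantized.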
        # check the weights in MLP & SelfAttention are quantized to torch.uint8
        qweight = model.model.layers[0].mlp.gate_up_proj.qweight
        assert qweight.dtype == torch.uint8, (
            f'Expected gate_up_proj dtype torch.uint8 but got {qweight.dtype}')

        qweight = model.model.layers[0].mlp.down_proj.qweight
        assert qweight.dtype == torch.uint8, (
            f'Expected down_proj dtype torch.uint8 but got {qweight.dtype}')

        qweight = model.model.layers[0].self_attn.o_proj.qweight
        assert qweight.dtype == torch.uint8, (
            f'Expected o_proj dtype torch.uint8 but got {qweight.dtype}')

        qweight = model.model.layers[0].self_attn.qkv_proj.qweight
        assert qweight.dtype == torch.uint8, (
            f'Expected qkv_proj dtype torch.uint8 but got {qweight.dtype}')
        # some weights should not be quantized
        weight = model.lm_head.weight
        assert weight.dtype != torch.uint8, (
            'lm_head weight dtype should not be torch.uint8')

        weight = model.model.embed_tokens.weight
        assert weight.dtype != torch.uint8, (
            'embed_tokens weight dtype should not be torch.uint8')

        weight = model.model.layers[0].input_layernorm.weight
        assert weight.dtype != torch.uint8, (
            'input_layernorm weight dtype should not be torch.uint8')

        weight = model.model.layers[0].post_attention_layernorm.weight
        assert weight.dtype != torch.uint8, (
            'post_attention_layernorm weight dtype should not be torch.uint8')

        # check the output of the model is expected
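        # temperature=0.0 makes decoding greedy, so the completion is
        # deterministic and can be compared against fixed reference strings.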
        sampling_params = SamplingParams(temperature=0.0,
                                         logprobs=1,
                                         prompt_logprobs=1,
                                         max_tokens=8)

        prompts = ['That which does not kill us', 'To be or not to be,']
        expected_outputs = [
            'That which does not kill us makes us stronger.',
            'To be or not to be, that is the question.'
        ]
        outputs = llm.generate(prompts, sampling_params=sampling_params)

        assert len(outputs) == len(prompts)
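
        # The greedy completion may run past the reference sentence, so only
        # the prefix of matching length is compared below.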
        for index in range(len(outputs)):
            # compare the first line of the output
            actual_output = outputs[index][1][0].split('\n', 1)[0]
            expected_output = expected_outputs[index].split('\n', 1)[0]
            assert len(actual_output) >= len(expected_output), (
                f'Actual output {actual_output} should be at least as long '
                f'as expected output {expected_output}')
            actual_output = actual_output[:len(expected_output)]
            assert actual_output == expected_output, (
                f'Expected: {expected_output}, but got: {actual_output}')