test_cpu_offload.py

# Expanded quantized model tests for CPU offloading
# Base tests: tests/basic_correctness/test_cpu_offload.py

import pytest

from tests.quantization.utils import is_quant_method_supported

from ..utils import compare_two_settings
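
# compare_two_settings(model, args_a, args_b) (see tests/utils.py) launches
# a server with each argument list in turn and asserts that both
# configurations produce matching completions, i.e. that enabling CPU
# offloading does not change model outputs.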


@pytest.mark.skipif(not is_quant_method_supported("fp8"),
                    reason="fp8 is not supported on this GPU type.")
def test_cpu_offload_fp8():
    # Test quantization of an unquantized checkpoint
    compare_two_settings("meta-llama/Meta-Llama-3-8B-Instruct",
                         ["--quantization", "fp8"],
                         ["--quantization", "fp8", "--cpu-offload-gb", "2"])
    # Test loading a quantized checkpoint
    compare_two_settings("neuralmagic/Meta-Llama-3-8B-Instruct-FP8", [],
                         ["--cpu-offload-gb", "2"])


@pytest.mark.skipif(not is_quant_method_supported("gptq_marlin"),
                    reason="gptq_marlin is not supported on this GPU type.")
def test_cpu_offload_gptq():
    # Test GPTQ Marlin
    compare_two_settings("Qwen/Qwen2-1.5B-Instruct-GPTQ-Int4", [],
                         ["--cpu-offload-gb", "1"])
    # Test GPTQ
    compare_two_settings("Qwen/Qwen2-1.5B-Instruct-GPTQ-Int4",
                         ["--quantization", "gptq"],
                         ["--quantization", "gptq", "--cpu-offload-gb", "1"])


@pytest.mark.skipif(not is_quant_method_supported("awq_marlin"),
                    reason="awq_marlin is not supported on this GPU type.")
def test_cpu_offload_awq():
    # Test AWQ Marlin
    compare_two_settings("Qwen/Qwen2-1.5B-Instruct-AWQ", [],
                         ["--cpu-offload-gb", "1"])
    # Test AWQ
    compare_two_settings("Qwen/Qwen2-1.5B-Instruct-AWQ",
                         ["--quantization", "awq"],
                         ["--quantization", "awq", "--cpu-offload-gb", "1"])


@pytest.mark.skipif(not is_quant_method_supported("gptq_marlin"),
                    reason="gptq_marlin is not supported on this GPU type.")
def test_cpu_offload_compressed_tensors():
    # Test wNa16
    compare_two_settings("nm-testing/tinyllama-oneshot-w4a16-channel-v2", [],
                         ["--cpu-offload-gb", "1"])
    # Test w4a16_marlin24
    compare_two_settings("nm-testing/llama7b-one-shot-2_4-w4a16-marlin24-t",
                         [], ["--cpu-offload-gb", "1"])
    # Test w8a8
    compare_two_settings(
        "nm-testing/tinyllama-oneshot-w8w8-test-static-shape-change", [],
        ["--cpu-offload-gb", "1"])