test_attention_selector.py

from unittest.mock import patch

import pytest
import torch

from aphrodite.attention.selector import which_attn_to_use
from aphrodite.common.utils import STR_FLASH_ATTN_VAL, STR_INVALID_VAL
from tests.kernels.utils import override_backend_env_variable
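
# Note: the positional arguments passed to `which_attn_to_use` throughout this
# file appear to be (num_heads, head_size, num_kv_heads, sliding_window, dtype,
# kv_cache_dtype, block_size). This layout is inferred from the
# "Unsupported ..." checks in `test_flash_attn` below, not from the selector's
# signature, so treat it as an assumption.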


@pytest.mark.parametrize(
    "name", ["TORCH_SDPA", "ROCM_FLASH", "XFORMERS", "FLASHINFER", "OPENVINO"])
@pytest.mark.parametrize("device", ["cpu", "openvino", "hip", "cuda"])
def test_env(name: str, device: str, monkeypatch):
    """Test that the attention selector can be set via environment variable.

    Note that we do not test FlashAttn because it is the default backend.
    """
    override_backend_env_variable(monkeypatch, name)

    if device == "cpu":
        with patch("aphrodite.attention.selector.is_cpu", return_value=True):
            backend = which_attn_to_use(8, 16, 8, None, torch.float16,
                                        torch.float16, 16)
        assert backend.name == "TORCH_SDPA"
    elif device == "hip":
        with patch("aphrodite.attention.selector.is_hip", return_value=True):
            backend = which_attn_to_use(8, 16, 8, None, torch.float16,
                                        torch.float16, 16)
        assert backend.name == "ROCM_FLASH"
    elif device == "openvino":
        with patch("aphrodite.attention.selector.is_openvino",
                   return_value=True):
            backend = which_attn_to_use(8, 16, 8, None, torch.float16,
                                        torch.float16, 16)
        assert backend.name == "OPENVINO"
    else:
        backend = which_attn_to_use(8, 16, 8, None, torch.float16,
                                    torch.float16, 16)
        assert backend.name == name
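

# For reference, `override_backend_env_variable` (imported from
# tests.kernels.utils) is presumably a thin wrapper around pytest's
# `monkeypatch`. A minimal sketch, assuming the selector reads an
# "APHRODITE_ATTENTION_BACKEND" environment variable (the variable name is an
# assumption, not taken from this file):
#
#     def override_backend_env_variable(monkeypatch, backend_name: str) -> None:
#         # Point the backend selector at `backend_name` for this test only.
#         monkeypatch.setenv("APHRODITE_ATTENTION_BACKEND", backend_name)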


def test_flash_attn(monkeypatch):
    """Test FlashAttn validation."""
    override_backend_env_variable(monkeypatch, STR_FLASH_ATTN_VAL)

    # Unsupported CUDA arch
    with patch("torch.cuda.get_device_capability", return_value=[7, 5]):
        backend = which_attn_to_use(8, 16, 8, None, torch.float16, None, 16)
        assert backend.name != STR_FLASH_ATTN_VAL

    # Unsupported data type
    backend = which_attn_to_use(8, 16, 8, None, torch.float8_e4m3fn, None, 16)
    assert backend.name != STR_FLASH_ATTN_VAL

    # Unsupported kv cache data type
    backend = which_attn_to_use(8, 16, 8, None, torch.float16, "fp8", 16)
    assert backend.name != STR_FLASH_ATTN_VAL

    # Unsupported block size
    backend = which_attn_to_use(8, 16, 8, None, torch.float16, None, 8)
    assert backend.name != STR_FLASH_ATTN_VAL

    # Unsupported sliding window
    backend = which_attn_to_use(8, 16, 8, 1, torch.float16, None, 16)
    assert backend.name != STR_FLASH_ATTN_VAL

    # flash-attn is not installed
    with patch.dict('sys.modules', {'aphrodite_flash_attn': None}):
        backend = which_attn_to_use(8, 16, 8, None, torch.float16, None, 16)
        assert backend.name != STR_FLASH_ATTN_VAL

    # Unsupported head size
    backend = which_attn_to_use(8, 17, 8, None, torch.float16, None, 16)
    assert backend.name != STR_FLASH_ATTN_VAL


def test_invalid_env(monkeypatch):
    """Throw an exception if the backend name is invalid."""
    override_backend_env_variable(monkeypatch, STR_INVALID_VAL)

    with pytest.raises(ValueError):
        which_attn_to_use(8, 16, 8, None, torch.float16, None, 16)