# test_layernorm.py
import pytest
import torch
from aphrodite.modeling.layers.layernorm import RMSNorm

# Parameter grids swept by the RMSNorm kernel test below (cartesian product
# via stacked @pytest.mark.parametrize decorators).
DTYPES = [torch.half, torch.bfloat16, torch.float]
NUM_TOKENS = [7, 83, 4096]  # Arbitrary values for testing
# Mix of aligned (e.g. 768, 5120, 8192) and deliberately odd sizes
# (769, 5125, 8199) to exercise the kernel's tail/remainder handling.
HIDDEN_SIZES = [768, 769, 770, 771, 5120, 5124, 5125, 5126, 8192,
                8199]  # Arbitrary values for testing
ADD_RESIDUAL = [False, True]
SEEDS = [0]
# Run on one device when only a single GPU exists, otherwise on the first
# two GPUs.  NOTE(review): torch.cuda.device_count() is evaluated at import
# time, so this list is fixed at collection — presumably intentional; with
# zero GPUs this still yields ["cuda:0"] and the test fails at setup.
CUDA_DEVICES = [
    f"cuda:{i}" for i in range(1 if torch.cuda.device_count() == 1 else 2)
]
@pytest.mark.parametrize("num_tokens", NUM_TOKENS)
@pytest.mark.parametrize("hidden_size", HIDDEN_SIZES)
@pytest.mark.parametrize("add_residual", ADD_RESIDUAL)
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("seed", SEEDS)
@pytest.mark.parametrize("device", CUDA_DEVICES)
@torch.inference_mode()
def test_rms_norm(
    num_tokens: int,
    hidden_size: int,
    add_residual: bool,
    dtype: torch.dtype,
    seed: int,
    device: str,
) -> None:
    """Check the custom RMSNorm kernel against the native reference.

    Runs the layer's custom forward (``layer(...)``) and its PyTorch
    reference (``layer.forward_native(...)``) on the same random
    ``(num_tokens, hidden_size)`` input — optionally with a fused residual
    add — and asserts the outputs match within a loose tolerance.
    """
    torch.random.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
    torch.set_default_device(device)
    layer = RMSNorm(hidden_size).to(dtype=dtype)
    # Non-trivial gamma weights so the test cannot pass with a kernel that
    # ignores the scale parameter.
    layer.weight.data.normal_(mean=1.0, std=0.1)
    # Shrink the inputs so activations stay small; keeps the low-precision
    # dtypes (half/bfloat16) away from large accumulation error.
    scale = 1 / (2 * hidden_size)
    x = torch.randn(num_tokens, hidden_size, dtype=dtype)
    x *= scale
    residual = torch.randn_like(x) * scale if add_residual else None
    # NOTE: The reference implementation should be executed first
    # because the custom kernel is in-place.
    ref_out = layer.forward_native(x, residual)
    out = layer(x, residual)
    # NOTE: LayerNorm operators (including RMS) typically have larger
    # numerical errors than other operators because they involve reductions.
    # Therefore, we use a larger tolerance.
    if add_residual:
        # With a residual, both forwards return (output, updated_residual);
        # compare each element of the pair.
        torch.testing.assert_close(out[0], ref_out[0], atol=1e-2, rtol=1e-2)
        torch.testing.assert_close(out[1], ref_out[1], atol=1e-2, rtol=1e-2)
    else:
        torch.testing.assert_close(out, ref_out, atol=1e-2, rtol=1e-2)