  1. """
  2. Based on:
  3. Chen, L., Ye, Z., Wu, Y., Zhuo, D., Ceze, L., & Krishnamurthy, A. (2023).
  4. Punica: Multi-Tenant LoRA Serving.
  5. https://arxiv.org/abs/2310.18547
  6. """
  7. import torch
  8. import triton
  9. import triton.language as tl
  10. from .utils import get_lora_op_configs


@triton.jit
def _bgmv_shrink_kernel(
    input_ptr,
    lora_ptr,
    out_ptr,
    N,
    K,
    lora_indices,
    scaling,
    xm_stride,
    xk_stride,
    l0_stride,
    lora_k_stride,
    lora_n_stride,
    cm_stride,
    cn_stride,
    BLOCK_N: tl.constexpr,
    BLOCK_K: tl.constexpr,
    SPLIT_K: tl.constexpr,
):
  31. """
  32. GroupGEMV, additionally, introducing SPLIT-K can improve large hidden_size's
  33. performance
  34. """
    pid_sk = tl.program_id(axis=0)
    cur_batch = tl.program_id(axis=1)
    lora_index = tl.load(lora_indices + cur_batch)
    if lora_index == -1:
        return

    offset_n = tl.arange(0, BLOCK_N)
    offset_k = tl.arange(0, BLOCK_K) + pid_sk * BLOCK_K
    a_ptr = input_ptr + cur_batch * xm_stride
    b_ptr = lora_ptr + l0_stride * lora_index
    accumulator = tl.zeros((BLOCK_N, ), dtype=tl.float32)
    for k in range(0, K, BLOCK_K * SPLIT_K):
        current_k = k + offset_k
        current_k_c = tl.max_contiguous(current_k, BLOCK_K)
        tiled_a = tl.load(
            a_ptr + current_k_c,
            mask=current_k < K,
            other=0.0,
        )  # [BLOCK_K]
        b_ptr_mask = (offset_n[:, None] < N) & (current_k[None, :] < K)
        tiled_b = tl.load(
            b_ptr + offset_n[:, None] * lora_k_stride +
            current_k[None, :] * lora_n_stride,
            mask=b_ptr_mask,
            other=0.0,
        )  # [BLOCK_N,BLOCK_K]
        accumulator += tl.sum(tiled_a * tiled_b, 1)

    accumulator *= scaling
    offset_cn = tl.arange(0, BLOCK_N)
    c_ptr = out_ptr + cur_batch * cm_stride + offset_cn * cn_stride
    c_mask = offset_cn < N
    if SPLIT_K == 1:
        tl.store(c_ptr, accumulator, mask=c_mask)
    else:
        tl.atomic_add(c_ptr, accumulator, mask=c_mask)


@torch.inference_mode()
def _bgmv_shrink(
    inputs: torch.Tensor,
    lora_a_weights: torch.Tensor,
    output_tensor: torch.Tensor,
    lora_indices_tensor: torch.Tensor,
    scaling: float = 1.0,
) -> None:
  77. """
  78. Args:
  79. inputs (torch.Tensor): input tensor
  80. lora_a_weights (torch.Tensor): lora'a weight
  81. output_tensor (torch.Tensor): output tensor
  82. lora_indices_tensor (torch.Tensor): (batch_size,). The LoRA index
  83. corresponding to each batch. An index of -1 means no lora should be
  84. applied.
  85. batches (int): batch size
  86. scaling (float): Scaling factor.
  87. """
    assert inputs.dtype == lora_a_weights.dtype
    assert inputs.dtype in [torch.float16, torch.bfloat16]
    assert lora_a_weights.dtype in [
        torch.float16,
        torch.bfloat16,
    ]
    assert inputs.size(1) == lora_a_weights.size(-1)
    assert inputs.is_contiguous()

    if lora_a_weights.ndim == 4:  # shape: (lora_num, 1, rank, size)
        assert lora_a_weights.size(1) == 1
        lora_a_weights = lora_a_weights.squeeze(dim=1)
    else:
        assert lora_a_weights.ndim == 3  # shape: (lora_num, rank, size)
    assert lora_a_weights.is_contiguous()
    assert output_tensor.is_contiguous()

    # TODO tuning this config
    batches = lora_indices_tensor.size(0)
    N, K = lora_a_weights.shape[-2:]  # K = hidden_size, N = rank
    BLOCK_N = triton.next_power_of_2(N)
    # First try to load optimal config from the file
    config = get_lora_op_configs("bgmv_shrink", batches, K)
    grid = lambda META: (
        META["SPLIT_K"],
        batches,
    )
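    # Grid: one program instance per (K split, batch row); axis 0 maps to
    # pid_sk and axis 1 to cur_batch in _bgmv_shrink_kernel.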
    _bgmv_shrink_kernel[grid](
        inputs,
        lora_a_weights,
        output_tensor,
        N,
        K,
        lora_indices_tensor,
        scaling,
        inputs.stride(0),
        inputs.stride(1),
        lora_a_weights.stride(0),
        lora_a_weights.stride(1),
        lora_a_weights.stride(2),
        output_tensor.stride(0),
        output_tensor.stride(1),
        BLOCK_N=BLOCK_N,
        **config,
    )
    return


# Register as a PyTorch custom op when torch.library.custom_op is available;
# older PyTorch releases that lack it fall back to the plain function.
try:
    bgmv_shrink = torch.library.custom_op("lora::bgmv_shrink",
                                          _bgmv_shrink,
                                          mutates_args=["output_tensor"])
except AttributeError:
    bgmv_shrink = _bgmv_shrink
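

# A hedged usage sketch (illustration only, not part of the upstream module):
# it exercises bgmv_shrink on random data and checks it against a dense einsum
# reference. It assumes a CUDA device with Triton, and that the module is run
# as part of its package so the relative import of get_lora_op_configs
# resolves.
if __name__ == "__main__":
    torch.manual_seed(0)
    batch_size, hidden_size, rank, num_loras = 4, 4096, 16, 8
    x = torch.randn(batch_size, hidden_size, dtype=torch.float16, device="cuda")
    lora_a = torch.randn(num_loras, rank, hidden_size,
                         dtype=torch.float16, device="cuda")
    indices = torch.randint(0, num_loras, (batch_size, ),
                            dtype=torch.long, device="cuda")
    # Zero-initialized float32 output buffer; required when the tuned config
    # selects SPLIT_K > 1, since partial sums are accumulated with atomic_add.
    out = torch.zeros(batch_size, rank, dtype=torch.float32, device="cuda")

    bgmv_shrink(x, lora_a, out, indices, scaling=0.5)

    # Dense reference: out[b] = scaling * lora_a[indices[b]] @ x[b].
    ref = 0.5 * torch.einsum("bk,bnk->bn", x.float(), lora_a[indices].float())
    torch.testing.assert_close(out, ref, atol=1e-2, rtol=1e-2)
    print("bgmv_shrink matches the dense reference")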