# fused_softmax.py
# [2022-10-23] Copied from https://github.com/NVIDIA/apex/blob/master/apex/transformer/functional/fused_softmax.py
# for benchmarking.
# We added support for seqlen=2k and seqlen=4k
# coding=utf-8
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
  18. import torch
  19. from apex._autocast_utils import _cast_if_autocast_enabled
  20. from apex.transformer.enums import AttnMaskType
  21. from fused_softmax_lib import (
  22. scaled_masked_softmax_backward,
  23. scaled_masked_softmax_forward,
  24. scaled_masked_softmax_get_batch_per_block,
  25. scaled_upper_triang_masked_softmax_backward,
  26. scaled_upper_triang_masked_softmax_forward,
  27. )
  28. class ScaledUpperTriangMaskedSoftmax(torch.autograd.Function):
  29. """
  30. Fused operation which performs following three operations in sequence
  31. 1. Scale the tensor.
  32. 2. Apply upper triangular mask (typically used in gpt models).
  33. 3. Perform softmax.
  34. """
  35. @staticmethod
  36. def forward(ctx, inputs, scale):
  37. scale_t = torch.tensor([scale])
  38. softmax_results = scaled_upper_triang_masked_softmax_forward(inputs, scale_t[0])
  39. ctx.save_for_backward(softmax_results, scale_t)
  40. return softmax_results
  41. @staticmethod
  42. def backward(ctx, output_grads):
  43. softmax_results, scale_t = ctx.saved_tensors
  44. input_grads = scaled_upper_triang_masked_softmax_backward(
  45. output_grads, softmax_results, scale_t[0]
  46. )
  47. return input_grads, None
  48. def scaled_upper_triang_masked_softmax(inputs, _, scale):
  49. b, np, sq, sk = inputs.size()
  50. assert sq == sk, "causal mask is only for self attention"
  51. # Reshaping input to 3D tensor (attn_batches, sq, sk)
  52. inputs = inputs.view(-1, sq, sk)
  53. args = _cast_if_autocast_enabled(inputs, scale)
  54. with torch.cuda.amp.autocast(enabled=False):
  55. probs = ScaledUpperTriangMaskedSoftmax.apply(*args)
  56. return probs.view(b, np, sq, sk)
# NOTE (mkozuki): `ScaledMaskedSoftmax` somehow doesn't work well with `torch.cuda.amp.custom_fwd`.
# Without the `cast_inputs` kwarg, inputs are not cast to the dtype used in the autocast context.
# So two `torch.autograd.Function` subclasses had to be written manually.
# Fused operation which performs the following three operations in sequence:
# 1. Scale the tensor.
# 2. Apply the mask.
# 3. Perform softmax.
  64. class ScaledMaskedSoftmax(torch.autograd.Function):
  65. @staticmethod
  66. def forward(ctx, inputs, mask, scale):
  67. scale_t = torch.tensor([scale])
  68. softmax_results = scaled_masked_softmax_forward(inputs, mask, scale_t[0])
  69. ctx.save_for_backward(softmax_results, scale_t)
  70. return softmax_results
  71. @staticmethod
  72. def backward(ctx, output_grads):
  73. softmax_results, scale_t = ctx.saved_tensors
  74. input_grads = scaled_masked_softmax_backward(output_grads, softmax_results, scale_t[0])
  75. return input_grads, None, None
  76. def scaled_masked_softmax(inputs, mask, scale):
  77. # input is 4D tensor (b, np, sq, sk)
  78. args = _cast_if_autocast_enabled(inputs, mask, scale)
  79. with torch.cuda.amp.autocast(enabled=False):
  80. return ScaledMaskedSoftmax.apply(*args)
  81. class FusedScaleMaskSoftmax(torch.nn.Module):
  82. """
  83. fused operation: scaling + mask + softmax
  84. Arguments:
  85. input_in_fp16: flag to indicate if input in fp16 data format.
  86. input_in_bf16: flag to indicate if input in bf16 data format.
  87. attn_mask_type: attention mask type (pad or causal)
  88. scaled_masked_softmax_fusion: flag to indicate user want to use softmax fusion
  89. mask_func: mask function to be applied.
  90. softmax_in_fp32: if true, softmax in performed at fp32 precision.
  91. scale: scaling factor used in input tensor scaling.
  92. """
  93. def __init__(
  94. self,
  95. input_in_fp16,
  96. input_in_bf16,
  97. attn_mask_type,
  98. scaled_masked_softmax_fusion,
  99. mask_func,
  100. softmax_in_fp32,
  101. scale,
  102. ):
  103. super().__init__()
  104. self.input_in_fp16 = input_in_fp16
  105. self.input_in_bf16 = input_in_bf16
  106. if self.input_in_fp16 and self.input_in_bf16:
  107. raise RuntimeError("both fp16 and bf16 flags cannot be active at the same time.")
  108. self.input_in_float16 = self.input_in_fp16 or self.input_in_bf16
  109. self.attn_mask_type = attn_mask_type
  110. self.scaled_masked_softmax_fusion = scaled_masked_softmax_fusion
  111. self.mask_func = mask_func
  112. self.softmax_in_fp32 = softmax_in_fp32
  113. self.scale = scale
  114. if not (self.scale is None or softmax_in_fp32):
  115. raise RuntimeError("softmax should be in fp32 when scaled")
  116. if self.scaled_masked_softmax_fusion:
  117. if self.attn_mask_type == AttnMaskType.causal:
  118. self.fused_softmax_func = scaled_upper_triang_masked_softmax
  119. elif self.attn_mask_type == AttnMaskType.padding:
  120. self.fused_softmax_func = scaled_masked_softmax
  121. else:
  122. raise ValueError("Invalid attn_mask_type.")
  123. def forward(self, input, mask):
  124. # [b, np, sq, sk]
  125. assert input.dim() == 4
  126. if self.is_kernel_available(mask, *input.size()):
  127. return self.forward_fused_softmax(input, mask)
  128. else:
  129. return self.forward_torch_softmax(input, mask)
  130. def is_kernel_available(self, mask, b, np, sq, sk):
  131. attn_batches = b * np
  132. if (
  133. self.scaled_masked_softmax_fusion # user want to fuse
  134. and self.input_in_float16 # input must be fp16
  135. and (
  136. self.attn_mask_type == AttnMaskType.causal
  137. or (self.attn_mask_type == AttnMaskType.padding and mask is not None)
  138. )
  139. and 16 < sk <= 8192 # sk must be 16 ~ 8192
  140. and sq % 4 == 0 # sq must be divisor of 4
  141. and sk % 4 == 0 # sk must be divisor of 4
  142. and attn_batches % 4 == 0 # np * b must be divisor of 4
  143. ):
  144. if 0 <= sk <= 8192:
  145. batch_per_block = self.get_batch_per_block(sq, sk, b, np)
  146. if self.attn_mask_type == AttnMaskType.causal:
  147. if attn_batches % batch_per_block == 0:
  148. return True
  149. else:
  150. if sq % batch_per_block == 0:
  151. return True
  152. return False
  153. def forward_fused_softmax(self, input, mask):
  154. # input.shape = [b, np, sq, sk]
  155. scale = self.scale if self.scale is not None else 1.0
  156. return self.fused_softmax_func(input, mask, scale)
  157. def forward_torch_softmax(self, input, mask):
  158. if self.input_in_float16 and self.softmax_in_fp32:
  159. input = input.float()
  160. if self.scale is not None:
  161. input = input * self.scale
  162. mask_output = self.mask_func(input, mask) if mask is not None else input
  163. probs = torch.nn.Softmax(dim=-1)(mask_output)
  164. if self.input_in_float16 and self.softmax_in_fp32:
  165. if self.input_in_fp16:
  166. probs = probs.half()
  167. else:
  168. probs = probs.bfloat16()
  169. return probs
  170. @staticmethod
  171. def get_batch_per_block(sq, sk, b, np):
  172. return scaled_masked_softmax_get_batch_per_block(sq, sk, b, np)