fused_softmax.cpp

/* coding=utf-8
 * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cuda_fp16.h>
#include <torch/extension.h>

#include <vector>

namespace multihead_attn {
namespace fused_softmax {
namespace scaled_masked_softmax {
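
// Forward declarations of the CUDA entry points. They are not defined in this
// file; their definitions are expected to come from the accompanying CUDA
// source compiled into the same extension.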
torch::Tensor fwd_cuda(
    torch::Tensor const& input,
    torch::Tensor const& mask,
    float scale_factor);

torch::Tensor bwd_cuda(
    torch::Tensor const& output_grads,
    torch::Tensor const& softmax_results,
    float scale_factor);

int get_batch_per_block_cuda(
    int query_seq_len,
    int key_seq_len,
    int batches,
    int attn_heads);
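
// C++ wrappers: validate tensor rank and dtype on the host, then dispatch to
// the CUDA implementations declared above.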
torch::Tensor fwd(
    torch::Tensor const& input,
    torch::Tensor const& mask,
    float scale_factor) {
  AT_ASSERTM(input.dim() == 4, "expected 4D tensor");
  AT_ASSERTM((input.scalar_type() == at::ScalarType::Half) ||
                 (input.scalar_type() == at::ScalarType::BFloat16),
             "Only fp16 and bf16 are supported");
  AT_ASSERTM(mask.dim() == 4, "expected 4D tensor");

  return fwd_cuda(input, mask, scale_factor);
}

torch::Tensor bwd(
    torch::Tensor const& output_grads,
    torch::Tensor const& softmax_results,
    float scale_factor) {
  AT_ASSERTM(output_grads.dim() == 4, "expected 4D tensor");
  AT_ASSERTM(softmax_results.dim() == 4, "expected 4D tensor");
  AT_ASSERTM((output_grads.scalar_type() == at::ScalarType::Half) ||
                 (output_grads.scalar_type() == at::ScalarType::BFloat16),
             "Only fp16 and bf16 are supported");
  AT_ASSERTM((softmax_results.scalar_type() == at::ScalarType::Half) ||
                 (softmax_results.scalar_type() == at::ScalarType::BFloat16),
             "Only fp16 and bf16 are supported");

  return bwd_cuda(output_grads, softmax_results, scale_factor);
}

int get_batch_per_block(
    int query_seq_len,
    int key_seq_len,
    int batches,
    int attn_heads) {
  return get_batch_per_block_cuda(query_seq_len, key_seq_len, batches, attn_heads);
}

} // end namespace scaled_masked_softmax
} // end namespace fused_softmax
} // end namespace multihead_attn

namespace multihead_attn {
namespace fused_softmax {
namespace scaled_upper_triang_masked_softmax {
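
// Variant with an implicit upper-triangular (causal) mask: no mask tensor is
// passed, and per the checks below the inputs are 3D rather than 4D.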
torch::Tensor fwd_cuda(
    torch::Tensor const& input,
    float scale_factor);

torch::Tensor bwd_cuda(
    torch::Tensor const& output_grads,
    torch::Tensor const& softmax_results,
    float scale_factor);

torch::Tensor fwd(torch::Tensor const& input, float scale_factor) {
  AT_ASSERTM(input.dim() == 3, "expected 3D tensor");
  AT_ASSERTM((input.scalar_type() == at::ScalarType::Half) ||
                 (input.scalar_type() == at::ScalarType::BFloat16),
             "Only fp16 and bf16 are supported");

  return fwd_cuda(input, scale_factor);
}

torch::Tensor bwd(
    torch::Tensor const& output_grads,
    torch::Tensor const& softmax_results,
    float scale_factor) {
  AT_ASSERTM(output_grads.dim() == 3, "expected 3D tensor");
  AT_ASSERTM(softmax_results.dim() == 3, "expected 3D tensor");
  AT_ASSERTM((output_grads.scalar_type() == at::ScalarType::Half) ||
                 (output_grads.scalar_type() == at::ScalarType::BFloat16),
             "Only fp16 and bf16 are supported");
  AT_ASSERTM((softmax_results.scalar_type() == at::ScalarType::Half) ||
                 (softmax_results.scalar_type() == at::ScalarType::BFloat16),
             "Only fp16 and bf16 are supported");

  return bwd_cuda(output_grads, softmax_results, scale_factor);
}

} // end namespace scaled_upper_triang_masked_softmax
} // end namespace fused_softmax
} // end namespace multihead_attn
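
// Python bindings: expose the wrapper functions above under TORCH_EXTENSION_NAME.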
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("scaled_masked_softmax_forward",
        &multihead_attn::fused_softmax::scaled_masked_softmax::fwd,
        "Self Multihead Attention scaled, masked softmax -- Forward.");
  m.def("scaled_masked_softmax_backward",
        &multihead_attn::fused_softmax::scaled_masked_softmax::bwd,
        "Self Multihead Attention scaled, masked softmax -- Backward.");
  m.def("scaled_masked_softmax_get_batch_per_block",
        &multihead_attn::fused_softmax::scaled_masked_softmax::get_batch_per_block,
        "Return batch per block size.");
  m.def("scaled_upper_triang_masked_softmax_forward",
        &multihead_attn::fused_softmax::scaled_upper_triang_masked_softmax::fwd,
        "Self Multihead Attention scaled, upper-triangular masked softmax -- Forward.");
  m.def("scaled_upper_triang_masked_softmax_backward",
        &multihead_attn::fused_softmax::scaled_upper_triang_masked_softmax::bwd,
        "Self Multihead Attention scaled, upper-triangular masked softmax -- Backward.");
}
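
/*
 * Usage sketch (not part of the original file): how the wrappers above might
 * be exercised from C++ once the CUDA definitions are linked in and a CUDA
 * device is available. Shapes and the mask layout/dtype are illustrative
 * assumptions; the wrappers themselves only check rank and dtype.
 *
 *   auto opts  = torch::dtype(torch::kHalf).device(torch::kCUDA);
 *   auto input = torch::randn({2, 8, 128, 128}, opts);                      // 4D attention scores
 *   auto mask  = torch::zeros({2, 1, 128, 128}, opts.dtype(torch::kByte));  // assumed mask layout
 *   auto probs = multihead_attn::fused_softmax::scaled_masked_softmax::fwd(input, mask, 1.0f);
 *
 *   auto causal_in    = torch::randn({16, 128, 128}, opts);                 // 3D input for the causal variant
 *   auto causal_probs = multihead_attn::fused_softmax::scaled_upper_triang_masked_softmax::fwd(causal_in, 1.0f);
 */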