// attention_utils.cuh
/*
 * Adapted from
 * https://github.com/NVIDIA/FasterTransformer/blob/release/v5.3_tag/src/fastertransformer/kernels/decoder_masked_multihead_attention/decoder_masked_multihead_attention_template.hpp
 * Copyright (c) 2023, The PygmalionAI team.
 * Copyright (c) 2023, The vLLM team.
 * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once

#include "../cuda_compat.h"
#include "attention_dtypes.h"

#include <float.h>
#include <type_traits>
  25. namespace aphrodite {
  26. // Q*K^T operation.
  27. template <int THREAD_GROUP_SIZE, typename Vec, int N>
  28. inline __device__ float qk_dot_(const Vec (&q)[N], const Vec (&k)[N]) {
  29. using A_vec = typename FloatVec<Vec>::Type;
  30. // Compute the parallel products for Q*K^T (treat vector lanes separately).
  31. A_vec qk_vec = mul<A_vec, Vec, Vec>(q[0], k[0]);
  32. #pragma unroll
  33. for (int ii = 1; ii < N; ++ii) {
  34. qk_vec = aphrodite::fma(q[ii], k[ii], qk_vec);
  35. }
  36. // Finalize the reduction across lanes.
  37. float qk = sum(qk_vec);
  38. #pragma unroll
  39. for (int mask = THREAD_GROUP_SIZE / 2; mask >= 1; mask /= 2) {
  40. qk += APHRODITE_SHFL_XOR_SYNC(qk, mask);
  41. }
  42. return qk;
  43. }
  44. template <typename T, int THREAD_GROUP_SIZE>
  45. struct Qk_dot {
  46. template <typename Vec, int N>
  47. static inline __device__ float dot(const Vec (&q)[N], const Vec (&k)[N]) {
  48. return qk_dot_<THREAD_GROUP_SIZE>(q, k);
  49. }
  50. };
  51. } // namespace aphrodite