attention_utils.cuh 1.9 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657
/*
 * Adapted from https://github.com/NVIDIA/FasterTransformer/blob/release/v5.3_tag/src/fastertransformer/kernels/decoder_masked_multihead_attention/decoder_masked_multihead_attention_template.hpp
 * Copyright (c) 2023, The PygmalionAI team.
 * Copyright (c) 2023, The vLLM team.
 * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
  19. #pragma once
  20. #include "../cuda_compat.h"
  21. #include "attention_dtypes.h"
  22. #include <float.h>
  23. #include <type_traits>
  24. namespace aphrodite {
  25. // Q*K^T operation.
  26. template<int THREAD_GROUP_SIZE, typename Vec, int N>
  27. inline __device__ float qk_dot_(const Vec (&q)[N], const Vec (&k)[N]) {
  28. using A_vec = typename FloatVec<Vec>::Type;
  29. // Compute the parallel products for Q*K^T (treat vector lanes separately).
  30. A_vec qk_vec = mul<A_vec, Vec, Vec>(q[0], k[0]);
  31. #pragma unroll
  32. for (int ii = 1; ii < N; ++ii) {
  33. qk_vec = fma(q[ii], k[ii], qk_vec);
  34. }
  35. // Finalize the reduction across lanes.
  36. float qk = sum(qk_vec);
  37. #pragma unroll
  38. for (int mask = THREAD_GROUP_SIZE / 2; mask >= 1; mask /= 2) {
  39. qk += APHRODITE_SHFL_XOR_SYNC(qk, mask);
  40. }
  41. return qk;
  42. }
  43. template<typename T, int THREAD_GROUP_SIZE>
  44. struct Qk_dot {
  45. template<typename Vec, int N>
  46. static inline __device__ float dot(const Vec (&q)[N], const Vec (&k)[N]) {
  47. return qk_dot_<THREAD_GROUP_SIZE>(q, k);
  48. }
  49. };
  50. } // namespace aphrodite