// utility.h
/*
 * Copyright (c) 2022-2024, NVIDIA CORPORATION. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once

#include <cassert>
#include <cmath>
#include <cstdint>
#include <cuda_fp16.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <iostream>

#include "cutlass/cutlass.h"
#include "cutlass_extensions/interleaved_numeric_conversion.h"

namespace tensorrt_llm
{
namespace kernels
{
  30. __forceinline__ __device__ float copysignf_pos(float a, float b)
  31. {
  32. float r;
  33. r = __int_as_float(__float_as_int(a) | (__float_as_int(b) & 0x80000000));
  34. return r;
  35. }
  36. __inline__ __device__ float tanh_opt(float x)
  37. {
  38. #if (__CUDA_ARCH__ >= 750 && CUDART_VERSION >= 11000)
  39. float r;
  40. asm("tanh.approx.f32 %0,%1; \n\t" : "=f"(r) : "f"(x));
  41. return r;
  42. #else
  43. const float exp_val = -1.f * fabs(2 * x);
  44. return copysignf_pos((1.0f - __expf(exp_val)) / (__expf(exp_val) + 1.0f), x);
  45. #endif
  46. }
  47. template <typename T>
  48. struct GeluActivation
  49. {
  50. static __device__ __forceinline__ T apply(const T& val)
  51. {
  52. const float cdf = 0.5f * (1.0f + tanh_opt((0.7978845608028654f * (val + 0.044715f * val * val * val))));
  53. return val * cdf;
  54. }
  55. };
  56. template <typename T>
  57. struct ReluActivation
  58. {
  59. static __device__ __forceinline__ T apply(const T& val)
  60. {
  61. return val > static_cast<T>(0.0f) ? val : static_cast<T>(0.0f);
  62. }
  63. };
  64. template <typename T>
  65. struct IdentityActivation
  66. {
  67. static __device__ __forceinline__ T apply(const T& val)
  68. {
  69. return val;
  70. }
  71. };
  72. template <typename VecType, typename T0, typename T1>
  73. __device__ __forceinline__ void load(T0* dst, T1* src, size_t offset = 0)
  74. {
  75. *reinterpret_cast<VecType*>(dst) = *(reinterpret_cast<const VecType*>(src) + offset);
  76. }
  77. template <typename AssignType, typename T>
  78. __device__ __forceinline__ void assign(T* dst, const AssignType& val)
  79. {
  80. *reinterpret_cast<AssignType*>(dst) = val;
  81. }
  82. template <typename VecType, typename T0, typename T1>
  83. __device__ __forceinline__ void store(T0* src, T1* dst, size_t offset = 0)
  84. {
  85. *(reinterpret_cast<VecType*>(dst) + offset) = *reinterpret_cast<const VecType*>(src);
  86. }
} // namespace kernels
} // namespace tensorrt_llm