#pragma once

#include <cmath>

#include <cute/tensor.hpp>

#include <cutlass/numeric_types.h>

#include "utils.h"
#include "cutlass/fast_math.h"

namespace flash {

using namespace cute;

// Reduce each row of a 2D register tensor into a 1D per-row summary, within the current thread.
// With zero_init the summary is overwritten; otherwise the new values are folded into it.
template<bool zero_init=true, typename Engine0, typename Layout0, typename Engine1, typename Layout1, typename Operator>
__device__ __forceinline__ void thread_reduce_(Tensor<Engine0, Layout0> const &tensor, Tensor<Engine1, Layout1> &summary, Operator &op) {
    static_assert(Layout0::rank == 2, "Only support 2D Tensor");
    static_assert(Layout1::rank == 1, "Only support 1D Tensor");
    CUTE_STATIC_ASSERT_V(size<0>(summary) == size<0>(tensor));
    #pragma unroll
    for (int mi = 0; mi < size<0>(tensor); mi++) {
        summary(mi) = zero_init ? tensor(mi, 0) : op(summary(mi), tensor(mi, 0));
        #pragma unroll
        for (int ni = 1; ni < size<1>(tensor); ni++) {
            summary(mi) = op(summary(mi), tensor(mi, ni));
        }
    }
}

// All-reduce each element of src across the 4 threads of a quad that share a row of the
// MMA accumulator, so every participating thread ends up with the full row reduction in dst.
template<typename Engine0, typename Layout0, typename Engine1, typename Layout1, typename Operator>
__device__ __forceinline__ void quad_allreduce_(Tensor<Engine0, Layout0> &dst, Tensor<Engine1, Layout1> &src, Operator &op) {
    CUTE_STATIC_ASSERT_V(size(dst) == size(src));
    #pragma unroll
    for (int i = 0; i < size(dst); i++){
        dst(i) = Allreduce<4>::run(src(i), op);
    }
}
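
// Allreduce<N> (from utils.h) is expected to perform a butterfly reduction with
// __shfl_xor_sync: for N = 4 that is two shuffle rounds (xor offsets 2 then 1), after which all
// four threads hold the reduced value. For example, with SumOp and per-thread values {1, 2, 3, 4},
// every thread ends up with 10. This is an assumption about utils.h, stated here for context.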

template<bool zero_init=true, typename Engine0, typename Layout0, typename Engine1, typename Layout1, typename Operator>
__device__ __forceinline__ void reduce_(Tensor<Engine0, Layout0> const& tensor, Tensor<Engine1, Layout1> &summary, Operator &op) {
    thread_reduce_<zero_init>(tensor, summary, op);
    quad_allreduce_(summary, summary, op);
}

template<bool zero_init=true, typename Engine0, typename Layout0, typename Engine1, typename Layout1>
__device__ __forceinline__ void reduce_max(Tensor<Engine0, Layout0> const& tensor, Tensor<Engine1, Layout1> &max){
    MaxOp<float> max_op;
    reduce_<zero_init>(tensor, max, max_op);
}

// Row sum. The warp_reduce flag lets callers skip the cross-thread reduction and keep only
// per-thread partial sums (e.g. when the full sum is not needed until finalize()).
template<bool zero_init=true, bool warp_reduce=true, typename Engine0, typename Layout0, typename Engine1, typename Layout1>
__device__ __forceinline__ void reduce_sum(Tensor<Engine0, Layout0> const& tensor, Tensor<Engine1, Layout1> &sum){
    SumOp<float> sum_op;
    thread_reduce_<zero_init>(tensor, sum, sum_op);
    if constexpr (warp_reduce) { quad_allreduce_(sum, sum, sum_op); }
}

// Compute 2^x on both fp16 lanes of a __half2 via the approximate PTX ex2 instruction.
__forceinline__ __device__ __half2 half_exp(__half2 x) {
    uint32_t tmp_out, tmp_in;
    tmp_in = reinterpret_cast<uint32_t&>(x);
    asm ("ex2.approx.f16x2 %0, %1;\n"
        : "=r"(tmp_out)
        : "r"(tmp_in));
    __half2 out = reinterpret_cast<__half2&>(tmp_out);
    return out;
}
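
// Note that despite the name this is a base-2 exponential (ex2), not e^x. A minimal, hypothetical
// usage sketch (the inputs a and b below are illustrative only; __floats2half2_rn is the standard
// CUDA fp16 conversion intrinsic):
//   __half2 x = __floats2half2_rn(a * float(M_LOG2E), b * float(M_LOG2E));
//   __half2 y = half_exp(x);   // y now holds approximately (e^a, e^b)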

// Fused row max, exponentiation, and row sum: compute the quad-reduced max of each row,
// overwrite the tensor with exp2(x * scale - max * scale), and accumulate the row sums.
template <bool zero_init=false, typename Engine0, typename Layout0, typename Engine1, typename Layout1>
__forceinline__ __device__ void max_scale_exp2_sum(Tensor<Engine0, Layout0> &tensor, Tensor<Engine1, Layout1> &max, Tensor<Engine1, Layout1> &sum, const float scale) {
    static_assert(Layout0::rank == 2, "Only support 2D Tensor");
    static_assert(Layout1::rank == 1, "Only support 1D Tensor");
    CUTE_STATIC_ASSERT_V(size<0>(max) == size<0>(tensor));
    #pragma unroll
    for (int mi = 0; mi < size<0>(tensor); ++mi) {
        MaxOp<float> max_op;
        max(mi) = zero_init ? tensor(mi, 0) : max_op(max(mi), tensor(mi, 0));
        #pragma unroll
        for (int ni = 1; ni < size<1>(tensor); ni++) {
            max(mi) = max_op(max(mi), tensor(mi, ni));
        }
        max(mi) = Allreduce<4>::run(max(mi), max_op);
        // If the row max is -INFINITY (the whole row was masked out), use 0 instead so that we
        // don't compute -inf - (-inf) = NaN below; every exponential then evaluates to 0.
        const float max_scaled = max(mi) == -INFINITY ? 0.f : max(mi) * scale;
        sum(mi) = 0;
        #pragma unroll
        for (int ni = 0; ni < size<1>(tensor); ++ni) {
            // exp2(x * scale - max_scaled): the scale-and-subtract fuses into a single FFMA
            // feeding the ex2 instruction, which is cheaper than a separate expf().
            tensor(mi, ni) = exp2f(tensor(mi, ni) * scale - max_scaled);
            sum(mi) += tensor(mi, ni);
        }
    }
}
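
// The exp2 form is mathematically exact: for a logit s, row max m, and softmax temperature t,
//     exp(t * (s - m)) = 2^(s * (t * log2(e)) - m * (t * log2(e))),
// so the caller is expected to pass scale = t * log2(e) (the same convention as
// softmax_scale_log2 in the Softmax struct below); that is an assumption about the call sites.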

// Exponentiate every element of `tensor` in place, subtracting the (optionally pre-scaled)
// per-row max. With Use_max_offset, 8 is also subtracted from the scaled max, so all outputs
// are scaled up by 2^8.
template <bool Scale_max=true, bool Check_inf=true, bool Use_max_offset=false,
          typename Engine0, typename Layout0, typename Engine1, typename Layout1>
__forceinline__ __device__ void scale_apply_exp2(Tensor<Engine0, Layout0> &tensor, Tensor<Engine1, Layout1> const &max, const float scale) {
    constexpr static float max_offset = Use_max_offset ? 8.0f : 0.0f;
    static_assert(Layout0::rank == 2, "Only support 2D Tensor");
    static_assert(Layout1::rank == 1, "Only support 1D Tensor");
    CUTE_STATIC_ASSERT_V(size<0>(max) == size<0>(tensor));
    #pragma unroll
    for (int mi = 0; mi < size<0>(tensor); ++mi) {
        // If the row max is -INFINITY (the whole row was masked out), use 0 instead so that we
        // don't compute -inf - (-inf) = NaN; the exponentials below then all evaluate to 0.
        const float max_scaled = Check_inf
            ? (max(mi) == -INFINITY ? 0.f : (!Scale_max ? max(mi) : max(mi) * scale) - max_offset)
            : (!Scale_max ? max(mi) : max(mi) * scale) - max_offset;
        #pragma unroll
        for (int ni = 0; ni < size<1>(tensor); ++ni) {
            // exp2(x * scale - max_scaled): the scale-and-subtract fuses into a single FFMA
            // feeding the ex2 instruction.
            tensor(mi, ni) = exp2f(tensor(mi, ni) * scale - max_scaled);
        }
    }
}
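
// With Use_max_offset, the largest element of a row maps to exp2(8) = 256 instead of 1, i.e.
// every probability (and therefore the row sum and the P*V accumulator) carries an extra 2^8
// factor, presumably to make better use of low-precision (e.g. FP8) dynamic range. finalize()
// below compensates: the stored LSE subtracts max_offset_E = 8 * ln(2), and normalizing by the
// equally inflated row sum cancels the factor in the output.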

template <int kNRows, bool Use_max_offset_ = false>
struct Softmax {

    constexpr static bool Use_max_offset = Use_max_offset_;

    using TensorT = decltype(make_tensor<float>(Shape<Int<kNRows>>{}));
    TensorT row_max, row_sum;
    const float softmax_scale_log2;

    // scale_ is expected to already be in base-2 units, i.e. softmax_scale * log2(e).
    CUTLASS_DEVICE Softmax(float scale_ = 1.f) : softmax_scale_log2(scale_) {};

    // Update the running row max with a new block of scores and return, per row, the factor
    // exp2((old_max - new_max) * softmax_scale_log2) by which row_sum (done here) and the
    // O accumulator (done by the caller via rescale_o) must be rescaled.
    template<bool Is_first, bool Check_inf=false, typename Tensor0>
    __forceinline__ __device__ TensorT max(Tensor0 &acc_s) {
        // Reshape acc_s from the MMA accumulator layout to (nrow, ncol).
        Tensor scores = make_tensor(acc_s.data(), flash::convert_layout_acc_rowcol(acc_s.layout()));
        static_assert(decltype(size<0>(scores))::value == kNRows);
        TensorT scores_scale;
        if constexpr (Is_first) {
            flash::template reduce_max</*zero_init=*/true>(scores, row_max);
            cute::fill(scores_scale, 1.f);
        } else {
            Tensor scores_max_prev = make_fragment_like(row_max);
            cute::copy(row_max, scores_max_prev);
            flash::template reduce_max</*zero_init=*/false>(scores, row_max);
            #pragma unroll
            for (int mi = 0; mi < size(row_max); ++mi) {
                float scores_max_cur = !Check_inf
                    ? row_max(mi)
                    : (row_max(mi) == -INFINITY ? 0.0f : row_max(mi));
                scores_scale(mi) = exp2f((scores_max_prev(mi) - scores_max_cur) * softmax_scale_log2);
                row_sum(mi) *= scores_scale(mi);
            }
        }
        return scores_scale;
    };
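
    // The rescaling in equations: with running maxes m_old (before) and m_new (after this block)
    // and softmax_scale_log2 = t * log2(e) for temperature t,
    //     exp(t * (s - m_old)) = exp(t * (s - m_new)) * exp(t * (m_new - m_old)),
    // so multiplying the previous row_sum and the O accumulator by
    //     scores_scale = exp2((m_old - m_new) * softmax_scale_log2) = exp(t * (m_old - m_new))
    // re-bases them to the new max before the new block's probabilities are accumulated.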

    // Exponentiate a block of scores in place against the current row_max and accumulate the
    // per-thread row sums; the cross-thread reduction of row_sum is deferred to finalize().
    template<bool Is_first, bool Check_inf=false, typename Tensor0>
    __forceinline__ __device__ TensorT online_softmax(Tensor0 &acc_s) {
        // Reshape acc_s from the MMA accumulator layout to (nrow, ncol).
        Tensor scores = make_tensor(acc_s.data(), flash::convert_layout_acc_rowcol(acc_s.layout()));
        static_assert(decltype(size<0>(scores))::value == kNRows);
        TensorT scores_scale;
        if constexpr (Is_first) {
            flash::template reduce_max</*zero_init=*/true>(scores, row_max);
            flash::template scale_apply_exp2</*Scale_max=*/true, /*Check_inf=*/true, Use_max_offset>(scores, row_max, softmax_scale_log2);
            flash::reduce_sum</*zero_init=*/true, /*warp_reduce=*/false>(scores, row_sum);
            cute::fill(scores_scale, 1.f);
        } else {
            // row_max is assumed to have been updated (and row_sum rescaled) by a preceding
            // call to max(), so here we only exponentiate and accumulate the partial sums.
            flash::template scale_apply_exp2</*Scale_max=*/true, Check_inf, Use_max_offset>(scores, row_max, softmax_scale_log2);
            // No quad all-reduce here: row_sum is only needed in fully reduced form at the end,
            // so that reduction is done once in finalize().
            flash::reduce_sum</*zero_init=*/false, /*warp_reduce=*/false>(scores, row_sum);
            // Note that scores_scale is left unset in this branch; callers get the rescale
            // factor from max() instead.
        }
        return scores_scale;
    };

    // Finish the softmax: all-reduce row_sum across the quad, overwrite row_sum with the
    // per-row log-sum-exp, and return the per-row factor with which to normalize O.
    template<bool Is_dropout=false, bool Split=false, typename Tensor0>
    __forceinline__ __device__ TensorT finalize(Tensor0 &acc_s, float descale_v = 1.f, float rp_dropout=1.f) {
        constexpr static float max_offset_E = Use_max_offset ? 8.f * float(M_LN2) : 0.f;
        Tensor scores = make_tensor(acc_s.data(), flash::convert_layout_acc_rowcol(acc_s.layout()));
        static_assert(decltype(size<0>(scores))::value == kNRows);
        SumOp<float> sum_op;
        quad_allreduce_(row_sum, row_sum, sum_op);
        TensorT scores_scale;
        #pragma unroll
        for (int mi = 0; mi < size(row_max); ++mi) {
            float sum = row_sum(mi);
            // Guard against empty or fully masked rows (sum == 0) and NaN sums (sum != sum).
            float inv_sum = (sum == 0.f || sum != sum) ? 0.f : descale_v / sum;
            row_sum(mi) = (sum == 0.f || sum != sum)
                ? (Split ? -INFINITY : INFINITY)
                : (row_max(mi) * softmax_scale_log2) * float(M_LN2) - max_offset_E + __logf(sum);
            scores_scale(mi) = !Is_dropout ? inv_sum : inv_sum * rp_dropout;
        }
        return scores_scale;
    };
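
    // Assuming softmax_scale_log2 = softmax_scale * log2(e), the stored value expands to
    //     lse_i = row_max_i * softmax_scale + ln(sum_i) - (Use_max_offset ? 8 * ln(2) : 0)
    //           = ln( sum_j exp(softmax_scale * s_ij) ),
    // since sum_i was accumulated in the max-subtracted (and possibly 2^8-offset) domain.
    // descale_v appears to fold a V dequantization factor into the normalization, and
    // rp_dropout the 1 / keep-probability dropout correction; both names are taken as given.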

    // Multiply each row of the O accumulator by its rescale / normalization factor.
    template<typename Tensor1>
    __forceinline__ __device__ void rescale_o(Tensor1 &acc_o, TensorT const &scores_scale) {
        // Reshape acc_o from the MMA accumulator layout to (nrow, ncol).
        Tensor acc_o_rowcol = make_tensor(acc_o.data(), flash::convert_layout_acc_rowcol(acc_o.layout()));
        static_assert(decltype(size<0>(acc_o_rowcol))::value == kNRows);
        #pragma unroll
        for (int mi = 0; mi < size(row_max); ++mi) {
            #pragma unroll
            for (int ni = 0; ni < size<1>(acc_o_rowcol); ++ni) { acc_o_rowcol(mi, ni) *= scores_scale(mi); }
        }
    };

};
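
// A rough sketch of how this struct is typically driven from an attention mainloop; the tensor
// names (acc_s, acc_o) and call sites below are illustrative assumptions, not part of this file:
//
//   flash::Softmax<kNRows> softmax(softmax_scale * float(M_LOG2E));
//   softmax.template online_softmax</*Is_first=*/true>(acc_s);            // first K/V tile
//   // ... for each subsequent tile:
//   auto scores_scale = softmax.template max</*Is_first=*/false>(acc_s);  // update running max
//   softmax.rescale_o(acc_o, scores_scale);                               // re-base O accumulator
//   softmax.template online_softmax</*Is_first=*/false>(acc_s);           // exp + partial sums
//   // ... after the last tile:
//   softmax.rescale_o(acc_o, softmax.finalize(acc_s));                    // normalize; LSE in row_sum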

}  // namespace flash