// flash_fwd_kernel.h

/******************************************************************************
 * Copyright (c) 2024, Jay Shah, Ganesh Bikshandi, Ying Zhang, Vijay Thakkar, Pradeep Ramani, Tri Dao.
 ******************************************************************************/

#pragma once

#include "cute/tensor.hpp"

#include <cutlass/cutlass.h>
#include <cutlass/arch/reg_reconfig.h>
#include <cutlass/array.h>
#include <cutlass/numeric_types.h>
#include <cutlass/numeric_conversion.h>
#include "cutlass/pipeline/pipeline.hpp"

#include "flash.h"
#include "utils.h"
#include "softmax.h"
#include "tile_scheduler.hpp"
#include "mainloop_fwd_sm90_tma_gmma_ws.hpp"
#include "epilogue_fwd_sm90_tma.hpp"

namespace flash {

using namespace cute;
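
// Warp-specialized forward attention kernel for SM90 (Hopper).
// One warp group acts as the producer: it issues TMA loads of Q, K, and V tiles
// into shared memory. The remaining warp groups are consumers: they run the two
// WGMMA GEMMs (Q @ K^T and P @ V) with the online softmax in between, as the
// producer/consumer branches below show.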
template <typename Ktraits, bool Is_causal, bool Varlen, typename TileScheduler>
__global__ void __launch_bounds__(Ktraits::kNWarps * cutlass::NumThreadsPerWarp, 1)
    compute_attn_ws(CUTE_GRID_CONSTANT typename CollectiveMainloopFwd<Ktraits, Is_causal, Varlen>::Params const mainloop_params,
                    CUTE_GRID_CONSTANT typename CollectiveEpilogueFwd<Ktraits, Varlen>::Params const epilogue_params,
                    CUTE_GRID_CONSTANT typename TileScheduler::Params const scheduler_params
                    ) {
    using Element = typename Ktraits::Element;
    using ElementAccum = typename Ktraits::ElementAccum;
    using SoftType = ElementAccum;
    using TileShape_MNK = typename Ktraits::TileShape_MNK;
    using ClusterShape = typename Ktraits::ClusterShape_MNK;

    static_assert(Ktraits::Is_WS);
    static constexpr bool Is_WS = Ktraits::Is_WS;

    static constexpr int NumMmaThreads = size(typename Ktraits::TiledMma0{});
    static constexpr int NumCopyThreads = !Is_WS ? 0 : cutlass::NumThreadsPerWarpGroup;
    static constexpr int kBlockM = Ktraits::kBlockM;
    // static constexpr int kBlockN = Ktraits::kBlockN;
    // constexpr int kHeadDim = Ktraits::kHeadDim;

    using CollectiveMainloop = CollectiveMainloopFwd<Ktraits, Is_causal, Varlen>;
    using CollectiveEpilogue = CollectiveEpilogueFwd<Ktraits, Varlen>;

    using MainloopPipeline = typename Ktraits::MainloopPipeline;
    using PipelineParams = typename MainloopPipeline::Params;
    using PipelineState = typename MainloopPipeline::PipelineState;
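
    // MainloopPipeline is a multi-stage async pipeline over shared-memory buffers.
    // K and V get separate pipelines so that GEMM-I on one K tile can proceed while
    // the TMA load of the matching V tile is still in flight.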
    extern __shared__ char shared_memory[];
    auto &shared_storage = *reinterpret_cast<typename Ktraits::SharedStorage*>(shared_memory);

    int const lane_predicate = cute::elect_one_sync();
    int const warp_idx = cutlass::canonical_warp_idx_sync();

    // Issue Tma Descriptor Prefetch from a single thread
    if (warp_idx == 0 && lane_predicate) {
        CollectiveMainloop::prefetch_tma_descriptors(mainloop_params);
        CollectiveEpilogue::prefetch_tma_descriptors(epilogue_params);
    }
    // Obtain this thread's index within its warp group
    int const warp_group_thread_idx = threadIdx.x % cutlass::NumThreadsPerWarpGroup;

    PipelineParams pipeline_params;
    pipeline_params.transaction_bytes = CollectiveMainloop::TmaTransactionBytesK;
    int warp_group_idx = cutlass::canonical_warp_group_idx();
    pipeline_params.role = warp_group_idx == 0
        ? MainloopPipeline::ThreadCategory::Producer
        : MainloopPipeline::ThreadCategory::Consumer;
    pipeline_params.is_leader = warp_group_thread_idx == 0;
    pipeline_params.num_consumers = NumMmaThreads;

    if (warp_idx == 0 && lane_predicate) {
        shared_storage.barrier_Q.init(1 /*numThreads*/);
        shared_storage.barrier_O.init(size(ClusterShape{}) /*numThreads*/);
    }
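    // barrier_Q expects a single arrival (the one thread that issues the TMA load
    // of Q), while barrier_O's arrival count scales with the cluster size, since
    // every block in the cluster participates in signaling it.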
    // We're counting on pipeline_k to call cutlass::arch::fence_barrier_init();
    MainloopPipeline pipeline_k(shared_storage.pipeline_k, pipeline_params, ClusterShape{});
    MainloopPipeline pipeline_v(shared_storage.pipeline_v, pipeline_params, ClusterShape{});

    CollectiveMainloop collective_mainloop;
    CollectiveEpilogue collective_epilogue;

    // We need this to guarantee that the Pipeline init is visible to all producers and consumer blocks in the Cluster
    if constexpr (size(ClusterShape{}) > 1) {
        cute::cluster_arrive_relaxed();
        cute::cluster_wait();
    } else {
        __syncthreads();
    }

    static_assert(Ktraits::kNWarps == 12 || Ktraits::kNWarps == 16);
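
    // Register reallocation: the producer warp group mostly issues TMA copies and
    // needs very few registers, so it deallocates down to 24 (or 32) registers per
    // thread, letting the consumer warp groups grow to 240 (or 160). The two
    // settings correspond to the kNWarps == 12 and kNWarps == 16 configurations
    // asserted above.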
    if (warp_group_idx == 0) {  // Producer
        cutlass::arch::warpgroup_reg_dealloc<Ktraits::kNWarps == 12 ? 24 : 32>();
        // cutlass::arch::warpgroup_reg_dealloc<56>();

        int warp_idx_in_warpgroup = __shfl_sync(0xffffffff, (threadIdx.x / 32) % 4, 0);
        if (warp_idx_in_warpgroup == 0) {  // Load Q, K, V
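            // TMA copies are issued by a single thread, so one warp of the producer
            // warp group is enough to drive all the loads; the other warps in this
            // group have nothing to do in this branch.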
            PipelineState smem_pipe_write_k = cutlass::make_producer_start_state<MainloopPipeline>();
            PipelineState smem_pipe_write_v = cutlass::make_producer_start_state<MainloopPipeline>();

            int work_idx = 0;

            TileScheduler scheduler(&shared_storage.tile_count_semaphore);
            for (auto work_tile_info = scheduler.get_initial_work();
                 work_tile_info.is_valid(scheduler_params);
                 work_tile_info = scheduler.template get_next_work</*IsProducer=*/true>(scheduler_params, work_tile_info)) {
                auto block_coord = work_tile_info.get_block_coord(scheduler_params);
                auto [m_block, bidh, bidb] = block_coord;

                int n_block_max = collective_mainloop.get_n_block_max(mainloop_params, m_block, bidb);
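                // If this tile has no work (fully masked out by causality, or past
                // the end of a variable-length sequence), skip it, but still advance
                // and broadcast the scheduler state so the consumers stay in sync.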
                if ((Is_causal && n_block_max <= 0)
                    || (Varlen && m_block * kBlockM >= collective_mainloop.get_seqlen_q(mainloop_params, bidb))) {
                    scheduler.prefetch_next_work(scheduler_params, work_tile_info);
                    scheduler.broadcast_next_work(work_tile_info);
                    continue;
                }
                collective_mainloop.load(mainloop_params, pipeline_k, pipeline_v, smem_pipe_write_k, smem_pipe_write_v,
                                         shared_storage, scheduler, scheduler_params, work_tile_info, block_coord, work_idx);
                ++work_idx;
            }
            collective_mainloop.load_tail(pipeline_k, pipeline_v, smem_pipe_write_k, smem_pipe_write_v);
        }
    } else {  // Consumer
        cutlass::arch::warpgroup_reg_alloc<Ktraits::kNWarps == 12 ? 240 : 160>();
        // cutlass::arch::warpgroup_reg_alloc<Ktraits::kNWarps == 12 ? 224 : 160>();

        TileScheduler scheduler(&shared_storage.tile_count_semaphore);
        // Initialize matmul objects.
        typename Ktraits::TiledMma1 tiled_mma1;

        PipelineState smem_pipe_read_k, smem_pipe_read_v;
        // We don't need separate variables smem_pipe_release_k and smem_pipe_release_v
        // (like in Cutlass's gemm) because the read and release pipeline states are always the same.

        collective_mainloop.mma_init();
        scheduler.init_consumer();

        int work_idx = 0;
        CUTLASS_PRAGMA_NO_UNROLL
        for (auto work_tile_info = scheduler.get_initial_work();
             work_tile_info.is_valid(scheduler_params);
             work_tile_info = scheduler.template get_next_work</*IsProducer=*/false>(scheduler_params, work_tile_info)) {
            // Attention output (GEMM-II) accumulator.
            Tensor tOrO = partition_fragment_C(tiled_mma1, select<0, 2>(TileShape_MNK{}));
            flash::Softmax<2 * (2 * kBlockM / NumMmaThreads)> softmax;
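            // Per-thread online-softmax state (running row max and row sum), sized
            // to the number of accumulator rows owned by each MMA thread.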
            auto block_coord = work_tile_info.get_block_coord(scheduler_params);
            auto [m_block, bidh, bidb] = block_coord;
            if (Varlen && m_block * kBlockM >= collective_mainloop.get_seqlen_q(mainloop_params, bidb)) {
                continue;
            }
            int n_block_max = collective_mainloop.get_n_block_max(mainloop_params, m_block, bidb);
            if (Is_causal && n_block_max <= 0) {  // We exit early and write 0 to gO and -inf to gLSE.
                collective_epilogue.store_zero(epilogue_params, threadIdx.x - NumCopyThreads, block_coord);
                continue;
            }

            collective_mainloop.mma(mainloop_params, pipeline_k, pipeline_v, smem_pipe_read_k, smem_pipe_read_v,
                                    tOrO, softmax, n_block_max, threadIdx.x - NumCopyThreads, work_idx, block_coord, shared_storage);
            // tOrO, softmax, n_block_max, threadIdx.x - NumCopyThreads + (work_idx >> 30), work_idx, shared_storage);
            collective_epilogue.store(epilogue_params, tOrO, softmax.row_sum, shared_storage, tiled_mma1,
                                      threadIdx.x - NumCopyThreads, block_coord);

            ++work_idx;
        }
        collective_epilogue.store_tail();
    }
}

} // namespace flash
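
// A minimal launch sketch (illustrative only; the trait, scheduler, and helper
// names below are assumptions — in the repo the kernel is launched by a
// run_flash_fwd-style template). The requirements visible from this file: opt in
// to the full dynamic shared memory carve-out, launch kNWarps * 32 threads per
// block, and use cluster launch when ClusterShape is non-trivial.
//
//   using Ktraits = Flash_fwd_kernel_traits<...>;                     // assumed name
//   using Scheduler = flash::StaticPersistentTileScheduler;           // assumed name
//   auto kernel = &flash::compute_attn_ws<Ktraits, /*Is_causal=*/false,
//                                         /*Varlen=*/false, Scheduler>;
//   constexpr int smem_size = sizeof(typename Ktraits::SharedStorage);
//   cudaFuncSetAttribute(kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size);
//   dim3 block(Ktraits::kNWarps * cutlass::NumThreadsPerWarp);
//   dim3 grid = Scheduler::get_grid_dim(scheduler_args, num_SMs);     // assumed helper
//   kernel<<<grid, block, smem_size>>>(mainloop_params, epilogue_params, scheduler_params);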