/******************************************************************************
 * Copyright (c) 2023, Tri Dao.
 ******************************************************************************/

#pragma once

#include <cuda.h>
#include <vector>

#include "cutlass/fast_math.h"  // For cutlass::FastDivmod

////////////////////////////////////////////////////////////////////////////////////////////////////
struct Qkv_params {
    using index_t = int64_t;

    // The QKV matrices.
    void *__restrict__ q_ptr;
    void *__restrict__ k_ptr;
    void *__restrict__ v_ptr;

    // The stride between rows of the Q, K and V matrices.
    index_t q_batch_stride;
    index_t k_batch_stride;
    index_t v_batch_stride;
    index_t q_row_stride;
    index_t k_row_stride;
    index_t v_row_stride;
    index_t q_head_stride;
    index_t k_head_stride;
    index_t v_head_stride;

    // The number of heads.
    int h, h_k;
    // In the case of multi-query and grouped-query attention (MQA/GQA), h_k (the
    // number of K/V heads) can be smaller than h (the number of query heads).
    int h_h_k_ratio;  // precomputed h / h_k
};
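
// Illustrative sketch (not part of the original header; the variable names and
// values below are assumptions for the example): for a contiguous Q tensor laid
// out as [batch, seqlen, nheads, headdim] in row-major order, the strides above
// would typically be set in units of elements:
//
//   Qkv_params params;
//   params.q_ptr          = q_data;
//   params.q_batch_stride = seqlen * nheads * headdim;  // elements per batch entry
//   params.q_row_stride   = nheads * headdim;           // elements per token
//   params.q_head_stride  = headdim;                    // elements per head
//   params.h              = nheads;
//   params.h_k            = nheads_k;                   // == nheads unless MQA/GQA
//   params.h_h_k_ratio    = nheads / nheads_k;
//
// Under MQA/GQA, query head hq then reads K/V head hq / h_h_k_ratio.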

////////////////////////////////////////////////////////////////////////////////////////////////////
struct Flash_fwd_params : public Qkv_params {

    // The O matrix (output).
    void * __restrict__ o_ptr;
    void * __restrict__ oaccum_ptr;

    // The stride between rows of O.
    index_t o_batch_stride;
    index_t o_row_stride;
    index_t o_head_stride;

    // The pointer to the P matrix.
    void * __restrict__ p_ptr;

    // The pointer to the softmax sum.
    void * __restrict__ softmax_lse_ptr;
    void * __restrict__ softmax_lseaccum_ptr;

    // The dimensions.
    int b, seqlen_q, seqlen_k, seqlen_knew, d, seqlen_q_rounded, seqlen_k_rounded, d_rounded, rotary_dim, total_q, total_k;

    // The scaling factors for the kernel.
    float scale_softmax;
    float scale_softmax_log2;
    uint32_t scale_softmax_log2_half2;

    // Array of length b + 1 holding the starting offset of each sequence.
    int * __restrict__ cu_seqlens_q;
    int * __restrict__ cu_seqlens_k;

    // If provided, the actual length of each k sequence.
    int * __restrict__ seqused_k;

    int *__restrict__ blockmask;

    // The K_new and V_new matrices.
    void * __restrict__ knew_ptr;
    void * __restrict__ vnew_ptr;

    // The stride between rows of the K_new and V_new matrices.
    index_t knew_batch_stride;
    index_t vnew_batch_stride;
    index_t knew_row_stride;
    index_t vnew_row_stride;
    index_t knew_head_stride;
    index_t vnew_head_stride;

    // The cos and sin matrices for rotary embedding.
    void * __restrict__ rotary_cos_ptr;
    void * __restrict__ rotary_sin_ptr;

    // The indices to index into the KV cache.
    int * __restrict__ cache_batch_idx;

    // Paged KV cache
    int * __restrict__ block_table;
    index_t block_table_batch_stride;
    int page_block_size;

    // The dropout probability (probability of keeping an activation).
    float p_dropout;
    // uint32_t p_dropout_in_uint;
    // uint16_t p_dropout_in_uint16_t;
    uint8_t p_dropout_in_uint8_t;

    // Scale factor of 1 / (1 - p_dropout).
    float rp_dropout;
    float scale_softmax_rp_dropout;

    // Local window size
    int window_size_left, window_size_right;

    // Pointer to the RNG seed (idx 0) and offset (idx 1).
    uint64_t * rng_state;

    bool is_bf16;
    bool is_e4m3;
    bool is_causal;

    // If is_seqlens_k_cumulative, then seqlen_k is cu_seqlens_k[bidb + 1] - cu_seqlens_k[bidb].
    // Otherwise it's cu_seqlens_k[bidb], i.e., we use cu_seqlens_k to store the sequence lengths of K.
    bool is_seqlens_k_cumulative;

    bool is_rotary_interleaved;

    int num_splits;  // For split-KV version

    void * __restrict__ alibi_slopes_ptr;
    index_t alibi_slopes_batch_stride;

    bool unpadded_lse;  // For varlen paths: LSE is in [nheads, total_seqlen_q] format instead of [b, nheads, seqlen_q].

    int * __restrict__ tile_count_semaphore;
};
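
// Illustrative sketch (not part of the original header; values are example
// assumptions): in the variable-length ("varlen") path, cu_seqlens_q and
// cu_seqlens_k hold cumulative offsets, so per-batch lengths are recovered by
// differencing:
//
//   // Three query sequences of lengths 5, 3 and 7, packed back to back:
//   int cu_seqlens_q[] = {0, 5, 8, 15};                      // length b + 1 = 4
//   int seqlen_q_of_b1 = cu_seqlens_q[2] - cu_seqlens_q[1];  // == 3
//
// When is_seqlens_k_cumulative is false, cu_seqlens_k[bidb] instead stores the
// K length of batch bidb directly (see the comment on is_seqlens_k_cumulative).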

////////////////////////////////////////////////////////////////////////////////////////////////////
struct Flash_bwd_params : public Flash_fwd_params {

    // The dO and dQKV matrices.
    void *__restrict__ do_ptr;
    void *__restrict__ dq_ptr;
    void *__restrict__ dk_ptr;
    void *__restrict__ dv_ptr;

    // To accumulate dQ
    void *__restrict__ dq_accum_ptr;
    void *__restrict__ dk_accum_ptr;
    void *__restrict__ dv_accum_ptr;

    // // To accumulate dK and dV in case we're splitting the bwd along the
    // // seqlen_q dimension:
    // void *__restrict__ dk_accum_ptr;
    // void *__restrict__ dv_accum_ptr;

    // The stride between rows of the dO, dQ, dK and dV matrices.
    // TD [2022-04-16]: We're using 32-bit indexing to save registers.
    // The code probably won't work for arrays larger than 2GB.
    index_t do_batch_stride;
    index_t do_row_stride;
    index_t do_head_stride;
    index_t dq_batch_stride;
    index_t dk_batch_stride;
    index_t dv_batch_stride;
    index_t dq_row_stride;
    index_t dk_row_stride;
    index_t dv_row_stride;
    index_t dq_head_stride;
    index_t dk_head_stride;
    index_t dv_head_stride;

    // The pointer to the softmax d sum.
    void *__restrict__ dsoftmax_sum;

    int *__restrict__ dq_semaphore;

    bool deterministic;
    index_t dq_accum_split_stride;
};
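
// Illustrative note (an assumption, not stated in this header): when
// `deterministic` is set, dQ is accumulated into one buffer per split rather
// than via atomics, with dq_accum_split_stride giving the element stride
// between consecutive splits in dq_accum_ptr and dq_semaphore ordering the
// accumulation.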

////////////////////////////////////////////////////////////////////////////////////////////////////
template<typename T, int Headdim> void run_mha_fwd_(Flash_fwd_params &params, cudaStream_t stream);
template<typename T, int Headdim> void run_mha_bwd_(Flash_bwd_params &params, cudaStream_t stream);
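
// Illustrative dispatch sketch (assumed, not part of this header): each
// (dtype, head-dimension) pair is explicitly instantiated in its own .cu
// file, and a runtime dispatcher selects an instantiation from the params.
// cutlass::half_t / cutlass::bfloat16_t come from cutlass/numeric_types.h;
// the head-dimension cutoffs here are made up for the example.
//
//   #include "cutlass/numeric_types.h"
//
//   void run_mha_fwd(Flash_fwd_params &params, cudaStream_t stream) {
//       if (params.is_bf16) {
//           if (params.d <= 64) { run_mha_fwd_<cutlass::bfloat16_t, 64>(params, stream); }
//           else                { run_mha_fwd_<cutlass::bfloat16_t, 128>(params, stream); }
//       } else {
//           if (params.d <= 64) { run_mha_fwd_<cutlass::half_t, 64>(params, stream); }
//           else                { run_mha_fwd_<cutlass::half_t, 128>(params, stream); }
//       }
//   }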