/******************************************************************************
 * Copyright (c) 2024, Jay Shah, Ganesh Bikshandi, Ying Zhang, Vijay Thakkar, Pradeep Ramani, Tri Dao.
 ******************************************************************************/

#pragma once

#include <cute/tensor.hpp>

#include "cutlass/fast_math.h"  // For cutlass::FastDivmod

#include "utils.h"

namespace flash {

using namespace cute;

template <int kBlockN, int kHeadDim, int kHeadDimV, int NumThreads, typename Element, bool KV_Same_Iter=false, int LoadsPerRow_LB=1>
struct PagedKVManager {
    // If KV_Same_Iter=false, then we do load_page_table(0), load_K(0), load_page_table(1), load_K(1), load_V(0),
    // load_page_table(2), load_K(2), load_V(1), etc.
    // So we need to compute the V pointers for the previous iteration.

    // LoadsPerRow_LB is the lower bound on the number of loads per row in the K direction. This is useful for
    // rotary, where we want each thread to have at least 2 loads per row.
    static constexpr bool SameHeadDim = (kHeadDim == kHeadDimV);
    static constexpr int kHeadDimGCD = cute::gcd(kHeadDim, kHeadDimV);

    // We use CpAsync for K and V if PagedKV, since TMA doesn't work there
    static constexpr int kGmemElemsPerLoad = sizeof(cute::uint128_t) / sizeof(Element);
    static_assert(kHeadDimGCD % kGmemElemsPerLoad == 0, "Headdim and HeaddimV must be a multiple of kGmemElemsPerLoad");
    // We want each "row" to have 64 elements (128 bytes, i.e. 1 cache line). E.g. if hdim=128, we want each
    // thread to have 4 loads in the M direction and 2 vectorized loads in the K direction.
    // In the case of PackGQA, this reduces the number of times we need to call divmod.
    static_assert(kHeadDimGCD % LoadsPerRow_LB == 0, "Headdim and HeaddimV must be a multiple of LoadsPerRow_LB");
    static constexpr int kBytePerRow = kHeadDimGCD / LoadsPerRow_LB * sizeof(Element);
    static constexpr int kBlockKGmem = (kBytePerRow % 128 == 0 ? 128 : (kBytePerRow % 64 == 0 ? 64 : 32)) / sizeof(Element);
    static constexpr int kGmemThreadsPerRow = kBlockKGmem / kGmemElemsPerLoad;
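    // Worked example (assuming a 2-byte Element such as fp16, kHeadDimGCD = 128, LoadsPerRow_LB = 1):
    // kGmemElemsPerLoad = 16 / 2 = 8, kBytePerRow = 128 * 2 = 256, kBlockKGmem = 128 / 2 = 64 elements
    // (one 128-byte cache line), so kGmemThreadsPerRow = 64 / 8 = 8 threads share each row.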
    static_assert(NumThreads % kGmemThreadsPerRow == 0, "NumThreads must be a multiple of kGmemThreadsPerRow");
    // We assume threads loading the same row are in the same warp. This is for an optimization in PagedKV where
    // these threads share the same page table entry and share the work of computing pointers to paged K and paged V.
    static_assert(cutlass::NumThreadsPerWarp % kGmemThreadsPerRow == 0, "kGmemThreadsPerRow must divide NumThreadsPerWarp");

    using GmemCopyAtomCpAsync = cute::Copy_Atom<SM80_CP_ASYNC_CACHEGLOBAL_ZFILL<uint128_t>, Element>;
    using GmemLayoutAtomKVCpAsync = Layout<Shape <Int<NumThreads / kGmemThreadsPerRow>, Int<kGmemThreadsPerRow>>,
                                           Stride<Int<kGmemThreadsPerRow>, _1>>;
    using GmemTiledCopyKVCpAsync = decltype(
        make_tiled_copy(GmemCopyAtomCpAsync{},
                        GmemLayoutAtomKVCpAsync{},
                        Layout<Shape<_1, Int<kGmemElemsPerLoad>>>{}));  // Val layout, 8 or 16 vals per load
    using GmemTiledCopyKVStore = decltype(
        make_tiled_copy(Copy_Atom<AutoVectorizingCopyWithAssumedAlignment<128>, Element>{},
                        GmemLayoutAtomKVCpAsync{},
                        Layout<Shape<_1, Int<kGmemElemsPerLoad>>>{}));  // Val layout, 8 or 16 vals per load
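    // The two tiled copies share the same thread/value layout but serve different paths:
    // GmemTiledCopyKVCpAsync is the load path (gmem -> smem via cp.async, which can zero-fill on a false
    // predicate), while GmemTiledCopyKVStore is a plain vectorized store used by store_K/store_V
    // (registers -> paged gmem), e.g. when writing new KV into the paged cache.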
    using ShapeKV = cute::Shape<int32_t, int32_t, int32_t, int32_t>;  // (seqlen, d, head, batch)
    using StrideKV = cute::Stride<int64_t, _1, int64_t, int64_t>;
    using ShapePageTable = cute::Shape<int32_t, int32_t>;  // (batch, max_num_pages_per_seq)
    using StridePageTable = cute::Stride<int64_t, _1>;

    using TensorPageTable = decltype(make_tensor(make_gmem_ptr(static_cast<int const*>(nullptr)), ShapePageTable{}, StridePageTable{})(int(0), _));
    using TensorKV = decltype(make_tensor(make_gmem_ptr(static_cast<Element*>(nullptr)), ShapeKV{}, StrideKV{})(_, _, int(0), _));
    using GmemThrCopyKVCpAsync = decltype(GmemTiledCopyKVCpAsync{}.get_thread_slice(int(0)));
    using TensortKcK = decltype(GmemTiledCopyKVCpAsync{}.get_thread_slice(int(0)).partition_D(cute::make_identity_tensor(Shape<Int<kBlockN>, Int<kHeadDim>>{})));
    using TensortKpK = decltype(make_tensor<bool>(make_shape(size<1>(TensortKcK{}), size<2>(TensortKcK{})), Stride<_0, _1>{}));
    using TensortVcV = decltype(GmemTiledCopyKVCpAsync{}.get_thread_slice(int(0)).partition_D(cute::make_identity_tensor(Shape<Int<kBlockN>, Int<kHeadDimV>>{})));
    using TensortVpV = decltype(make_tensor<bool>(make_shape(size<1>(TensortVcV{}), size<2>(TensortVcV{})), Stride<_0, _1>{}));
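    // Note the Stride<_0, _1> on the predicate tensors tKpK/tVpV: the row mode has stride 0, so a single
    // row of head-dim (column) predicates is shared across all rows a thread handles. The predicate only
    // tracks the column bound (d < headdim); row bounds are handled separately via seqlenk_row_limit.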
    // For PagedKV, it's expensive to calculate the pointers to K and V for each page table entry,
    // since those require int64_t arithmetic. We optimize by having threads split this work.
    // Typically there are 8 threads loading per row (e.g. hdim 64 and 128), and there are 11 rows
    // that each thread needs to load for the case of hdim 128 and kBlockN = 176.
    // So each of those 8 threads will calculate the K_ptr and V_ptr for ceil(11 / 8) = 2 rows.
    // We then use __shfl_sync to broadcast the pointers to the other threads in the warp.
    static_assert(CUTE_STATIC_V(size<1>(TensortKcK{})) == CUTE_STATIC_V(size<1>(TensortVcV{})));
    static constexpr int kPageEntryPerThread = cute::ceil_div(size<1>(TensortKcK{}), kGmemThreadsPerRow);
    using TensorPageOffset = decltype(make_tensor<cute::tuple<int, int>>(Shape<Int<kPageEntryPerThread>>{}));
    using TensorKVPtr = decltype(make_tensor<Element*>(Shape<Int<kPageEntryPerThread>>{}));
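    // These live in registers: each thread holds its kPageEntryPerThread (page, offset) pairs in
    // tPrPageOffset, and the V pointers derived from them in tPrVPtr (filled by compute_V_ptr).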
    GmemTiledCopyKVCpAsync gmem_tiled_copy_kv;
    cutlass::FastDivmod const &page_size_divmod;
    int const thread_idx;
    int const seqlen_k;
    int const leftpad_k;
    GmemThrCopyKVCpAsync const gmem_thr_copy_kv;
    TensorPageTable mPageTable;
    TensorKV mK_paged, mV_paged;
    TensortKpK tKpK;
    TensortVpV tVpV;
    TensorPageOffset tPrPageOffset;
    TensorKVPtr tPrVPtr;
    CUTLASS_DEVICE
    PagedKVManager(int const* const ptr_page_table,
                   ShapePageTable const &shape_pagetable, StridePageTable const &stride_pagetable,
                   Element* const ptr_K, ShapeKV const &shape_K, StrideKV const &stride_K,
                   Element* const ptr_V, int const headdim_v, StrideKV const &stride_V,
                   cutlass::FastDivmod const &page_size_divmod,
                   int const bidb, int const bidh, int const thread_idx, int const seqlen_k, int const leftpad_k
                   )
        : page_size_divmod(page_size_divmod)
        , thread_idx(thread_idx)
        , seqlen_k(seqlen_k)
        , leftpad_k(leftpad_k)
        , gmem_thr_copy_kv(gmem_tiled_copy_kv.get_thread_slice(thread_idx))
    {
        mPageTable = make_tensor(make_gmem_ptr(ptr_page_table), shape_pagetable, stride_pagetable)(bidb, _);
        mK_paged = make_tensor(make_gmem_ptr(ptr_K), shape_K, stride_K)(_, _, bidh, _);
        auto shape_V = make_shape(get<0>(shape_K), headdim_v, get<2>(shape_K), get<3>(shape_K));
        mV_paged = make_tensor(make_gmem_ptr(ptr_V), shape_V, stride_V)(_, _, bidh, _);
        tKpK = make_tensor<bool>(make_shape(size<1>(TensortKcK{}), size<2>(TensortKcK{})), Stride<_0, _1>{});
        Tensor cK = cute::make_identity_tensor(Shape<Int<kBlockN>, Int<kHeadDim>>{});  // (BLK_N,BLK_K) -> (blk_n,blk_k)
        Tensor tKcK = gmem_thr_copy_kv.partition_S(cK);
        #pragma unroll
        for (int k = 0; k < size<1>(tKpK); ++k) { tKpK(_0{}, k) = get<1>(tKcK(_0{}, _0{}, k)) < get<1>(shape_K); }
        Tensor tVpV_ = make_tensor<bool>(make_shape(size<1>(TensortVcV{}), size<2>(TensortVcV{})), Stride<_0, _1>{});
        Tensor cV = cute::make_identity_tensor(Shape<Int<kBlockN>, Int<kHeadDimV>>{});  // (BLK_N,BLK_K) -> (blk_n,blk_k)
        Tensor tVcV = gmem_thr_copy_kv.partition_S(cV);
        #pragma unroll
        for (int k = 0; k < size<1>(tVpV_); ++k) { tVpV_(_0{}, k) = get<1>(tVcV(_0{}, _0{}, k)) < get<1>(shape_V); }
        tVpV = cute::conditional_return<SameHeadDim>(tKpK, tVpV_);
    };
    template <bool Seqlenk_mask=false, bool First_iter=false>
    CUTLASS_DEVICE
    void load_page_table(const int n_block) {
        // The uncoalesced gmem load is intentional. This is so that each thread only loads the page table entries
        // it needs, and we don't need any sync between warps.
        // Assuming 8 threads per row and 176 rows, rows 0 to 175 are loaded by
        // threads 0, 8, 16, ..., 120, 1, 9, ..., 121, 2, 10, ..., 122, etc.
        #pragma unroll
        for (int i = 0; i < kPageEntryPerThread; ++i) {
            int const row = i * NumThreads + (thread_idx % kGmemThreadsPerRow) * (NumThreads / kGmemThreadsPerRow) + (thread_idx / kGmemThreadsPerRow);
            int const row_idx = n_block * kBlockN + row;
            int page_idx, page_offset;
            page_idx = page_size_divmod.divmod(page_offset, row_idx + leftpad_k);
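            // divmod splits the global row index into (page, offset-within-page). E.g., with a page size
            // of 256 and row_idx + leftpad_k = 700: page_idx = 2, page_offset = 188.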
            // Add the condition (i + 1) * NumThreads <= kBlockN since that is an upper bound of row
            // and is known at compile time. It avoids branching when e.g. kBlockN = 176 and i = 0.
            int const page = ((i + 1) * NumThreads <= kBlockN || row < kBlockN) && (!Seqlenk_mask || row_idx < seqlen_k) ? mPageTable[page_idx] : 0;
            tPrPageOffset[i] = {page, page_offset};
            // if (cute::thread0()) { printf("row = %d, page_idx = %d, page_offset = %d, page = %d, leftpad_k = %d, seqlen_k = %d\n", row, page_idx, page_offset, page, leftpad_k, seqlen_k); }
        }
        if constexpr (First_iter && !KV_Same_Iter) { compute_V_ptr(); }
    };
    CUTLASS_DEVICE
    TensorKVPtr compute_K_ptr() {
        Tensor tPrKPtr = make_tensor<Element*>(Shape<Int<kPageEntryPerThread>>{});
        #pragma unroll
        for (int i = 0; i < kPageEntryPerThread; ++i) {
            auto [page, page_offset] = tPrPageOffset[i];
            tPrKPtr[i] = &mK_paged(page_offset, _0{}, page);
        }
        return tPrKPtr;
    };

    CUTLASS_DEVICE
    void compute_V_ptr() {
        #pragma unroll
        for (int i = 0; i < kPageEntryPerThread; ++i) {
            auto [page, page_offset] = tPrPageOffset[i];
            tPrVPtr[i] = &mV_paged(page_offset, _0{}, page);
        }
    };
    template <bool Seqlenk_mask=false, typename TensorK>
    CUTLASS_DEVICE
    void load_K(const int n_block, TensorK &&sK) {
        // Whether we need a bound check to make sure the row doesn't go above kBlockN
        static constexpr bool EvenN = kBlockN % CUTE_STATIC_V(shape<0>(GmemLayoutAtomKVCpAsync{})) == 0;

        Tensor tPrKPtr = compute_K_ptr();

        // Only for index calculation, since all the indices of thread 0 are known at compile time
        auto gmem_thr0_copy_kv = gmem_tiled_copy_kv.get_thread_slice(_0{});
        Tensor tKsK = gmem_thr_copy_kv.partition_D(sK);
        Tensor cK = cute::make_identity_tensor(Shape<Int<kBlockN>, Int<kHeadDim>>{});  // (BLK_N,BLK_K) -> (blk_n,blk_k)
        // Repeat the partitioning with identity layouts
        Tensor tKcK = gmem_thr_copy_kv.partition_S(cK);
        Tensor t0KcK = gmem_thr0_copy_kv.partition_S(cK);

        // We want to use the row indices of thread0 to compare, since those are known at compile time.
        // So we subtract the first row index of this thread (get<0>(tKcK(_0{}, _0{}, _0{}))) from the limit.
        int const seqlenk_row_limit = -int(get<0>(tKcK(_0{}, _0{}, _0{}))) + (EvenN
            ? seqlen_k - n_block * kBlockN
            : (!Seqlenk_mask ? kBlockN : std::min(seqlen_k - n_block * kBlockN, kBlockN)));
        #pragma unroll
        for (int m = 0; m < size<1>(tKsK); ++m) {
            bool const should_load = EvenN
                ? (!Seqlenk_mask || get<0>(t0KcK(_0{}, m, _0{})) < seqlenk_row_limit)
                : get<0>(t0KcK(_0{}, m, _0{})) < seqlenk_row_limit;
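            // Each thread computed pointers for only kPageEntryPerThread rows; __shfl_sync broadcasts row m's
            // pointer from the lane that computed it (lane m % kGmemThreadsPerRow within each group of
            // kGmemThreadsPerRow lanes) to the rest. The pointer travels as a uint64_t since shuffles move
            // integer words, not pointers.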
            Element const* k_ptr = reinterpret_cast<Element const*>(__shfl_sync(0xffffffff, reinterpret_cast<uint64_t>(tPrKPtr(m / kGmemThreadsPerRow)), (m % kGmemThreadsPerRow), kGmemThreadsPerRow));
            Tensor mK_paged_cur = make_tensor(make_gmem_ptr(k_ptr), Shape<Int<kHeadDim>>{});
            Tensor mK_paged_cur_copy = cute::tiled_divide(mK_paged_cur, Shape<Int<kGmemElemsPerLoad>>{});
            if (should_load) {
                #pragma unroll
                for (int k = 0; k < size<2>(tKsK); ++k) {
                    int const ki = get<1>(tKcK(_0{}, _0{}, k)) / kGmemElemsPerLoad;
                    cute::copy(gmem_tiled_copy_kv.with(tKpK(_0{}, k)), mK_paged_cur_copy(_, ki), tKsK(_, m, k));
                }
            }  // Don't need to clear out the rest of the smem since we'll mask out the scores anyway
        }
    };
    template <bool Seqlenk_mask=false, typename TensorV>
    CUTLASS_DEVICE
    void load_V(const int n_block, TensorV &&sV) {
        // Whether we need a bound check to make sure the row doesn't go above kBlockN
        static constexpr bool EvenN = kBlockN % CUTE_STATIC_V(shape<0>(GmemLayoutAtomKVCpAsync{})) == 0;

        if constexpr (KV_Same_Iter) { compute_V_ptr(); }
        // Only for index calculation, since all the indices of thread 0 are known at compile time
        auto gmem_thr0_copy_kv = gmem_tiled_copy_kv.get_thread_slice(_0{});
        Tensor tVsV = gmem_thr_copy_kv.partition_D(sV);
        Tensor cV = cute::make_identity_tensor(Shape<Int<kBlockN>, Int<kHeadDimV>>{});  // (BLK_N,BLK_K) -> (blk_n,blk_k)
        // Repeat the partitioning with identity layouts
        Tensor tVcV = gmem_thr_copy_kv.partition_S(cV);
        Tensor t0VcV = gmem_thr0_copy_kv.partition_S(cV);
        int const seqlenk_row_limit = seqlen_k - n_block * kBlockN - get<0>(tVcV(_0{}, _0{}, _0{}));
        #pragma unroll
        for (int m = 0; m < size<1>(tVsV); ++m) {
            // It's faster to rely on cp.async to clear smem that is out of bounds
            // than to call cute::clear directly.
            // We have to be careful not to write to smem past `kBlockN` if !EvenN.
            // If kBlockN doesn't evenly divide the tiled copy, only the last `m` needs to be checked.
            if (EvenN || m < size<1>(tVsV) - 1 || get<0>(tVcV(_0{}, m, _0{})) < kBlockN) {
                bool const should_load = !Seqlenk_mask || get<0>(t0VcV(_0{}, m, _0{})) < seqlenk_row_limit;
                Element const* v_ptr = reinterpret_cast<Element const*>(__shfl_sync(0xffffffff, reinterpret_cast<uint64_t>(tPrVPtr(m / kGmemThreadsPerRow)), m % kGmemThreadsPerRow, kGmemThreadsPerRow));
                Tensor mV_paged_cur = make_tensor(make_gmem_ptr(v_ptr), Shape<Int<kHeadDimV>>{});
                Tensor mV_paged_cur_copy = cute::tiled_divide(mV_paged_cur, Shape<Int<kGmemElemsPerLoad>>{});
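                // The predicate passed to .with() combines the head-dim bound (tVpV) with the row bound
                // (should_load); when it is false, the ZFILL cp.async still executes but writes zeros to
                // smem instead of loading, which is what clears the out-of-bounds rows.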
                #pragma unroll
                for (int k = 0; k < size<2>(tVsV); ++k) {
                    int const ki = get<1>(tVcV(_0{}, _0{}, k)) / kGmemElemsPerLoad;
                    cute::copy(gmem_tiled_copy_kv.with(tVpV(_0{}, k) && should_load), mV_paged_cur_copy(_, ki), tVsV(_, m, k));
                }
            }
        }
        if constexpr (!KV_Same_Iter) { compute_V_ptr(); }
    };
    template <typename TensorK>
    CUTLASS_DEVICE
    void store_K(const int n_block, TensorK &&tKrK) {
        Tensor tPrKPtr = compute_K_ptr();

        // We're using the same partitioning as GmemTiledCopyKVCpAsync (used for loading).
        // Only for index calculation, since all the indices of thread 0 are known at compile time.
        auto gmem_thr0_copy_kv = gmem_tiled_copy_kv.get_thread_slice(_0{});
        Tensor cK = cute::make_identity_tensor(Shape<Int<kBlockN>, Int<kHeadDim>>{});  // (BLK_N,BLK_K) -> (blk_n,blk_k)
        // Repeat the partitioning with identity layouts
        Tensor tKcK = gmem_thr_copy_kv.partition_S(cK);
        Tensor t0KcK = gmem_thr0_copy_kv.partition_S(cK);
        GmemTiledCopyKVStore gmem_tiled_copy_kv_store;

        // We want to use the row indices of thread0 to compare, since those are known at compile time.
        // So we subtract the first row index of this thread (get<0>(tKcK(_0{}, _0{}, _0{}))) from the limit.
        // int const seqlenk_row_limit = seqlen_k - n_block * kBlockN - get<0>(tKcK(_0{}, _0{}, _0{}));
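        // Unlike the load path, a store must never touch gmem rows past the block or the sequence,
        // so the limit takes the min of the remaining seqlen and kBlockN.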
        int const seqlenk_row_limit = std::min(seqlen_k - n_block * kBlockN, kBlockN) - get<0>(tKcK(_0{}, _0{}, _0{}));
        // if (threadIdx.x == 128) { printf("bidx = %d, bidy = %d, bidz = %d, seqlen_k = %d, seqlenk_row_limit = %d\n", blockIdx.x, blockIdx.y, blockIdx.z, seqlen_k, seqlenk_row_limit); }
        #pragma unroll
        for (int m = 0; m < size<1>(tKrK); ++m) {
            bool const should_load = get<0>(t0KcK(_0{}, m, _0{})) < seqlenk_row_limit;
            Element* k_ptr = reinterpret_cast<Element*>(__shfl_sync(0xffffffff, reinterpret_cast<uint64_t>(tPrKPtr(m / kGmemThreadsPerRow)), (m % kGmemThreadsPerRow), kGmemThreadsPerRow));
            Tensor mK_paged_cur = make_tensor(make_gmem_ptr(k_ptr), Shape<Int<kHeadDim>>{});
            Tensor mK_paged_cur_copy = cute::tiled_divide(mK_paged_cur, Shape<Int<kGmemElemsPerLoad>>{});
            if (should_load) {
                #pragma unroll
                for (int k = 0; k < size<2>(tKrK); ++k) {
                    int const ki = get<1>(tKcK(_0{}, _0{}, k)) / kGmemElemsPerLoad;
                    if (tKpK(_0{}, k)) {
                        cute::copy(gmem_tiled_copy_kv_store, tKrK(_, m, k), mK_paged_cur_copy(_, ki));
                    }
                }
            }
        }
    };
    template <typename TensorV>
    CUTLASS_DEVICE
    void store_V(const int n_block, TensorV &&tVrV) {
        if constexpr (KV_Same_Iter) { compute_V_ptr(); }
        // Only for index calculation, since all the indices of thread 0 are known at compile time
        auto gmem_thr0_copy_kv = gmem_tiled_copy_kv.get_thread_slice(_0{});
        Tensor cV = cute::make_identity_tensor(Shape<Int<kBlockN>, Int<kHeadDimV>>{});  // (BLK_N,BLK_K) -> (blk_n,blk_k)
        // Repeat the partitioning with identity layouts
        Tensor tVcV = gmem_thr_copy_kv.partition_S(cV);
        Tensor t0VcV = gmem_thr0_copy_kv.partition_S(cV);
        GmemTiledCopyKVStore gmem_tiled_copy_kv_store;
        int const seqlenk_row_limit = std::min(seqlen_k - n_block * kBlockN, kBlockN) - get<0>(tVcV(_0{}, _0{}, _0{}));
        #pragma unroll
        for (int m = 0; m < size<1>(tVrV); ++m) {
            bool const should_load = get<0>(t0VcV(_0{}, m, _0{})) < seqlenk_row_limit;
            Element* v_ptr = reinterpret_cast<Element*>(__shfl_sync(0xffffffff, reinterpret_cast<uint64_t>(tPrVPtr(m / kGmemThreadsPerRow)), m % kGmemThreadsPerRow, kGmemThreadsPerRow));
            Tensor mV_paged_cur = make_tensor(make_gmem_ptr(v_ptr), Shape<Int<kHeadDimV>>{});
            Tensor mV_paged_cur_copy = cute::tiled_divide(mV_paged_cur, Shape<Int<kGmemElemsPerLoad>>{});
            if (should_load) {
                #pragma unroll
                for (int k = 0; k < size<2>(tVrV); ++k) {
                    int const ki = get<1>(tVcV(_0{}, _0{}, k)) / kGmemElemsPerLoad;
                    if (tVpV(_0{}, k)) {
                        cute::copy(gmem_tiled_copy_kv_store, tVrV(_, m, k), mV_paged_cur_copy(_, ki));
                    }
                }
            }
        }
        if constexpr (!KV_Same_Iter) { compute_V_ptr(); }
    };
};
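// A minimal usage sketch for one n_block iteration (hypothetical caller; the kernel context, the `sK`/`sV`
// smem tensors, and the parameter names are assumptions for illustration, not part of this header):
//
//   PagedKVManager<kBlockN, kHeadDim, kHeadDimV, NumThreads, Element> paged_kv(
//       ptr_page_table, shape_pagetable, stride_pagetable,
//       ptr_K, shape_K, stride_K, ptr_V, headdim_v, stride_V,
//       page_size_divmod, bidb, bidh, threadIdx.x, seqlen_k, leftpad_k);
//   paged_kv.load_page_table</*Seqlenk_mask=*/true, /*First_iter=*/true>(n_block);
//   paged_kv.load_K</*Seqlenk_mask=*/true>(n_block, sK);  // cp.async gmem -> smem
//   paged_kv.load_V</*Seqlenk_mask=*/true>(n_block, sV);  // also refreshes V ptrs per KV_Same_Iter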

} // namespace flash