import math
from typing import Tuple, Optional

import torch
import triton
import triton.language as tl

from aphrodite.modeling.layers.ops.rand import seeded_uniform

_EPS = 1e-6

# This is a hardcoded limit in Triton (max block size).
MAX_TRITON_N_COLS = 131072


def get_num_triton_sampler_splits(n_cols: int) -> int:
    """Get the number of splits to use for Triton sampling.

    Triton has a limit on the number of columns it can handle, so we need
    to split the tensor and call the kernel multiple times if it's too
    large.
    """
    return math.ceil(n_cols / MAX_TRITON_N_COLS)
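
# For example, a 32,000-column vocab fits in a single kernel call
# (math.ceil(32000 / 131072) == 1), while a hypothetical 262,144-column
# vocab would need math.ceil(262144 / 131072) == 2 splits.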


def _multi_split_sample(
    probs: torch.Tensor,
    seeds: torch.Tensor,
    n_splits: int,
    sampled_tokens_size: Tuple[int, int],
    sampled_logprobs_size: Tuple[int, int],
    sample_indices: torch.Tensor,
    *,
    logprobs: Optional[torch.Tensor] = None,
    modify_greedy_probs: bool = False,
    save_logprobs: bool = False,
):
    """Sample tokens where the vocab size is split into multiple parts
    (too large for Triton otherwise)."""
    assert seeds.ndim == 2 and seeds.shape[0] == n_splits
    split_probs = probs.tensor_split(n_splits, 1)
    split_logprobs = logprobs.tensor_split(n_splits, 1)
    sampled_tokens_tmp = [
        torch.empty(sampled_tokens_size, dtype=torch.long, device=probs.device)
        for _ in range(n_splits)
    ]
    sampled_logprobs_tmp = [
        torch.empty(sampled_logprobs_size,
                    dtype=probs.dtype,
                    device=probs.device) for _ in range(n_splits)
    ]
    # We are purposefully using sampled_tokens_size here, as we always
    # need to save the modified probs in this case.
    sampled_modified_probs_tmp = [
        torch.empty(sampled_tokens_size,
                    dtype=probs.dtype,
                    device=probs.device) for _ in range(n_splits)
    ]
    for i in range(n_splits):
        n_samples = sample_indices.shape[0]
        n_cols = split_probs[i].shape[1]
        n_best = sampled_tokens_tmp[i].shape[1]
        uniform_noise = seeded_uniform(n_samples,
                                       n_best,
                                       n_cols,
                                       seeds=seeds[i].flatten(),
                                       device=split_probs[i].device,
                                       dtype=split_probs[i].dtype)
        # TODO: See if we can remove the contiguous() calls.
        # Will need kernel support.
        _sample(
            split_probs[i].contiguous(),
            split_logprobs[i].contiguous(),
            sample_indices,
            sampled_tokens_tmp[i],
            sampled_logprobs_tmp[i],
            sampled_modified_probs_tmp[i],
            seeds[i],
            uniform_noise,
            modify_greedy_probs=False,
            save_logprobs=save_logprobs,
            save_modified_probs=True,
        )
        if i > 0:
            # Offset the sampled token ids by the total width of the
            # preceding splits. (tensor_split may produce splits of
            # unequal width, so sum them explicitly rather than
            # multiplying by a single split's width.)
            sampled_tokens_tmp[i].add_(
                sum(s.shape[1] for s in split_probs[:i]))
    sampled_tokens = torch.stack(sampled_tokens_tmp)
    sampled_modified_probs = torch.stack(sampled_modified_probs_tmp)
    # Reduce the results from the splits.
    sampled_modified_probs, indices = torch.max(sampled_modified_probs,
                                                dim=0,
                                                keepdim=True)
    sampled_tokens = sampled_tokens.gather(0, indices).squeeze(0)
    if save_logprobs:
        sampled_logprobs = torch.stack(sampled_logprobs_tmp)
        sampled_logprobs = sampled_logprobs.gather(0, indices).squeeze(0)
    else:
        sampled_logprobs = None
    sampled_modified_probs = sampled_modified_probs.squeeze(0)
    if modify_greedy_probs:
        # We need to modify the greedy probs for the sampled tokens.
        # We can't do this in the kernel as we need to know the
        # sampled tokens.
        probs.fill_(0.0)
        probs.scatter_(1, sampled_tokens, 1.0)
    return (sampled_tokens, sampled_logprobs, sampled_modified_probs)
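
# Note on the reduction above: the kernel samples by taking the argmax of
# probs divided by Exp(1) noise (an "exponential race"), so the winner over
# the full vocab is simply the per-split winner with the largest modified
# prob. Reducing the stacked split results with torch.max is therefore
# exact, not an approximation.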


def sample(
    probs: torch.Tensor,
    seeds: torch.Tensor,
    *,
    max_best_of: int = 1,
    sample_indices: Optional[torch.Tensor] = None,
    logprobs: Optional[torch.Tensor] = None,
    modify_greedy_probs: bool = False,
    save_logprobs: bool = False,
    _save_modified_probs: bool = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[torch.Tensor]]:
    """Sample tokens from probs, with per-sequence seeds.

    Can sample from a subset of sequences through sample_indices.

    Args:
        probs: Probabilities to sample from.
            shape = [batch_size, vocab_size]
        seeds: Per-sequence seed values, one row per vocab split
            (see _multi_split_sample).
            shape = [math.ceil(vocab_size / MAX_TRITON_N_COLS), n]
        max_best_of: Number of samples to generate per sequence.
            Sequence seed will be incremented by 1 each time.
        sample_indices: Indices of sequences to sample from.
            If not provided, will sample from all sequences.
            shape = [n]
        logprobs: Log-probabilities of the sampled tokens.
            Only used for saving the logprobs if save_logprobs is True.
            shape = [batch_size, vocab_size]
        modify_greedy_probs: Whether to modify the greedy probabilities
            for speculative sampling (sampled token = 1.0,
            everything else = 0.0).
        save_logprobs: Whether to save the log-probabilities of the
            sampled tokens to a tensor.
        _save_modified_probs: Whether to save the modified probabilities
            (including gumbel noise) of the sampled tokens to a tensor.
            DOES NOT include the modification done by modify_greedy_probs
            (because we want to use the unmodified probs to pick the best
            split in case of multi-split sampling).
            This is exposed only for testing.

    Returns:
        sampled_tokens: shape = [n, max_best_of]
        sampled_logprobs: shape = [n, max_best_of] if save_logprobs else None
        sampled_modified_probs: shape = [n, max_best_of]
            if _save_modified_probs else None
    """
    if sample_indices is None:
        sample_indices = torch.arange(0, probs.shape[0], device=probs.device)

    sampled_tokens_size = (sample_indices.size(0), max_best_of)
    if save_logprobs:
        if logprobs is None:
            raise ValueError(
                "logprobs tensor must be provided if save_logprobs is True")
        sampled_logprobs_size = sampled_tokens_size
    else:
        # Empty tensors to invoke the kernel
        sampled_logprobs_size = (0, 0)
        logprobs = probs

    if _save_modified_probs:
        sampled_modified_probs_size = sampled_tokens_size
    else:
        # Empty tensors to invoke the kernel
        sampled_modified_probs_size = (0, 0)

    # If the number of columns in probs is too large for Triton to handle,
    # we split the tensor and sample from each split separately, and then
    # do an argmax+gather to combine the results.
    n_splits = get_num_triton_sampler_splits(probs.shape[1])
    if n_splits > 1:
        (sampled_tokens, sampled_logprobs,
         sampled_modified_probs) = _multi_split_sample(
             probs,
             seeds,
             n_splits,
             sampled_tokens_size,
             sampled_logprobs_size,
             sample_indices,
             logprobs=logprobs,
             modify_greedy_probs=modify_greedy_probs,
             save_logprobs=save_logprobs)
    else:
        sampled_tokens = torch.empty(sampled_tokens_size,
                                     dtype=torch.long,
                                     device=probs.device)
        sampled_logprobs = torch.empty(sampled_logprobs_size,
                                       dtype=probs.dtype,
                                       device=probs.device)
        sampled_modified_probs = torch.empty(sampled_modified_probs_size,
                                             dtype=probs.dtype,
                                             device=probs.device)
        n_samples = sample_indices.shape[0]
        n_cols = probs.shape[1]
        uniform_noise = seeded_uniform(n_samples,
                                       max_best_of,
                                       n_cols,
                                       seeds=seeds.flatten(),
                                       device=probs.device,
                                       dtype=probs.dtype)
        _sample(
            probs,
            logprobs,
            sample_indices,
            sampled_tokens,
            sampled_logprobs,
            sampled_modified_probs,
            seeds,
            uniform_noise,
            modify_greedy_probs=modify_greedy_probs,
            save_logprobs=save_logprobs,
            save_modified_probs=_save_modified_probs,
        )
    return (sampled_tokens, sampled_logprobs if save_logprobs else None,
            sampled_modified_probs if _save_modified_probs else None)
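
# Typical usage (a sketch; see also the runnable smoke test at the bottom
# of this file): with probs of shape [batch, vocab] and nonzero seeds of
# shape [n_splits, batch],
#     tokens, token_logprobs, _ = sample(
#         probs, seeds, logprobs=probs.log(), save_logprobs=True)
# returns the token ids and their logprobs, each of shape [batch, 1].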


def _sample(probs: torch.Tensor,
            logprobs: torch.Tensor,
            sample_indices: torch.Tensor,
            output_samples: torch.Tensor,
            output_logprobs: torch.Tensor,
            output_modified_probs: torch.Tensor,
            seeds: torch.Tensor,
            uniform_noise: torch.Tensor,
            *,
            modify_greedy_probs: bool = False,
            save_logprobs: bool = True,
            save_modified_probs: bool = False
            ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """Sample tokens from probs.

    Args:
        probs [batch_size, vocab_size]: probs to sample from.
        logprobs [batch_size, vocab_size]: logprobs (used when
            save_logprobs is True).
        sample_indices [n]: Indices of the samples to use for each row
            of probs.
        output_samples [n, n_best]: Output tensor to store samples in.
        output_logprobs [n, n_best]: Output tensor to store logprobs in.
        output_modified_probs [n, n_best]: Output tensor to store
            probs of chosen tokens in (modified with noise).
        seeds [n]: Seeds to use for sampling. If the seed is 0, we use
            greedy sampling. Note this is ONLY used for determining
            whether to use random sampling or not. The actual random
            noise should be passed as uniform_noise.
        uniform_noise [batch_size, n_best, vocab_size]: Uniform
            noise to use for random sampling (will be converted
            to exponential noise by the kernel, equivalent to the
            Gumbel-max trick).
        modify_greedy_probs: If True, we modify the probs tensor in-place
            to encode the sampling method used for each row. This is used
            in speculative decoding. Only applies in greedy decoding.
        save_logprobs: If True, we save the logprobs of the sampled tokens
            in the output_logprobs tensor.
        save_modified_probs: If True, we save the modified probs (with
            noise) of the sampled tokens in the output_modified_probs
            tensor. DOES NOT include the modification done by
            modify_greedy_probs (because we want to use the unmodified
            probs to pick the best split in case of multi-split sampling).
    """
    n_samples = sample_indices.shape[0]
    n_cols = probs.shape[1]
    n_best = output_samples.shape[1] if len(output_samples.shape) > 1 else 1

    # The block size is the smallest power of two greater than the number
    # of columns in probs.
    block_size = triton.next_power_of_2(n_cols)
    num_warps = 4
    # Manual tuning. This seems to give the best performance on A100 for
    # simple kernels like this.
    if block_size >= 8192:
        num_warps = 32
    elif block_size >= 4096:
        num_warps = 16
    elif block_size >= 2048:
        num_warps = 8

    # Enqueue the kernel. The 2D launch grid is simple: one kernel
    # instance per (row, best) pair of the probs matrix.
    _sample_triton[(n_samples, n_best)](
        sample_indices,
        output_samples,
        output_logprobs,
        output_modified_probs,
        probs,
        logprobs,
        seeds,
        uniform_noise,
        output_samples.stride(0),
        probs.stride(0),
        uniform_noise.stride(0),
        uniform_noise.stride(1) if n_best > 1 else 1,
        n_samples,
        n_cols,
        n_best,
        num_warps=num_warps,
        block_size=block_size,
        modify_greedy_probs=modify_greedy_probs,
        save_logprobs=save_logprobs,
        save_modified_probs=save_modified_probs,
    )
    return output_samples, output_logprobs, output_modified_probs


@triton.jit
def _uniform_to_exponential(uniform_noise):
    """Convert uniform samples to exponential samples."""
    # tl.rand returns values in [0, 1), so we clamp the lower bound
    # to _EPS to avoid log(0) and thus division by 0 later
    lb = tl.full(uniform_noise.shape, _EPS, uniform_noise.dtype)
    uniform_noise = tl.maximum(uniform_noise, lb)
    # Use the inversion method to turn uniform samples
    # into exponential samples
    exponential_noise = -tl.log(uniform_noise)
    return exponential_noise
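
# Why this works: if U ~ Uniform(0, 1), then -log(U) ~ Exp(1) (inverse-CDF
# sampling). Dividing each prob p_i by an independent Exp(1) draw and taking
# the argmax selects token i with probability p_i / sum_j(p_j), which is
# equivalent to the Gumbel-max trick applied to log p_i.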


@triton.jit
def _sample_triton(
        sample_indices_ptr: torch.Tensor, output_ptr: torch.Tensor,
        output_logprobs_ptr: torch.Tensor,
        output_modified_probs_ptr: torch.Tensor, probs_ptr: torch.Tensor,
        logprobs_ptr: torch.Tensor, seeds_ptr: torch.Tensor,
        uniform_noise_ptr: torch.Tensor, output_row_stride: int,
        probs_row_stride: int, uniform_noise_row_stride: int,
        uniform_noise_best_stride: int, n_samples: int, n_cols: int,
        n_best: int, block_size: tl.constexpr,
        modify_greedy_probs: tl.constexpr, save_logprobs: tl.constexpr,
        save_modified_probs: tl.constexpr):
    # The rows are independent, so we parallelize across those
    sample_idx = tl.program_id(0)
    best_idx = tl.program_id(1)

    # Load the row index from DRAM
    row_idx = tl.load(sample_indices_ptr + sample_idx)
    seed = tl.load(seeds_ptr + sample_idx)
    uses_random_sampling = seed != 0

    # The stride represents how much we need to increase the
    # pointer to advance 1 row
    row_start_ptr = probs_ptr + row_idx * probs_row_stride

    # The block size is the next power of two greater than n_cols,
    # so we can fit each row in a single block
    col_offsets = tl.arange(0, block_size)

    # Load the row into SRAM, using a mask since block_size may be
    # greater than n_cols
    row = tl.load(row_start_ptr + col_offsets,
                  mask=col_offsets < n_cols,
                  other=float("-inf"))

    if uses_random_sampling:
        uniform_noise_start_ptr = (uniform_noise_ptr +
                                   sample_idx * uniform_noise_row_stride +
                                   best_idx * uniform_noise_best_stride)
        uniform_noise = tl.load(uniform_noise_start_ptr + col_offsets,
                                mask=col_offsets < n_cols,
                                other=0.5)
        exponential_noise = _uniform_to_exponential(uniform_noise)
        row /= exponential_noise

    sampled_value, sampled_token = tl.max(row, axis=0, return_indices=True)
    # Clamp the sampled token to n_cols - 1. This should not be
    # necessary, but we do it just in case.
    if sampled_token >= n_cols:
        sampled_token = n_cols - 1
    # Write back the output to DRAM
    output_row_start_ptr = (output_ptr + sample_idx * output_row_stride +
                            best_idx)
    tl.store(output_row_start_ptr, sampled_token)

    if modify_greedy_probs:  # noqa
        if not uses_random_sampling:
            # Set the probability of the sampled token to 1, all other
            # tokens to zero. This is used in speculative decoding where
            # the sampling method must be encoded within the sampled
            # probability distributions.
            row = tl.where(col_offsets == sampled_token, 1.0, 0.0)
            tl.store(row_start_ptr + col_offsets,
                     row,
                     mask=col_offsets < n_cols)

    if save_modified_probs:
        output_row_start_ptr = (output_modified_probs_ptr +
                                sample_idx * output_row_stride + best_idx)
        tl.store(output_row_start_ptr, sampled_value)

    if save_logprobs:
        # Load the logprob of the sampled token from DRAM
        sampled_logprob = tl.load(logprobs_ptr + row_idx * probs_row_stride +
                                  sampled_token)
        # Write back the output to DRAM
        output_row_start_ptr = (output_logprobs_ptr +
                                sample_idx * output_row_stride + best_idx)
        tl.store(output_row_start_ptr, sampled_logprob)
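

if __name__ == "__main__":
    # Minimal smoke test, a sketch rather than part of the module API.
    # It assumes a CUDA device, since seeded_uniform and the Triton
    # kernels above only run on GPU; the batch/vocab sizes are arbitrary.
    batch_size, vocab_size = 4, 32000
    probs = torch.softmax(torch.randn(batch_size, vocab_size, device="cuda"),
                          dim=-1)
    logprobs = torch.log(probs)
    n_splits = get_num_triton_sampler_splits(vocab_size)  # 1 for this vocab
    # Nonzero seeds select random (not greedy) sampling in the kernel.
    seeds = torch.randint(1,
                          2**31, (n_splits, batch_size),
                          device="cuda",
                          dtype=torch.long)
    tokens, token_logprobs, _ = sample(probs,
                                       seeds,
                                       max_best_of=1,
                                       logprobs=logprobs,
                                       save_logprobs=True)
    print("sampled tokens:", tokens.squeeze(1).tolist())
    print("their logprobs:", token_logprobs.squeeze(1).tolist())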