rejection.py

from functools import cached_property
from typing import Optional, Tuple

import torch
import torch.jit
import torch.nn as nn


class RejectionSampler(nn.Module):
    """Apply modified rejection sampling as described in "Accelerating Large
    Language Model Decoding with Speculative Sampling"
    https://arxiv.org/pdf/2302.01318.pdf.
    """

    def __init__(self, strict_mode: bool = False):
        """Create a rejection sampler.

        Args:
            strict_mode: Whether or not to perform shape/device/dtype checks
                during sampling. This catches correctness issues but adds
                nontrivial latency.
        """
        super().__init__()
        self._strict_mode = strict_mode

        # NOTE: A "bonus token" is accepted iff all proposal tokens are
        # accepted. There is always only one possible bonus token. We store
        # this value in a variable for readability.
        self._num_bonus_tokens = 1

        self.num_accepted_tokens: Optional[torch.Tensor] = None
        self.num_emitted_tokens: Optional[torch.Tensor] = None
        self.num_draft_tokens: int = 0

    def init_gpu_tensors(self, rank: int) -> None:
        assert self.num_accepted_tokens is None
        device = f"cuda:{rank}"
        self.num_accepted_tokens = torch.tensor(0,
                                                dtype=torch.long,
                                                device=device)
        self.num_emitted_tokens = torch.tensor(0,
                                               dtype=torch.long,
                                               device=device)

    @property
    def probs_dtype(self):
        return torch.float32

    @property
    def token_id_dtype(self):
        return torch.int64
    def forward(
        self,
        target_probs: torch.Tensor,
        bonus_token_ids: torch.Tensor,
        draft_probs: torch.Tensor,
        draft_token_ids: torch.Tensor,
    ) -> torch.Tensor:
        """Sample token ids using rejection sampling. This accepts or rejects
        tokens proposed by the draft model using the probability of each token
        according to the draft and target models.

        In the worst case where all draft tokens are rejected, it is
        guaranteed that one correct token will be emitted.

        In the case where all draft tokens are accepted, a bonus token will be
        accepted as it's cheap to have the target model score this speculative
        sequence.

        Args:
            target_probs: The probability distribution over token ids given
                context according to the target model.
                shape = [batch_size, num_speculative_tokens, vocab_size]

            bonus_token_ids: The "bonus" token ids that are accepted iff all
                speculative tokens in a sequence are accepted.
                shape = [batch_size, num_bonus_tokens]

            draft_probs: The probability distribution over token ids given
                context according to the draft model.
                shape = [batch_size, num_speculative_tokens, vocab_size]

            draft_token_ids: The token ids that were sampled from the draft
                probabilities.
                shape = [batch_size, num_speculative_tokens]

        Returns:
            output_token_ids: The token ids sampled via rejection sampling,
                or -1 if unable to sample a token because the previous token
                was rejected.
                shape = [batch_size, num_speculative_tokens + num_bonus_tokens]
        """
        # Only perform shape/dtype/device checking in strict mode, as it adds
        # overhead.
        if self._strict_mode:
            self._raise_if_incorrect_shape(target_probs, bonus_token_ids,
                                           draft_probs, draft_token_ids)
            self._raise_if_incorrect_dtype(target_probs, bonus_token_ids,
                                           draft_probs, draft_token_ids)
            self._raise_if_inconsistent_device(target_probs, bonus_token_ids,
                                               draft_probs, draft_token_ids)
            self._raise_if_out_of_bounds_vocab(target_probs.shape[-1],
                                               bonus_token_ids,
                                               draft_token_ids)

        accepted, recovered_token_ids = (
            self._batch_modified_rejection_sampling(
                target_probs,
                draft_probs,
                draft_token_ids,
            ))

        output_token_ids = self._create_output(
            accepted,
            recovered_token_ids,
            draft_token_ids,
            bonus_token_ids,
        )
        return output_token_ids
    def _batch_modified_rejection_sampling(
        self,
        target_probs: torch.Tensor,  # [batch_size, k, vocab_size]
        draft_probs: torch.Tensor,  # [batch_size, k, vocab_size]
        draft_token_ids: torch.Tensor,  # [batch_size, k]
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Perform modified rejection sampling on each sequence.

        Returns:
            A tuple of two tensors:
            0: A bool tensor indicating which tokens in each sequence are
                accepted.
                shape = [batch_size, k]
            1: Token ids sampled from a recovered distribution, to be used
                when a token is rejected.
                shape = [batch_size, k]
        """
        batch_size, k, vocab_size = draft_probs.shape

        # shape [batch_size, k]
        accepted = self._get_accepted(target_probs, draft_probs,
                                      draft_token_ids)

        recovered_probs = self._get_recovered_probs(
            target_probs, draft_probs).reshape(batch_size * k, vocab_size)

        # NOTE: the recovered_probs are overwritten by this method.
        recovered_token_ids = _multinomial(recovered_probs,
                                           num_samples=1).reshape(
                                               batch_size, k)
        return accepted, recovered_token_ids
    def _get_accepted(
        self,
        target_probs: torch.Tensor,  # [batch_size, k, vocab_size]
        draft_probs: torch.Tensor,  # [batch_size, k, vocab_size]
        draft_token_ids: torch.Tensor,  # [batch_size, k]
    ) -> torch.Tensor:
        r"""Create a bool matrix over the proposed draft tokens. If
        True, then a token can be accepted, else it should be
        rejected.

        Given :math:`q(\hat{x}_{n+1}|x_1, \dots, x_n)`, the probability of
        :math:`\hat{x}_{n+1}` given context :math:`x_1, \dots, x_n` according
        to the target model, and :math:`p(\hat{x}_{n+1}|x_1, \dots, x_n)`, the
        same conditional probability according to the draft model, the token
        is accepted with probability:

        .. math::
            \min\left(1, \frac{q(\hat{x}_{n+1}|x_1, \dots, x_n)}
                              {p(\hat{x}_{n+1}|x_1, \dots, x_n)}\right)

        This implementation does not apply causality. When using the output,
        if a token is rejected, subsequent tokens should not be used.

        Returns a bool tensor of shape [batch_size, k] specifying which tokens
        are accepted.
        """
        batch_size, k, _ = draft_probs.shape
        batch_indices = torch.arange(batch_size,
                                     device=target_probs.device)[:, None]
        probs_indices = torch.arange(k, device=target_probs.device)

        # shape [batch_size, k]
        selected_draft_probs = draft_probs[batch_indices, probs_indices,
                                           draft_token_ids]

        # shape [batch_size, k]
        selected_target_probs = target_probs[batch_indices, probs_indices,
                                             draft_token_ids]

        uniform_rand = torch.rand(batch_size,
                                  k,
                                  dtype=self.probs_dtype,
                                  device=target_probs.device)
        capped_ratio = torch.minimum(
            selected_target_probs / selected_draft_probs,
            torch.full((1, ), 1, device=target_probs.device))
        accepted = uniform_rand < capped_ratio

        return accepted
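
    # Worked example for _get_accepted (added for illustration, not part of
    # the original source): if the draft model assigned probability p = 0.4 to
    # the token it sampled but the target model assigns q = 0.2, the capped
    # ratio is min(1, 0.2 / 0.4) = 0.5, so the token survives only when the
    # uniform draw falls below 0.5. Whenever q >= p the ratio caps at 1 and
    # the draft token is always accepted.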
    def _get_recovered_probs(
        self,
        target_probs: torch.Tensor,  # [batch_size, k, vocab_size]
        draft_probs: torch.Tensor,  # [batch_size, k, vocab_size]
    ) -> torch.Tensor:
        r"""Create a probability distribution for each proposed token which can
        be sampled if the proposed token is rejected.

        When this routine is applied sequentially, the true distribution of the
        target model is recovered (within hardware numerics).

        The probability distribution used in this rejection case is constructed
        as follows. Given :math:`q(x|x_1, \dots, x_n)`, the probability of
        :math:`x` given context :math:`x_1, \dots, x_n` according to the target
        model and :math:`p(x|x_1, \dots, x_n)`, the same conditional
        probability according to the draft model:

        .. math::
            x_{n+1} \sim (q(x|x_1, \dots, x_n) - p(x|x_1, \dots, x_n))_+

        where :math:`(f(x))_+` is defined as:

        .. math::
            (f(x))_+ = \frac{\max(0, f(x))}{\sum_x \max(0, f(x))}

        Returns a tensor of shape [batch_size, k, vocab_size].

        Note: This batches operations on GPU and thus constructs the recovered
        distribution for all tokens, even if they are accepted. This causes
        division-by-zero errors, so we use self._smallest_positive_value to
        avoid that. This introduces some drift to the distribution.
        """
        _, k, _ = draft_probs.shape

        # shape [batch_size, k, vocab_size]
        difference = target_probs - draft_probs

        # TODO: Can we use logprobs instead of probs, and avoid the
        # division-by-zero errors without introducing distribution drift?

        # shape [batch_size, k, vocab_size]
        f = torch.clamp(difference, min=self._smallest_positive_value)

        # shape [batch_size, k, vocab_size]
        recovered_probs = f / torch.sum(f, dim=-1).reshape(-1, k, 1)

        return recovered_probs
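
    # Worked example for _get_recovered_probs (added for illustration, not
    # part of the original source): with a three-token vocabulary, target
    # q = [0.5, 0.3, 0.2] and draft p = [0.7, 0.2, 0.1] give a difference of
    # [-0.2, 0.1, 0.1]. Clamping to the smallest positive value and
    # normalizing yields a recovered distribution of approximately
    # [0.0, 0.5, 0.5], so a rejected draft token is replaced by a sample that
    # favors tokens the target model weights more heavily than the draft did.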
    @cached_property
    def _smallest_positive_value(self) -> float:
        """Return the smallest positive value representable by the probs dtype.
        This value is used when constructing a distribution from which to
        sample recovered tokens in the first rejection case.

        See _get_recovered_probs for more details.

        Note that this isn't actually the smallest positive value representable
        by float32, but the smallest positive normal value.
        See https://en.wikipedia.org/wiki/Subnormal_number for more information.
        """
        return torch.finfo(self.probs_dtype).tiny
    def _create_output(
        self,
        accepted: torch.Tensor,  # [batch_size, k]
        recovered_token_ids: torch.Tensor,  # [batch_size, k]
        draft_token_ids: torch.Tensor,  # [batch_size, k]
        bonus_token_ids: torch.Tensor,  # [batch_size]
    ) -> torch.Tensor:
        """Format output. Returns a matrix of token ids. When
        a token is rejected via rejection sampling, all subsequent
        token ids are set to -1 for the sequence.

        shape = [batch_size, k + num_bonus_tokens]
        """
        bonus_token_ids = bonus_token_ids.squeeze()
        batch_size, k = recovered_token_ids.shape

        # Determine the index of the first False value for each row.
        limits = (accepted == 0).max(1).indices
        limits[~(accepted == 0).any(1)] = k

        # Create masks using the indices.
        indices = torch.arange(k, device=accepted.device).unsqueeze(0)
        accepted_mask = indices < limits.unsqueeze(1)
        after_false_mask = indices == limits.unsqueeze(1)

        # Create an extended output tensor.
        output_with_bonus_tokens = -torch.ones(
            (batch_size, k + self._num_bonus_tokens),
            dtype=self.token_id_dtype,
            device=accepted.device)
        output = output_with_bonus_tokens[:, :k]

        # Fill in the first k columns of the output tensor using masks and data
        # tensors.
        output[:, :k] = torch.where(accepted_mask, draft_token_ids,
                                    -torch.ones_like(draft_token_ids))

        # Fill the last column.
        # We check output directly as accepted may have True values
        # inconsistent with causal acceptance.
        output_with_bonus_tokens[:, -1] = torch.where(output[:, -1] != -1,
                                                      bonus_token_ids, -1)

        # We disable bonus tokens because it causes corrupt KV cache for
        # proposal methods that require KV cache. We can fix it by "prefilling"
        # the bonus token in the proposer.
        output_with_bonus_tokens[:, -1] = -1

        # Fill the recovered token ids.
        output.mul_(~after_false_mask).add_(
            recovered_token_ids.mul(after_false_mask))

        self.num_accepted_tokens += accepted.sum()
        self.num_emitted_tokens += (output_with_bonus_tokens != -1).sum()
        self.num_draft_tokens += batch_size * k

        return output_with_bonus_tokens
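
    # Worked example for _create_output (added for illustration, not part of
    # the original source): with k = 3 and accepted = [True, False, True] for
    # a sequence, the first False sits at index 1, so the output row becomes
    # [draft_token_ids[0], recovered_token_ids[1], -1, -1]. The trailing -1
    # occupies the bonus-token slot, which this version always disables.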
    def _raise_if_incorrect_shape(
        self,
        target_probs: torch.Tensor,
        bonus_token_ids: torch.Tensor,
        draft_probs: torch.Tensor,
        draft_token_ids: torch.Tensor,
    ) -> None:
        (target_batch_size, num_target_probs,
         target_vocab_size) = target_probs.shape
        bonus_batch_size, num_bonus_tokens = bonus_token_ids.shape
        draft_batch_size, num_draft_probs, draft_vocab_size = draft_probs.shape
        draft_token_ids_batch_size, num_draft_token_ids = draft_token_ids.shape

        assert draft_batch_size == target_batch_size
        assert num_draft_probs == num_target_probs
        assert (draft_vocab_size == target_vocab_size
                ), f"{draft_vocab_size=} {target_vocab_size=}"

        assert draft_token_ids_batch_size == draft_batch_size
        assert num_draft_token_ids == num_draft_probs

        assert bonus_batch_size == target_batch_size
        assert num_bonus_tokens == self._num_bonus_tokens

    def _raise_if_incorrect_dtype(
        self,
        target_probs: torch.Tensor,
        bonus_token_ids: torch.Tensor,
        draft_probs: torch.Tensor,
        draft_token_ids: torch.Tensor,
    ) -> None:
        assert all(probs.dtype == self.probs_dtype
                   for probs in [target_probs, draft_probs])
        assert all(token_ids.dtype == self.token_id_dtype
                   for token_ids in [bonus_token_ids, draft_token_ids])

    def _raise_if_inconsistent_device(
        self,
        target_probs: torch.Tensor,
        bonus_token_ids: torch.Tensor,
        draft_probs: torch.Tensor,
        draft_token_ids: torch.Tensor,
    ) -> None:
        devices = [
            t.device for t in
            [target_probs, bonus_token_ids, draft_probs, draft_token_ids]
        ]
        assert all([devices[0] == device for device in devices])

    def _raise_if_out_of_bounds_vocab(
        self,
        vocab_size: int,
        bonus_token_ids: torch.Tensor,
        draft_token_ids: torch.Tensor,
    ) -> None:
        assert torch.all(bonus_token_ids < vocab_size)
        assert torch.all(bonus_token_ids >= 0)
        assert torch.all(draft_token_ids < vocab_size)
        assert torch.all(draft_token_ids >= 0)

# torch.multinomial forces a GPU<->CPU sync.
# Therefore, we use an optimized implementation instead that skips the sync.
# Note that we always sample with replacement.
# probs will be modified in place, but this is fine, as we pass
# in a copy already.
@torch.jit.script
def _multinomial(
    probs: torch.Tensor,
    num_samples: int,
) -> torch.Tensor:
    if num_samples > 1:
        # This is equivalent to torch.repeat_interleave (which also
        # forces a GPU<->CPU sync).
        probs = probs[:, None, :].expand(probs.shape[0], num_samples,
                                         probs.shape[1]).contiguous().view(
                                             -1, probs.shape[1])
    q = torch.empty_like(probs).exponential_(1.0)
    return probs.div_(q).argmax(dim=1).view(-1, num_samples)
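

# Illustrative usage sketch (added; not part of the original module). It
# builds normalized random distributions, draws draft and bonus tokens from
# them, and runs rejection sampling in strict mode. The batch_size, k, and
# vocab_size values below are arbitrary. The counters created by
# init_gpu_tensors live on CUDA, so the demo only runs when a GPU is present.
if __name__ == "__main__":
    if torch.cuda.is_available():
        device = "cuda:0"
        batch_size, k, vocab_size = 4, 3, 32

        sampler = RejectionSampler(strict_mode=True)
        sampler.init_gpu_tensors(rank=0)

        # Random, properly normalized float32 probability distributions.
        target_probs = torch.softmax(
            torch.rand(batch_size, k, vocab_size, device=device), dim=-1)
        draft_probs = torch.softmax(
            torch.rand(batch_size, k, vocab_size, device=device), dim=-1)

        # Draft tokens sampled from the draft distribution, plus one bonus
        # token per sequence sampled from the last target distribution.
        draft_token_ids = torch.multinomial(
            draft_probs.view(-1, vocab_size),
            num_samples=1).view(batch_size, k)
        bonus_token_ids = torch.multinomial(target_probs[:, -1],
                                            num_samples=1)

        output = sampler(target_probs, bonus_token_ids, draft_probs,
                         draft_token_ids)
        # shape [batch_size, k + 1]; -1 marks positions after a rejection.
        print(output)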