# rejection_sampler.py

from functools import cached_property
from typing import Dict, List, Optional, Tuple

import torch
import torch.jit

from aphrodite.modeling.layers.spec_decode_base_sampler import (
    SpecDecodeStochasticBaseSampler)


class RejectionSampler(SpecDecodeStochasticBaseSampler):
    """Apply modified rejection sampling as described in "Accelerating Large
    Language Model Decoding with Speculative Sampling"
    https://arxiv.org/pdf/2302.01318.pdf.
    """

    def __init__(self, strict_mode: bool = False):
        """Create a rejection sampler.

        Args:
            strict_mode: Whether or not to perform shape/device/dtype checks
                during sampling. This catches correctness issues but adds
                nontrivial latency.
        """
        super().__init__(strict_mode=strict_mode)

    def forward(
        self,
        target_probs: torch.Tensor,
        bonus_token_ids: torch.Tensor,
        draft_probs: torch.Tensor,
        draft_token_ids: torch.Tensor,
        seeded_seqs: Optional[Dict[int, torch.Generator]] = None,
    ) -> torch.Tensor:
        """Sample token ids using rejection sampling. This accepts or rejects
        tokens proposed by the draft model using the probability of each token
        according to the draft and target models.

        In the worst case where all draft tokens are rejected, it is
        guaranteed that one correct token will be emitted.

        In the case where all draft tokens are accepted, a bonus token will be
        accepted, as it's cheap to have the target model score this
        speculative sequence.

        Args:
            target_probs: The probability distribution over token ids given
                context according to the target model.
                shape = [batch_size, num_speculative_tokens, vocab_size]
            bonus_token_ids: The "bonus" token ids that are accepted iff all
                speculative tokens in a sequence are accepted.
                shape = [batch_size, num_bonus_tokens]
            draft_probs: The probability distribution over token ids given
                context according to the draft model.
                shape = [batch_size, num_speculative_tokens, vocab_size]
            draft_token_ids: The token ids that were sampled from the draft
                probabilities.
                shape = [batch_size, num_speculative_tokens]
            seeded_seqs: Dict of batch row index to torch generator, for
                sequences using seeded generation.

        Returns:
            output_token_ids: The token ids sampled via rejection sampling,
                or -1 if unable to sample a token because the previous token
                was rejected.
                shape = [batch_size, num_speculative_tokens + num_bonus_tokens]
        """
        # Only perform shape/dtype/device checking in strict mode, as it adds
        # overhead.
        if self._strict_mode:
            self._raise_if_incorrect_input(target_probs, draft_token_ids,
                                           bonus_token_ids, draft_probs)

        accepted, recovered_token_ids = (
            self._batch_modified_rejection_sampling(
                target_probs,
                draft_probs,
                draft_token_ids,
                seeded_seqs,
            ))

        output_token_ids = self._create_output(
            accepted,
            recovered_token_ids,
            draft_token_ids,
            bonus_token_ids,
        )

        return output_token_ids

    def _batch_modified_rejection_sampling(
        self,
        target_probs: torch.Tensor,  # [batch_size, k, vocab_size]
        draft_probs: torch.Tensor,  # [batch_size, k, vocab_size]
        draft_token_ids: torch.Tensor,  # [batch_size, k]
        seeded_seqs: Optional[Dict[int, torch.Generator]],
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Perform modified rejection sampling on each sequence.

        Returns:
            A tuple of two tensors:
            0: A bool tensor indicating which tokens in each sequence are
                accepted.
                shape = [batch_size, k]
            1: Token ids sampled from a recovered distribution, to be used
                when a token is rejected.
                shape = [batch_size, k]
        """
        batch_size, k, vocab_size = draft_probs.shape

        # shape [batch_size, k]
        accepted = self._get_accepted(target_probs, draft_probs,
                                      draft_token_ids, seeded_seqs)

        recovered_probs = self._get_recovered_probs(
            target_probs, draft_probs).reshape(batch_size * k, vocab_size)

        # NOTE: the recovered_probs are overwritten by this method.
        recovered_token_ids = _multinomial(
            recovered_probs,
            num_samples=1,
            k=k,
            seeded_seqs=seeded_seqs or {},
        ).reshape(batch_size, k)

        return accepted, recovered_token_ids

    def _get_accepted(
        self,
        target_probs: torch.Tensor,  # [batch_size, k, vocab_size]
        draft_probs: torch.Tensor,  # [batch_size, k, vocab_size]
        draft_token_ids: torch.Tensor,  # [batch_size, k]
        seeded_seqs: Optional[Dict[int, torch.Generator]],
    ) -> torch.Tensor:
        r"""Create a bool matrix over the proposed draft tokens. If
        True, then a token can be accepted, else it should be
        rejected.

        Given :math:`q(\hat{x}_{n+1}|x_1, \dots, x_n)`, the probability of
        :math:`\hat{x}_{n+1}` given context :math:`x_1, \dots, x_n` according
        to the target model, and :math:`p(\hat{x}_{n+1}|x_1, \dots, x_n)`, the
        same conditional probability according to the draft model, the token
        is accepted with probability:

        .. math::
            \min\left(1, \frac{q(\hat{x}_{n+1}|x_1, \dots, x_n)}
                            {p(\hat{x}_{n+1}|x_1, \dots, x_n)}\right)

        This implementation does not apply causality. When using the output,
        if a token is rejected, subsequent tokens should not be used.

        Returns a bool tensor of shape [batch_size, k] specifying which tokens
        are accepted.
        """
        batch_size, k, _ = draft_probs.shape

        batch_indices = torch.arange(batch_size,
                                     device=target_probs.device)[:, None]
        probs_indices = torch.arange(k, device=target_probs.device)

        # shape [batch_size, k]
        selected_draft_probs = draft_probs[batch_indices, probs_indices,
                                           draft_token_ids]

        # shape [batch_size, k]
        selected_target_probs = target_probs[batch_indices, probs_indices,
                                             draft_token_ids]

        if not seeded_seqs:
            uniform_rand = torch.rand_like(selected_target_probs)
        else:
            uniform_rand = torch.empty_like(selected_target_probs)

            non_seeded_indices = []
            for idx in range(batch_size):
                generator = seeded_seqs.get(idx)
                if generator is None:
                    non_seeded_indices.append(idx)
                else:
                    uniform_rand[idx, :] = torch.rand(
                        1,
                        k,
                        dtype=self.probs_dtype,
                        device=target_probs.device,
                        generator=generator)
            if non_seeded_indices:
                uniform_rand[non_seeded_indices, :] = torch.rand(
                    len(non_seeded_indices),
                    k,
                    dtype=self.probs_dtype,
                    device=target_probs.device)

        capped_ratio = torch.minimum(
            selected_target_probs / selected_draft_probs,
            torch.full((1, ), 1, device=target_probs.device))
        accepted = uniform_rand < capped_ratio

        return accepted

    def _get_recovered_probs(
        self,
        target_probs: torch.Tensor,  # [batch_size, k, vocab_size]
        draft_probs: torch.Tensor,  # [batch_size, k, vocab_size]
    ) -> torch.Tensor:
        r"""Create a probability distribution for each proposed token which can
        be sampled if the proposed token is rejected.

        When this routine is applied sequentially, the true distribution of
        the target model is recovered (within hardware numerics).

        The probability distribution used in this rejection case is
        constructed as follows. Given :math:`q(x|x_1, \dots, x_n)`, the
        probability of :math:`x` given context :math:`x_1, \dots, x_n`
        according to the target model, and :math:`p(x|x_1, \dots, x_n)`, the
        same conditional probability according to the draft model:

        .. math::
            x_{n+1} \sim (q(x|x_1, \dots, x_n) - p(x|x_1, \dots, x_n))_+

        where :math:`(f(x))_+` is defined as:

        .. math::
            (f(x))_+ = \frac{\max(0, f(x))}{\sum_x \max(0, f(x))}

        Returns a tensor of shape [batch_size, k, vocab_size].

        Note: This batches operations on GPU and thus constructs the recovered
        distribution for all tokens, even if they are accepted. This causes
        division-by-zero errors, so we use self._smallest_positive_value to
        avoid that. This introduces some drift to the distribution.
        """
        _, k, _ = draft_probs.shape

        # shape [batch_size, k, vocab_size]
        difference = target_probs - draft_probs

        # TODO: Can we use logprobs instead of probs, and avoid the
        # division-by-zero errors without introducing distribution drift?

        # shape [batch_size, k, vocab_size]
        f = torch.clamp(difference, min=self._smallest_positive_value)

        # shape [batch_size, k, vocab_size]
        recovered_probs = f / torch.sum(f, dim=-1).reshape(-1, k, 1)

        return recovered_probs

    @cached_property
    def _smallest_positive_value(self) -> float:
        """Return the smallest positive value representable by the probs dtype.
        This value is used when constructing a distribution from which to
        sample recovered tokens in the first rejection case.

        See _get_recovered_probs for more details.

        Note that this isn't actually the smallest positive value representable
        by float32, but the smallest positive normal value.
        See https://en.wikipedia.org/wiki/Subnormal_number for more information.
        """
        return torch.finfo(self.probs_dtype).tiny


# torch.multinomial forces a GPU<->CPU sync.
# Therefore, we use an optimized implementation instead that skips the sync.
# Note that we always sample with replacement.
# probs will be modified in place, but this is fine, as we pass
# in a copy already.
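# Why this works: dividing each probability by an independent Exponential(1)
# draw and taking the argmax picks index i with probability proportional to
# probs[i] (an "exponential race", equivalent to the Gumbel-max trick), so no
# torch.multinomial call is required.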
@torch.jit.script
def _multinomial(
    probs: torch.Tensor,
    num_samples: int,
    k: int,
    seeded_seqs: Dict[int, torch.Generator],
) -> torch.Tensor:

    if num_samples > 1:
        # This is equivalent to torch.repeat_interleave (which also
        # forces a GPU<->CPU sync).
        probs = probs[:, None, :].expand(probs.shape[0], num_samples,
                                         probs.shape[1]).contiguous().view(
                                             -1, probs.shape[1])

    q = torch.empty_like(probs)
    if not seeded_seqs:
        q.exponential_(1.0)
    else:
        non_seeded_indices: List[int] = []
        start = 0
        for idx in range(len(q) // k):
            end = start + k
            generator = seeded_seqs.get(idx)
            if generator is None:
                non_seeded_indices.extend(list(range(start, end)))
            else:
                q[start:end].exponential_(1.0, generator=generator)
            start = end
        if len(non_seeded_indices) > 0:
            # Advanced indexing with a Python list returns a copy, so an
            # in-place exponential_ on that copy would not update q. Draw the
            # values on the copy and write them back explicitly instead.
            non_seeded = torch.tensor(non_seeded_indices,
                                      dtype=torch.long,
                                      device=q.device)
            q[non_seeded] = q[non_seeded].exponential_(1.0)

    return probs.div_(q).argmax(dim=1).view(-1, num_samples)
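

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only; not part of the original module).
# It exercises just the sync-free _multinomial helper on CPU with assumed
# shapes; the full RejectionSampler.forward path is not exercised here.
if __name__ == "__main__":
    torch.manual_seed(0)
    batch_size, k, vocab_size = 2, 3, 8
    # One recovered-token draw per (sequence, position) pair, flattened to
    # [batch_size * k, vocab_size] as in _batch_modified_rejection_sampling.
    probs = torch.softmax(torch.randn(batch_size * k, vocab_size), dim=-1)
    token_ids = _multinomial(probs, num_samples=1, k=k, seeded_seqs={})
    print(token_ids.view(batch_size, k))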