sampler.py

  1. """A layer that samples the next tokens from the model's outputs."""
  2. import itertools
  3. import os
  4. import warnings
  5. from enum import IntEnum
  6. from math import inf
  7. from typing import Dict, List, Optional, Tuple
  8. import torch
  9. import torch.nn as nn
  10. from loguru import logger
  11. import aphrodite._custom_ops as ops
  12. from aphrodite.common.sampling_params import SamplingType
  13. from aphrodite.common.sequence import (CompletionSequenceGroupOutput, Logprob,
  14. PromptLogprobs, SampleLogprobs,
  15. SamplerOutput, SequenceOutput)
  16. from aphrodite.triton_utils import HAS_TRITON
  17. if HAS_TRITON:
  18. from aphrodite.modeling.layers.ops.sample import sample as sample_triton
  19. from aphrodite.modeling.sampling_metadata import (SamplingMetadata,
  20. SamplingTensors,
  21. SequenceGroupToSample)
  22. # (next_token_ids, parent_ids) per sequence group.
  23. SampleResultType = List[Tuple[List[int], List[int]]]
  24. # There isn't a "safe" temperature range for fp16 logits.
  25. # This value was chosen because 1/2e-5 is just under the 65k fp16 max, meaning
  26. # that this temperature makes good use of the fp16 range after the logits are offset.
  27. _TEMPERATURE_MINIMUM = 2e-5
  28. # If enabled, we switch to a more performant implementation
  29. # of top-k and top-p
  30. APHRODITE_USE_SAMPLING_KERNELS = bool(int(
  31. os.getenv("APHRODITE_USE_SAMPLING_KERNELS", "0")))
  32. class SamplerID(IntEnum):
  33. # Mirror these in aphrodite/common/sampling_params.py
  34. # Values out of order to keep backwards compatibility
  35. # with Koboldcpp values
  36. DRY = 7
  37. PENALTIES = 6
  38. NO_REPEAT_NGRAM = 8
  39. TEMPERATURE = 5
  40. TOP_NSIGMA = 9
  41. TOP_P_TOP_K = 0
  42. TOP_A = 1
  43. MIN_P = 2
  44. TFS = 3
  45. ETA_CUTOFF = 10
  46. EPSILON_CUTOFF = 11
  47. TYPICAL_P = 4
  48. QUADRATIC = 12
  49. XTC = 13
  50. class Sampler(nn.Module):
  51. """Samples the next tokens from the model's outputs.
  52. This layer does the following:
  53. 1. Discard the hidden states that are not used for sampling (i.e., all
  54. tokens except the final one in each prompt).
  55. 2. Compute the logits for the next tokens.
  56. 3. Apply presence, frequency and repetition penalties.
  57. 4. Apply temperature scaling.
  58. 5. Apply top-p and top-k truncation.
  59. 6. Sample the next tokens.
  60. Here, each sequence group within the batch can have different sampling
  61. parameters (e.g., sampling method, temperature, top-p, top-k, etc.).
  62. The structure of the logits tensor is coupled with the seq_groups in
  63. sampling_metadata. Typically, each sequence in each seq_group has one row in
  64. logits for the next token to be sampled; however, for a seq_group with a
  65. prompt request with the prompt_logprobs sampling parameter, there are rows
  66. in logits for each token in the input prompt.
  67. """
  68. def __init__(self):
  69. super().__init__()
  70. # Whether or not the SamplerOutput should have on-device tensors
  71. # containing the sampled token ids and probabilities. This is used by
  72. # speculative decoding.
  73. self.include_gpu_probs_tensor = False
  74. self.should_modify_greedy_probs_inplace = False
  75. def _init_sampling_tensors(
  76. self,
  77. logits: torch.Tensor,
  78. sampling_metadata: SamplingMetadata,
  79. ):
  80. """The goal here is to reuse sampling tensors between similar decode
  81. runs. This is possible because sampling logic does not change between
  82. decodes of the same sequences.
  83. """
  84. _, vocab_size = logits.shape
  85. # First free any existing stored sampling tensors.
  86. # This is necessary because some sampling tensors may
  87. # have pinned memory.
  88. self._sampling_tensors = None
  89. # Initialize new sampling tensors
  90. (sampling_tensors, do_penalties, do_no_repeat_ngrams, do_temperatures,
  91. do_top_p_top_k, do_top_as, do_min_p, do_tfss, do_eta_cutoffs,
  92. do_epsilon_cutoffs, do_typical_ps, do_quadratic, do_xtc, do_nsigmas,
  93. do_dry, do_skew, do_temp_last
  94. ) = SamplingTensors.from_sampling_metadata(
  95. sampling_metadata, vocab_size, logits.device, logits.dtype)
  96. self._sampling_tensors = sampling_tensors
  97. self._do_penalties = do_penalties
  98. self._do_no_repeat_ngrams = do_no_repeat_ngrams
  99. self._do_temperatures = do_temperatures
  100. self._do_top_p_top_k = do_top_p_top_k
  101. self._do_top_as = do_top_as
  102. self._do_min_p = do_min_p
  103. self._do_tfss = do_tfss
  104. self._do_eta_cutoffs = do_eta_cutoffs
  105. self._do_epsilon_cutoffs = do_epsilon_cutoffs
  106. self._do_typical_ps = do_typical_ps
  107. self._do_quadratic = do_quadratic
  108. self._do_xtc = do_xtc
  109. self._do_nsigmas = do_nsigmas
  110. self._do_dry = do_dry
  111. self._do_skew = do_skew
  112. self._do_temp_last = do_temp_last
  113. def forward(
  114. self,
  115. logits: torch.Tensor,
  116. sampling_metadata: SamplingMetadata,
  117. ) -> Optional[SamplerOutput]:
  118. """
  119. Args:
  120. logits: (num_tokens, vocab_size).
  121. sampling_metadata: Metadata for sampling.
  122. """
  123. assert logits is not None
  124. _, vocab_size = logits.shape
  125. # Prepare sampling tensors with pinned memory to avoid blocking.
  126. if not sampling_metadata.reuse_sampling_tensors:
  127. self._init_sampling_tensors(logits, sampling_metadata)
  128. elif self._do_penalties or self._do_dry:
  129. # In this case, the sampling tensors logic depends on
  130. # "output_tokens" of a sequence. As a result, we cannot
  131. # reuse sampling tensors, since "output_tokens" changes
  132. # between decode runs.
  133. self._init_sampling_tensors(logits, sampling_metadata)
  134. assert self._sampling_tensors is not None
  135. sampling_tensors = self._sampling_tensors
  136. do_penalties = self._do_penalties
  137. do_no_repeat_ngrams = self._do_no_repeat_ngrams
  138. do_temperatures = self._do_temperatures
  139. do_top_p_top_k = self._do_top_p_top_k
  140. do_top_as = self._do_top_as
  141. do_min_p = self._do_min_p
  142. do_tfss = self._do_tfss
  143. do_eta_cutoffs = self._do_eta_cutoffs
  144. do_epsilon_cutoffs = self._do_epsilon_cutoffs
  145. do_typical_ps = self._do_typical_ps
  146. do_quadratic = self._do_quadratic
  147. do_xtc = self._do_xtc
  148. do_nsigmas = self._do_nsigmas
  149. do_dry = self._do_dry
  150. do_skew = self._do_skew
  151. do_temp_last = self._do_temp_last
  152. logits = _apply_min_tokens_penalty(logits, sampling_metadata)
  153. banned_tokens = _get_custom_token_bans(sampling_metadata)
  154. logits = _apply_token_bans(logits, banned_tokens)
  155. sampler_order = None
  156. if sampling_metadata.seq_groups:
  157. sampler_order = sampling_metadata.seq_groups[
  158. 0].sampling_params.sampler_priority
  159. # Warn if both custom order and temp_last are specified
  160. if sampler_order is not None and do_temp_last:
  161. logger.warning(
  162. "Both sampler_priority and temperature_last=True "
  163. "were specified. Using custom sampler_priority order "
  164. "and ignoring temperature_last.")
  165. if sampler_order is None:
  166. default_order = [
  167. SamplerID.DRY,
  168. SamplerID.PENALTIES,
  169. SamplerID.NO_REPEAT_NGRAM,
  170. SamplerID.TEMPERATURE,
  171. SamplerID.TOP_NSIGMA,
  172. SamplerID.TOP_P_TOP_K,
  173. SamplerID.TOP_A,
  174. SamplerID.MIN_P,
  175. SamplerID.TFS,
  176. SamplerID.ETA_CUTOFF,
  177. SamplerID.EPSILON_CUTOFF,
  178. SamplerID.TYPICAL_P,
  179. SamplerID.QUADRATIC,
  180. SamplerID.XTC,
  181. ]
  182. sampler_order = []
  183. for sampler_id in default_order:
  184. if sampler_id == SamplerID.TEMPERATURE and do_temp_last:
  185. continue
  186. sampler_order.append(sampler_id)
  187. if sampler_id == SamplerID.XTC and do_temp_last:
  188. sampler_order.append(SamplerID.TEMPERATURE)
  189. if sampling_metadata.seq_groups and sampling_metadata.seq_groups[
  190. 0].is_prompt:
  191. logger.debug("Sampler execution order: ")
  192. for i, sampler_id in enumerate(sampler_order, 1):
  193. logger.debug(f"{i}. {SamplerID(sampler_id).name}")
  194. enabled_samplers = []
  195. # ruff: noqa: E701
  196. if do_penalties: enabled_samplers.append("PENALTIES")
  197. if do_no_repeat_ngrams: enabled_samplers.append("NO_REPEAT_NGRAM")
  198. if do_temperatures: enabled_samplers.append("TEMPERATURE")
  199. if do_top_p_top_k: enabled_samplers.append("TOP_P_TOP_K")
  200. if do_top_as: enabled_samplers.append("TOP_A")
  201. if do_min_p: enabled_samplers.append("MIN_P")
  202. if do_tfss: enabled_samplers.append("TFS")
  203. if do_eta_cutoffs: enabled_samplers.append("ETA_CUTOFF")
  204. if do_epsilon_cutoffs: enabled_samplers.append("EPSILON_CUTOFF")
  205. if do_typical_ps: enabled_samplers.append("TYPICAL_P")
  206. if do_quadratic: enabled_samplers.append("QUADRATIC")
  207. if do_xtc: enabled_samplers.append("XTC")
  208. if do_nsigmas: enabled_samplers.append("TOP_NSIGMA")
  209. if do_dry: enabled_samplers.append("DRY")
  210. if do_skew: enabled_samplers.append("SKEW")
  211. logger.debug(f"Enabled samplers: {', '.join(enabled_samplers)}")
  212. for sampler_id in sampler_order:
  213. if sampler_id == SamplerID.DRY and do_dry:
  214. if (sampling_metadata.seq_groups and
  215. sampling_metadata.seq_groups[0].is_prompt):
  216. logger.debug(
  217. f"Applying DRY with dry_multiplier: "
  218. f"{sampling_tensors.dry_multipliers}.")
  219. logits = _apply_dry(
  220. logits,
  221. sampling_tensors.prompt_tokens,
  222. sampling_tensors.output_tokens,
  223. sampling_tensors.dry_multipliers,
  224. sampling_tensors.dry_bases,
  225. sampling_tensors.dry_allowed_lengths,
  226. sampling_tensors.dry_sequence_breaker_ids,
  227. sampling_tensors.dry_ranges)
  228. elif sampler_id == SamplerID.PENALTIES and do_penalties:
  229. if (sampling_metadata.seq_groups and
  230. sampling_metadata.seq_groups[0].is_prompt):
  231. logger.debug(
  232. "Applying penalties with "
  233. f"pres_pen: {sampling_tensors.presence_penalties}, "
  234. f"freq_pen: {sampling_tensors.frequency_penalties}, "
  235. f"rep_pen: {sampling_tensors.repetition_penalties}.")
  236. logits = _apply_penalties(
  237. logits, sampling_tensors.prompt_tokens,
  238. sampling_tensors.output_tokens,
  239. sampling_tensors.presence_penalties,
  240. sampling_tensors.frequency_penalties,
  241. sampling_tensors.repetition_penalties)
  242. elif sampler_id == SamplerID.NO_REPEAT_NGRAM and \
  243. do_no_repeat_ngrams:
  244. if (sampling_metadata.seq_groups and
  245. sampling_metadata.seq_groups[0].is_prompt):
  246. logger.debug(
  247. "Applying no_repeat_ngram with no_repeat_ngram_size: "
  248. f"{sampling_tensors.no_repeat_ngram_sizes}.")
  249. logits = _apply_no_repeat_ngram(
  250. logits,
  251. sampling_tensors.prompt_tokens,
  252. sampling_tensors.no_repeat_ngram_sizes)
  253. elif sampler_id == SamplerID.TEMPERATURE and do_temperatures:
  254. if (sampling_metadata.seq_groups and
  255. sampling_metadata.seq_groups[0].is_prompt):
  256. logger.debug(
  257. "Applying temperatures with temperature: "
  258. f"{sampling_tensors.temperatures}, "
  259. f"dynatemp_min: {sampling_tensors.dynatemp_mins}, "
  260. f"dynatemp_max: {sampling_tensors.dynatemp_maxs}, "
  261. f"dynamtep_exp: {sampling_tensors.dynatemp_exps}.")
  262. _apply_temperatures(
  263. logits, sampling_tensors.temperatures,
  264. sampling_tensors.dynatemp_mins,
  265. sampling_tensors.dynatemp_maxs,
  266. sampling_tensors.dynatemp_exps)
  267. elif sampler_id == SamplerID.TOP_NSIGMA and do_nsigmas:
  268. if (sampling_metadata.seq_groups and
  269. sampling_metadata.seq_groups[0].is_prompt):
  270. logger.debug(
  271. "Applying Top-Nsigma with nsigma: "
  272. f"{sampling_tensors.nsigmas}")
  273. logits = _apply_top_nsigma(
  274. logits, sampling_tensors.nsigmas)
  275. elif sampler_id == SamplerID.TOP_P_TOP_K and do_top_p_top_k and \
  276. not APHRODITE_USE_SAMPLING_KERNELS:
  277. if (sampling_metadata.seq_groups and
  278. sampling_metadata.seq_groups[0].is_prompt):
  279. logger.debug(
  280. "Applying Top-p and Top-k with top-p: "
  281. f"{sampling_tensors.top_ps}, top_k: "
  282. f"{sampling_tensors.top_ks}.")
  283. logits = _apply_top_k_top_p(
  284. logits, sampling_tensors.top_ps,
  285. sampling_tensors.top_ks)
  286. elif sampler_id == SamplerID.TOP_A and do_top_as:
  287. if (sampling_metadata.seq_groups and
  288. sampling_metadata.seq_groups[0].is_prompt):
  289. logger.debug(
  290. "Applying Top-a with Top-a: "
  291. f"{sampling_tensors.top_as}.")
  292. logits = _apply_top_a(
  293. logits, sampling_tensors.top_as)
  294. elif sampler_id == SamplerID.MIN_P and do_min_p:
  295. if (sampling_metadata.seq_groups and
  296. sampling_metadata.seq_groups[0].is_prompt):
  297. logger.debug(
  298. "Applying Min-p with Min-p: "
  299. f"{sampling_tensors.min_ps}.")
  300. logits = _apply_min_p(
  301. logits, sampling_tensors.min_ps)
  302. elif sampler_id == SamplerID.TFS and do_tfss:
  303. if (sampling_metadata.seq_groups and
  304. sampling_metadata.seq_groups[0].is_prompt):
  305. logger.debug(
  306. "Applying Tail-Free Sampling with tfs: "
  307. f"{sampling_tensors.tfss}.")
  308. logits = _apply_tfs(
  309. logits, sampling_tensors.tfss)
  310. elif sampler_id == SamplerID.ETA_CUTOFF and do_eta_cutoffs:
  311. if (sampling_metadata.seq_groups and
  312. sampling_metadata.seq_groups[0].is_prompt):
  313. logger.debug(
  314. "Applying ETA Cutoff with eta_cutoff: "
  315. f"{sampling_tensors.eta_cutoffs}.")
  316. logits = _apply_eta_cutoff(
  317. logits, sampling_tensors.eta_cutoffs)
  318. elif sampler_id == SamplerID.EPSILON_CUTOFF and do_epsilon_cutoffs:
  319. if (sampling_metadata.seq_groups and
  320. sampling_metadata.seq_groups[0].is_prompt):
  321. logger.debug(
  322. "Applying Epsilon Cutoff with epsilon_cutoff: "
  323. f"{sampling_tensors.epsilon_cutoffs}.")
  324. logits = _apply_epsilon_cutoff(
  325. logits, sampling_tensors.epsilon_cutoffs)
  326. elif sampler_id == SamplerID.TYPICAL_P and do_typical_ps:
  327. if (sampling_metadata.seq_groups and
  328. sampling_metadata.seq_groups[0].is_prompt):
  329. logger.debug(
  330. "Applying Locally Typical Sampling with typical_p: "
  331. f"{sampling_tensors.typical_ps}.")
  332. logits = _apply_typical_sampling(
  333. logits, sampling_tensors.typical_ps)
  334. elif sampler_id == SamplerID.QUADRATIC and do_quadratic:
  335. if (sampling_metadata.seq_groups and
  336. sampling_metadata.seq_groups[0].is_prompt):
  337. logger.debug(
  338. "Applying Quadratic and Cubic Sampling with "
  339. "smoothing_factors: "
  340. f"{sampling_tensors.smoothing_factors},"
  341. f" smoothing_curves: "
  342. f"{sampling_tensors.smoothing_curves}.")
  343. logits = _apply_quadratic_sampling(
  344. logits, sampling_tensors.smoothing_factors,
  345. sampling_tensors.smoothing_curves)
  346. elif sampler_id == SamplerID.XTC and do_xtc:
  347. if (sampling_metadata.seq_groups and
  348. sampling_metadata.seq_groups[0].is_prompt):
  349. logger.debug(
  350. "Applying Exclude Top Choices sampling with "
  351. f"xtc_threshold: {sampling_tensors.xtc_thresholds}, "
  352. "xtc_probability: "
  353. f"{sampling_tensors.xtc_probabilities}.")
  354. logits = _apply_xtc_sampling(
  355. logits, sampling_tensors.xtc_thresholds,
  356. sampling_tensors.xtc_probabilities)
  357. # We use float32 for probabilities and log probabilities.
  358. # Compute the probabilities.
  359. probs = torch.softmax(logits, dim=-1, dtype=torch.float)
  360. # skew needs to be applied post-softmax
  361. if do_skew:
  362. if (sampling_metadata.seq_groups and
  363. sampling_metadata.seq_groups[0].is_prompt):
  364. logger.debug(
  365. "Applying Skew sampling with skew: "
  366. f"{sampling_tensors.skews}.")
  367. # reference: https://github.com/turboderp/exllamav2/commit/1de4cdd70b09208e7b4f17ee322c190e16f60efd
  368. cum_probs = torch.cumsum(probs, dim=-1)
  369. cum_probs = torch.pow(cum_probs, torch.exp(
  370. sampling_tensors.skews).unsqueeze(dim=1))
  371. probs = torch.diff(cum_probs, dim=-1,
  372. prepend=torch.zeros_like(cum_probs[..., :1]))
  373. logits = torch.log(probs)
  374. # Compute the log probabilities.
  375. logprobs = torch.log_softmax(logits, dim=-1, dtype=torch.float)
  376. # Sample the next tokens.
  377. sample_results, maybe_sampled_tokens_tensor = _sample(
  378. probs,
  379. logprobs,
  380. sampling_metadata,
  381. sampling_tensors,
  382. include_gpu_probs_tensor=self.include_gpu_probs_tensor,
  383. modify_greedy_probs=self._should_modify_greedy_probs_inplace,
  384. )
  385. if self.include_gpu_probs_tensor:
  386. assert maybe_sampled_tokens_tensor is not None
  387. on_device_tensors = (probs, logprobs, maybe_sampled_tokens_tensor)
  388. else:
  389. on_device_tensors = None
  390. # Get the logprobs query results.
  391. prompt_logprobs = None
  392. sample_logprobs = None
  393. if not sampling_metadata.skip_sampler_cpu_output:
  394. prompt_logprobs, sample_logprobs = _get_logprobs(
  395. logprobs, sampling_metadata, sample_results)
  396. return _build_sampler_output(
  397. sample_results,
  398. sampling_metadata,
  399. prompt_logprobs,
  400. sample_logprobs,
  401. on_device_tensors=on_device_tensors,
  402. skip_sampler_cpu_output=sampling_metadata.skip_sampler_cpu_output)
  403. @property
  404. def _should_modify_greedy_probs_inplace(self) -> bool:
  405. """Whether or not the sampler should modify the probability distribution
  406. of greedily-sampled tokens such that multinomial sampling would sample
  407. the greedily-sampled token.
  408. In other words, if True then we set the probability of the greedily-
  409. sampled token to 1.
  410. This is used by speculative decoding, which requires that the sampling
  411. method be encoded into the probability distribution.
  412. """
  413. return self.should_modify_greedy_probs_inplace
  414. def _get_bin_counts_and_mask(
  415. tokens: torch.Tensor,
  416. vocab_size: int,
  417. num_seqs: int,
  418. ) -> Tuple[torch.Tensor, torch.Tensor]:
  419. # Compute the bin counts for the tokens.
  420. # vocab_size + 1 for padding.
  421. bin_counts = torch.zeros((num_seqs, vocab_size + 1),
  422. dtype=torch.long,
  423. device=tokens.device)
  424. bin_counts.scatter_add_(1, tokens, torch.ones_like(tokens))
  425. bin_counts = bin_counts[:, :vocab_size]
  426. mask = bin_counts > 0
  427. return bin_counts, mask
  428. def _get_custom_token_bans(
  429. sampling_metadata: SamplingMetadata) -> List[List[int]]:
  430. assert sampling_metadata.seq_groups is not None
  431. banned_tokens: List[List[int]] = []
  432. for i, seq_group in enumerate(sampling_metadata.seq_groups):
  433. sampling_params = sampling_metadata.seq_groups[i].sampling_params
  434. seq_ids = seq_group.seq_ids
  435. custom_token_bans = sampling_params.custom_token_bans
  436. if (i < sampling_metadata.num_prompts
  437. and sampling_params.prompt_logprobs is not None):
  438. prompt_len = len(seq_group.prompt_logprob_indices)
  439. banned_tokens += [custom_token_bans] * (prompt_len - 1)
  440. banned_tokens += [custom_token_bans] * len(seq_ids)
  441. return banned_tokens
  442. def _apply_penalties(logits: torch.Tensor, prompt_tokens_tensor: torch.Tensor,
  443. output_tokens_tensor: torch.Tensor,
  444. presence_penalties: torch.Tensor,
  445. frequency_penalties: torch.Tensor,
  446. repetition_penalties: torch.Tensor) -> torch.Tensor:
  447. num_seqs, vocab_size = logits.shape
  448. _, prompt_mask = _get_bin_counts_and_mask(prompt_tokens_tensor, vocab_size,
  449. num_seqs)
  450. output_bin_counts, output_mask = _get_bin_counts_and_mask(
  451. output_tokens_tensor, vocab_size, num_seqs)
  452. repetition_penalties = repetition_penalties[:, None].repeat(1, vocab_size)
  453. repetition_penalties[~(prompt_mask | output_mask)] = 1.0
  454. logits = torch.where(logits > 0, logits / repetition_penalties,
  455. logits * repetition_penalties)
  456. # We follow the definition in OpenAI API.
  457. # Refer to https://platform.openai.com/docs/api-reference/parameter-details
  458. logits -= frequency_penalties.unsqueeze_(dim=1) * output_bin_counts
  459. logits -= presence_penalties.unsqueeze_(dim=1) * output_mask
  460. return logits
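# Illustrative walk-through (comment only; values assumed, not from the
# original source): for a token that appeared twice in the output with
# presence_penalty=0.5, frequency_penalty=0.2 and repetition_penalty=1.2,
# a positive logit x becomes x / 1.2 - 0.2 * 2 - 0.5 * 1, while a negative
# logit is first multiplied by 1.2 before the subtractive penalties, matching
# the OpenAI parameter definitions referenced above.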
  461. def _apply_temperatures(
  462. logits: torch.Tensor,
  463. temperatures: torch.Tensor,
  464. dynatemp_mins: torch.Tensor,
  465. dynatemp_maxs: torch.Tensor,
  466. dynatemp_exps: torch.Tensor,
  467. ) -> None:
  468. dynatemp_mask = (dynatemp_mins != 0) | (dynatemp_maxs != 0)
  469. dynatemp_mins = dynatemp_mins[dynatemp_mask]
  470. dynatemp_maxs = dynatemp_maxs[dynatemp_mask]
  471. dynatemp_exps = dynatemp_exps[dynatemp_mask]
  472. dynatemp_logits = logits[dynatemp_mask]
  473. dynatemp_shifted_logits = torch.log_softmax(dynatemp_logits, dim=-1)
  474. dynatemp_probs = dynatemp_shifted_logits.exp()
  475. dynatemp_entropies = -(dynatemp_probs *
  476. dynatemp_shifted_logits).nansum(dim=-1)
  477. dynatemp_max_entropies = torch.log_(
  478. (dynatemp_logits > float("-inf")).sum(dim=-1).float())
  479. normalized_entropies = dynatemp_entropies.div_(dynatemp_max_entropies)
  480. dyn_temp = (dynatemp_mins + (dynatemp_maxs - dynatemp_mins) *
  481. normalized_entropies.pow_(dynatemp_exps))
  482. temperatures[dynatemp_mask] = dyn_temp
  483. temperatures[temperatures.isnan()] = _TEMPERATURE_MINIMUM
  484. temperatures[temperatures <= _TEMPERATURE_MINIMUM] = _TEMPERATURE_MINIMUM
  485. # To prevent saturation of top logits, we shift the range to [-inf, 1]
  486. # Why align to 1, instead of 0? Because [0, 1] holds 25% of all floats.
  487. # Why mask? So we aren't potentially discarding data in milder temps.
  488. low_temps = temperatures < 0.1
  489. logits[low_temps] -= logits.max(dim=-1, keepdim=True).values[low_temps] - 1
  490. logits.div_(temperatures.unsqueeze(dim=1))
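# Illustrative note (assumed values): with dynatemp_min=0.5, dynatemp_max=1.5
# and dynatemp_exp=1.0, a row whose normalized entropy is 0.25 gets an
# effective temperature of 0.5 + (1.5 - 0.5) * 0.25 = 0.75; rows with both
# dynatemp bounds at 0 keep their static temperature, and anything at or
# below _TEMPERATURE_MINIMUM is clamped to that minimum before the division.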
  491. def _apply_token_bans(logits: torch.Tensor,
  492. banned_tokens: List[List[int]]) -> torch.Tensor:
  493. for i, banned_token_ids in enumerate(banned_tokens):
  494. if i >= logits.size(0):
  495. break
  496. if not banned_token_ids:
  497. continue
  498. logits[i, banned_token_ids] = -float("inf")
  499. return logits
  500. def _apply_min_tokens_penalty(
  501. logits: torch.Tensor,
  502. sampling_metadata: SamplingMetadata,
  503. ) -> torch.Tensor:
  504. """Apply min_tokens penalty which sets stop tokens to -inf if min_tokens
  505. have not been generated yet
  506. """
  507. # list of indices in logits that will be set to -inf
  508. logits_to_penalize = []
  509. logits_applied = 0
  510. for seq_group in sampling_metadata.seq_groups:
  511. seq_ids = seq_group.seq_ids
  512. sampling_params = seq_group.sampling_params
  513. sample_indices = seq_group.sample_indices
  514. logits_applied += len(sample_indices) + len(
  515. seq_group.prompt_logprob_indices)
  516. if not seq_group.do_sample:
  517. continue
  518. start_idx = sample_indices[0]
  519. min_tokens = sampling_params.min_tokens
  520. token_ids_to_penalize = sampling_params.all_stop_token_ids
  521. if min_tokens > 0 and token_ids_to_penalize:
  522. seqs_to_penalize = []
  523. for j, seq_id in enumerate(seq_ids):
  524. seq_data = seq_group.seq_data[seq_id]
  525. if len(seq_data.output_token_ids_array) < min_tokens:
  526. seqs_to_penalize.append(j)
  527. if seqs_to_penalize:
  528. # convert to the index into logits
  529. seqs_to_penalize = [start_idx + j for j in seqs_to_penalize]
  530. # itertools.product pairs each seq index with every token id
  531. logits_to_penalize.extend(
  532. itertools.product(seqs_to_penalize, token_ids_to_penalize))
  533. if logits_to_penalize:
  534. # use zip and * to group indices along each dimension
  535. # eg. [ (1,2), (1,3), (5,6) ] -> ( (1,1,5), (2,3,6) )
  536. logits[tuple(zip(*logits_to_penalize))] = -float("inf")
  537. # verifies that no rows in logits were missed unexpectedly
  538. assert logits_applied == logits.shape[0]
  539. return logits
  540. def _apply_dry(
  541. logits: torch.Tensor,
  542. input_token_ids: torch.Tensor,
  543. output_token_ids: torch.Tensor,
  544. multipliers: torch.Tensor,
  545. bases: torch.Tensor,
  546. allowed_lengths: torch.Tensor,
  547. sequence_breakers_ids: torch.Tensor,
  548. ranges: torch.Tensor,
  549. ) -> torch.Tensor:
  550. """
  551. Apply Don't Repeat Yourself (DRY) sampling to the logits.
  552. Reference: https://github.com/oobabooga/text-generation-webui/pull/5677
  553. """
  554. if torch.all(multipliers == 0):
  555. return logits
  556. # DRY needs to be applied to both input AND output tokens
  557. input_ids = torch.cat((input_token_ids, output_token_ids), dim=1)
  558. vocab_size = logits.size(-1)
  559. def compute_z_array(s: List[int], end: int, search_start: int) -> List[int]:
  560. """
  561. Compute Z array using two-pointer technique for linear time complexity
  562. """
  563. z = [0] * len(s)
  564. right = end - 1
  565. left = end - 1
  566. while right >= search_start:
  567. while left == right and left >= search_start:
  568. if s[right] == s[end]:
  569. break
  570. right -= 1
  571. left -= 1
  572. while left >= search_start and s[left] == s[end - (right - left)]:
  573. z[right] += 1
  574. left -= 1
  575. helper = right
  576. while right > left:
  577. right -= 1
  578. if left == right:
  579. break
  580. z[right] = min(z[end - (helper - right)], right - left)
  581. if left >= search_start and right - z[right] <= left:
  582. break
  583. return z
  584. # Process each sequence in the batch
  585. for i, (input_ids_row, logits_row) in enumerate(zip(input_ids, logits)):
  586. multiplier = multipliers[i].item()
  587. if multiplier == 0:
  588. continue
  589. seq_breakers = set(sequence_breakers_ids[i].tolist())
  590. input_ids_list = input_ids_row.tolist()
  591. last_token = input_ids_list[-1]
  592. if last_token in seq_breakers:
  593. continue
  594. range_limit = ranges[i].item()
  595. if range_limit == 0:
  596. search_start = 0
  597. else:
  598. search_start = max(0, len(input_ids_list) - range_limit)
  599. # Find max match length based on sequence breakers
  600. max_match_length = 0
  601. MAX_LENGTH = min(len(input_ids_list), 1000) # Prevent overflow
  602. while (max_match_length < MAX_LENGTH and
  603. input_ids_list[len(input_ids_list) - max_match_length - 1]
  604. not in seq_breakers):
  605. max_match_length += 1
  606. z_array = compute_z_array(
  607. input_ids_list, len(input_ids_list) - 1, search_start)
  608. z_array = [min(length, max_match_length) for length in z_array]
  609. penalties = {}
  610. allowed_length = allowed_lengths[i]
  611. base = bases[i]
  612. for idx, match_length in enumerate(z_array[:-1]):
  613. if match_length >= allowed_length:
  614. next_token = input_ids_list[idx + 1]
  615. if (next_token >= vocab_size or next_token in
  616. seq_breakers):
  617. continue
  618. penalty = multiplier * (base ** (match_length - allowed_length))
  619. penalties[next_token] = max(
  620. penalty, penalties.get(next_token, 0))
  621. for token, penalty in penalties.items():
  622. logits_row[token] -= penalty
  623. return logits
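# Illustrative example (assumed values, not from the original source): if the
# last 4 generated tokens repeat an earlier 4-token run, and the row has
# allowed_length=2, multiplier=0.8 and base=1.75, then the token that would
# extend the repeat is penalized by 0.8 * 1.75 ** (4 - 2) = 2.45 subtracted
# from its logit; tokens in the sequence-breaker set are never penalized.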
  624. def _apply_no_repeat_ngram(
  625. logits: torch.Tensor,
  626. input_ids: torch.Tensor,
  627. ngram_size: torch.Tensor,
  628. ) -> torch.Tensor:
  629. """Apply no-repeat-ngram penalty which sets logits to -inf for tokens that
  630. would create a repeated n-gram.
  631. """
  632. if torch.all(ngram_size == 0):
  633. return logits
  634. batch_size = logits.shape[0]
  635. for i in range(batch_size):
  636. size = int(ngram_size[i].item())
  637. if size == 0:
  638. continue
  639. cur_len = len(input_ids[i])
  640. if cur_len < size:
  641. continue
  642. banned_tokens = _calc_banned_ngram_tokens(
  643. ngram_size=size,
  644. prev_input_ids=input_ids[i],
  645. cur_len=cur_len-1
  646. )
  647. if banned_tokens:
  648. logits[i, banned_tokens] = -float("inf")
  649. return logits
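# Illustrative example (assumed values): with ngram_size=2 and a context
# ending in "... A B ... A", any token that previously followed the
# single-token prefix "A" (here "B") is banned by setting its logit to -inf,
# so the bigram "A B" cannot be generated a second time.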
  650. def _apply_top_k_top_p(
  651. logits: torch.Tensor,
  652. p: torch.Tensor,
  653. k: torch.Tensor,
  654. ) -> torch.Tensor:
  655. logits_sort, logits_idx = logits.sort(dim=-1, descending=False)
  656. # Apply top-k.
  657. top_k_mask = logits_sort.size(1) - k.to(torch.long)
  658. # Get all the top_k values.
  659. top_k_mask = logits_sort.gather(1, top_k_mask.unsqueeze(dim=1))
  660. top_k_mask = logits_sort < top_k_mask
  661. logits_sort.masked_fill_(top_k_mask, -float("inf"))
  662. # Apply top-p.
  663. probs_sort = logits_sort.softmax(dim=-1)
  664. probs_sum = probs_sort.cumsum(dim=-1)
  665. top_p_mask = probs_sum <= 1 - p.unsqueeze(dim=1)
  666. # at least one
  667. top_p_mask[:, -1] = False
  668. logits_sort.masked_fill_(top_p_mask, -float("inf"))
  669. # Re-sort the probabilities.
  670. src = torch.arange(logits_idx.shape[-1],
  671. device=logits_idx.device).expand_as(logits_idx)
  672. logits_idx_inv = torch.empty_like(logits_idx).scatter_(dim=-1,
  673. index=logits_idx,
  674. src=src)
  675. logits = torch.gather(logits_sort, dim=-1, index=logits_idx_inv)
  676. return logits
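# Illustrative walk-through (assumed values): for a single row of logits
# [2.0, 1.0, 0.5, -1.0] with k=2 and p=0.9, the top-k step keeps only the two
# largest logits; softmax over the survivors is roughly [0.73, 0.27], and
# since the ascending cumulative mass of the kept entries never falls below
# 1 - p = 0.1, top-p removes nothing further, leaving [2.0, 1.0, -inf, -inf]
# after scattering back to the original vocabulary order.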
  677. def _apply_min_p(
  678. logits: torch.Tensor,
  679. min_p: torch.Tensor,
  680. ) -> torch.Tensor:
  681. """
  682. Adapted from
  683. https://github.com/oobabooga/text-generation-webui/blob/3146124ec01f02c8fb1650a6517cf1b60b537aaf/modules/sampler_hijack.py#L16C17-L16C17
  684. """
  685. probs = torch.softmax(logits, dim=-1)
  686. top_probs, _ = probs.max(dim=-1, keepdim=True)
  687. scaled_min_p = min_p.unsqueeze_(dim=1) * top_probs
  688. tokens_to_remove = probs < scaled_min_p
  689. logits = logits.masked_fill_(tokens_to_remove, -float("inf"))
  690. return logits
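# Illustrative example (assumed values): if the most likely token has
# probability 0.40 and min_p=0.1, every token whose probability is below
# 0.1 * 0.40 = 0.04 is masked to -inf.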
  691. def _apply_top_a(
  692. logits: torch.Tensor,
  693. top_a: torch.Tensor,
  694. ) -> torch.Tensor:
  695. probs = torch.softmax(logits, dim=-1)
  696. top_probs, _ = probs.max(dim=-1, keepdim=True)
  697. threshold = torch.pow(top_probs, 2) * top_a.unsqueeze_(dim=1)
  698. tokens_to_remove = probs < threshold
  699. logits = logits.masked_fill_(tokens_to_remove, -float("inf"))
  700. return logits
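# Illustrative example (assumed values): with top_a=0.2 and a top probability
# of 0.5, the cutoff is 0.2 * 0.5 ** 2 = 0.05, so tokens whose probability is
# below 0.05 are masked to -inf.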
  701. def _apply_tfs(
  702. logits: torch.Tensor,
  703. tfs: torch.Tensor,
  704. ) -> torch.Tensor:
  705. logits_sort, logits_idx = logits.sort(dim=-1, descending=True)
  706. d2 = logits_sort.softmax(dim=-1).diff().diff().abs()
  707. normalized_d2 = d2 / torch.sum(d2, dim=-1, keepdim=True)
  708. curvature_cdf = torch.cumsum(normalized_d2, dim=-1)
  709. tfs_mask = curvature_cdf > tfs.unsqueeze(dim=-1)
  710. tfs_mask = torch.cat(
  711. (
  712. torch.zeros(
  713. logits.shape[0], 1, dtype=torch.bool, device=logits.device),
  714. tfs_mask,
  715. torch.ones(
  716. logits.shape[0], 1, dtype=torch.bool, device=logits.device),
  717. ),
  718. dim=-1,
  719. )
  720. logits_sort[tfs_mask] = -float("inf")
  721. logits = torch.gather(logits_sort,
  722. dim=-1,
  723. index=torch.argsort(logits_idx, dim=-1))
  724. return logits
  725. def _apply_eta_cutoff(
  726. logits: torch.Tensor,
  727. eta_cutoff: torch.Tensor,
  728. ) -> torch.Tensor:
  729. shifted_logits = torch.log_softmax(logits, dim=-1)
  730. probs = shifted_logits.exp()
  731. neg_entropy = (probs * shifted_logits).nansum(dim=-1)
  732. eps = torch.min(eta_cutoff,
  733. torch.sqrt(eta_cutoff) *
  734. torch.exp(neg_entropy)).unsqueeze(dim=1)
  735. eta_mask = probs < eps
  736. # guard against nulling out all the logits
  737. top_idx = torch.argmax(probs, dim=1, keepdim=True)
  738. eta_mask.scatter_(dim=1, index=top_idx, value=False)
  739. logits[eta_mask] = -float("inf")
  740. return logits
  741. def _apply_epsilon_cutoff(
  742. logits: torch.Tensor,
  743. epsilon_cutoff: torch.Tensor,
  744. ) -> torch.Tensor:
  745. probs = logits.softmax(dim=-1)
  746. eps_mask = probs < epsilon_cutoff.unsqueeze(dim=1)
  747. # guard against nulling out all the logits
  748. top_idx = torch.argmax(probs, dim=1, keepdim=True)
  749. eps_mask.scatter_(dim=1, index=top_idx, value=False)
  750. logits[eps_mask] = -float("inf")
  751. return logits
  752. def _apply_typical_sampling(
  753. logits: torch.Tensor,
  754. typical_p: torch.Tensor,
  755. ) -> torch.Tensor:
  756. shifted_logits = torch.log_softmax(logits, dim=-1)
  757. probs = shifted_logits.exp()
  758. neg_entropy = (probs * shifted_logits).nansum(dim=-1, keepdim=True)
  759. surprisal_deviations = (neg_entropy - shifted_logits).abs()
  760. _, indices = torch.sort(surprisal_deviations)
  761. reordered_probs = probs.gather(-1, indices)
  762. typ_mask_sorted = reordered_probs.cumsum(dim=-1) >= typical_p.unsqueeze(
  763. dim=1)
  764. min_tokens_to_keep = 1
  765. # Keep at least min_tokens_to_keep
  766. typ_mask_sorted[..., :min_tokens_to_keep] = 0
  767. typ_mask = typ_mask_sorted.scatter(1, indices, typ_mask_sorted)
  768. logits[typ_mask] = -float("inf")
  769. return logits
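# Illustrative note (hedged explanation, not original source commentary):
# tokens are ranked by how far their surprisal deviates from the row's
# entropy; with typical_p=0.9 the closest-to-typical tokens are kept until
# their cumulative probability reaches 0.9, the rest are masked to -inf, and
# at least one token is always retained.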
  770. def _apply_quadratic_sampling(
  771. logits: torch.Tensor,
  772. smoothing_factor: torch.Tensor,
  773. smoothing_curve: torch.Tensor,
  774. ) -> torch.Tensor:
  775. """
  776. Applies a quadratic transformation to the logits based on the
  777. provided smoothing factors and curves. The transformation is
  778. centered around the maximum logit value in the batch.
  779. The transformation involves a quadratic and cubic term, with the
  780. cubic term controlled by the smoothing curve. The quadratic term is
  781. scaled by the smoothing factor, and the cubic term is scaled by the
  782. product of the smoothing factor and the smoothing curve.
  783. params:
  784. logits (torch.Tensor): The logits to be transformed.
  785. smoothing_factors (torch.Tensor): The factors to scale the quadratic
  786. term in the transformation.
  787. smoothing_curves (torch.Tensor): The factors to scale the cubic term
  788. in the transformation.
  789. returns:
  790. torch.Tensor: The transformed logits.
  791. Credits: @kalomaze
  792. """
  793. mask = smoothing_factor != 0
  794. smoothing_factor.unsqueeze_(dim=1)
  795. smoothing_curve.unsqueeze_(dim=1)
  796. k = smoothing_factor * (3 - smoothing_curve) / 2
  797. s = smoothing_factor * (smoothing_curve - 1) / 2
  798. quadlogits = logits[mask] # limit to logits using this sampler
  799. max_logits = quadlogits.max(dim=-1, keepdim=True).values
  800. # Construct the delta from each logit to its new value
  801. diff = quadlogits - max_logits
  802. diff -= diff**2 * (s[mask] * diff - k[mask])
  803. diff[diff != diff] = 0 # Eliminate NaNs due to infs
  804. logits[mask] -= diff
  805. return logits
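# Illustrative note (assumed values): with smoothing_factor=0.3 and
# smoothing_curve=1.0, k = 0.3 * (3 - 1) / 2 = 0.3 and s = 0, so each masked
# row's logit x is effectively replaced by max - k * (x - max) ** 2: logits
# close to the row maximum are flattened together while distant ones are
# pushed further down; curves above 1 shift weight to the cubic term s.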
  806. def _apply_xtc_sampling(
  807. logits: torch.Tensor,
  808. xtc_thresholds: torch.Tensor,
  809. xtc_probabilities: torch.Tensor,
  810. ) -> torch.Tensor:
  811. """Apply Exclude Top Choices (XTC) sampling to the logits.
  812. Reference: https://github.com/oobabooga/text-generation-webui/pull/6335
  813. Args:
  814. logits: (num_tokens, vocab_size) The input logits.
  815. xtc_thresholds: (num_tokens,) The threshold for each token.
  816. xtc_probabilities: (num_tokens,) The probability of applying XTC
  817. for each token.
  818. Returns:
  819. torch.Tensor: The modified logits.
  820. """
  821. apply_xtc = torch.rand_like(xtc_probabilities) < xtc_probabilities
  822. if not apply_xtc.any():
  823. return logits
  824. probs = torch.softmax(logits, dim=-1)
  825. sorted_probs, sorted_indices = torch.sort(probs, descending=True, dim=-1)
  826. # Find indices where the next probability is above the threshold
  827. # Skips the top choice, which later on becomes skipping the last choice.
  828. above_threshold = sorted_probs[..., 1:] >= xtc_thresholds.unsqueeze(-1)
  829. # Apply XTC only to rows where it should be applied
  830. for i in range(logits.shape[0]):
  831. if apply_xtc[i]:
  832. # Count logits above the threshold (skipping the first)
  833. indices_to_remove = above_threshold[i].count_nonzero(dim=-1).item()
  834. if indices_to_remove > 0:
  835. # Implies the top logit and at least one other is >= threshold.
  836. # Mask out above_thresh logits except the last/lowest one.
  837. logits[i].scatter_(
  838. 0, sorted_indices[i, :indices_to_remove], -float('inf'))
  839. return logits
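# Illustrative example (assumed values): with xtc_threshold=0.1, sorted
# probabilities [0.5, 0.3, 0.15, 0.05] and the per-row random draw landing
# below xtc_probability, the entries after the top whose probability is
# >= 0.1 are [0.3, 0.15], so indices_to_remove=2 and the two highest-ranked
# tokens (0.5 and 0.3) are masked, leaving 0.15 as the best surviving choice.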
  840. def _apply_top_nsigma(
  841. logits: torch.Tensor,
  842. nsigma: torch.Tensor,
  843. ) -> torch.Tensor:
  844. """Apply top-nsigma truncation to the logits.
  845. Reference: https://arxiv.org/abs/2411.07641
  846. Args:
  847. logits: Logits of shape (num_tokens, vocab_size)
  848. nsigma: Number of standard deviations to use as threshold
  849. Returns:
  850. Modified logits with values below threshold set to -inf
  851. """
  852. std = logits.std(dim=-1, keepdim=True)
  853. threshold = (logits.max(dim=-1, keepdim=True).values -
  854. nsigma.unsqueeze(dim=1) * std)
  855. logits[logits < threshold] = float("-inf")
  856. return logits
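# Illustrative example (assumed values): with nsigma=1.5, a row maximum of
# 10.0 and a logit standard deviation of 2.0, the cutoff is
# 10.0 - 1.5 * 2.0 = 7.0, and every logit below 7.0 is masked to -inf.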
  857. def _greedy_sample(
  858. selected_seq_groups: List[SequenceGroupToSample],
  859. samples: torch.Tensor,
  860. ) -> List[Tuple[List[int], List[int]]]:
  861. """Run greedy sampling on a given samples.
  862. Args:
  863. selected_seq_groups: A list of sequence groups batched.
  864. samples: (num_selected_samples,) A tensor of samples. The length of
  865. samples could be smaller than selected_seq_groups if
  866. seq_group.do_sample is False.
  867. Returns:
  868. Tuple of (next_token_ids, parent_ids). The length of the returned list
  869. is the same as the length of selected_seq_groups. If the corresponding
  870. seq_group has do_sample=False, the tuple contains ([], []).
  871. """
  872. samples = samples.tolist()
  873. sample_idx = 0
  874. results = []
  875. for seq_group in selected_seq_groups:
  876. if not seq_group.do_sample:
  877. results.append(([], []))
  878. continue
  879. seq_ids = seq_group.seq_ids
  880. num_parent_seqs = len(seq_ids)
  881. assert num_parent_seqs == 1, (
  882. "Greedy sampling should have only one seq.")
  883. parent_ids = list(range(num_parent_seqs))
  884. next_token_ids = [samples[sample_idx]]
  885. results.append((next_token_ids, parent_ids))
  886. sample_idx += num_parent_seqs
  887. return results
  888. def _random_sample(
  889. selected_seq_groups: List[SequenceGroupToSample],
  890. random_samples: torch.Tensor,
  891. ) -> List[Tuple[List[int], List[int]]]:
  892. """Run random sampling on a given samples.
  893. Args:
  894. selected_seq_groups: A list of sequence groups batched.
  895. random_samples: (num_selected_samples,) A tensor of samples. The
  896. length of samples could be smaller than selected_seq_groups if
  897. seq_group.do_sample is False.
  898. Returns:
  899. Tuple of (next_token_ids, parent_ids). The length of the returned list
  900. is the same as the length of selected_seq_groups. If the corresponding
  901. seq_group has do_sample=False, the tuple contains ([], []).
  902. """
  903. # Move the sampled tokens to the CPU once, ahead of the per-group loop.
  904. random_samples = random_samples.cpu()
  905. sample_idx = 0
  906. results = []
  907. for seq_group in selected_seq_groups:
  908. if not seq_group.do_sample:
  909. results.append(([], []))
  910. continue
  911. seq_ids = seq_group.seq_ids
  912. sampling_params = seq_group.sampling_params
  913. is_prompt = seq_group.is_prompt
  914. num_parent_seqs = len(seq_ids)
  915. if is_prompt:
  916. # Prompt phase.
  917. parent_ids = [0] * sampling_params.best_of
  918. next_token_ids = random_samples[
  919. sample_idx, :sampling_params.best_of].tolist()
  920. else:
  921. # Generation phase.
  922. parent_ids = list(range(num_parent_seqs))
  923. next_token_ids = random_samples[sample_idx:sample_idx +
  924. num_parent_seqs, 0].tolist()
  925. results.append((next_token_ids, parent_ids))
  926. sample_idx += num_parent_seqs
  927. return results
  928. def _beam_search_sample(
  929. selected_seq_groups: List[SequenceGroupToSample],
  930. logprobs: torch.Tensor,
  931. ) -> List[Tuple[List[int], List[int]]]:
  932. """Run beam sampling on a given samples.
  933. Args:
  934. selected_seq_groups: A list of sequence groups batched.
  935. logprobs: (num_selected_samples, vocab_size,) A tensor of logprob
  936. on selected sample indices.
  937. Returns:
  938. Tuple of (next_token_ids, parent_ids). The length of the returned list
  939. is the same as the length of selected_seq_groups. If the corresponding
  940. seq_group has do_sample=False, the tuple contains ([], []).
  941. """
  942. # We sample 2 * beam_width candidates to make sure that with high
  943. # probability we can get `beam_width` candidates in addition to
  944. # the finished sequences for the next iteration. See
  945. # https://github.com/tensorflow/tensor2tensor/blob/bafdc1b67730430d38d6ab802cbd51f9d053ba2e/tensor2tensor/utils/beam_search.py#L557-L563
  946. # for details. See also HF reference:
  947. # https://github.com/huggingface/transformers/blob/a4dd53d88e4852f023332d284ff07a01afcd5681/src/transformers/generation/utils.py#L3063-L3065
  948. #
  949. # NOTE: Beam search is not vectorized, so its speed can be slower than
  950. # other sampling methods.
  951. sample_idx = 0
  952. results = []
  953. for seq_group in selected_seq_groups:
  954. if not seq_group.do_sample:
  955. results.append(([], []))
  956. continue
  957. is_prompt = seq_group.is_prompt
  958. seq_ids, sampling_params = seq_group.seq_ids, seq_group.sampling_params
  959. num_parent_seqs = len(seq_ids)
  960. beam_width = sampling_params.best_of
  961. seq_group_logprobs = logprobs[sample_idx:sample_idx + num_parent_seqs]
  962. if is_prompt:
  963. # Prompt phase.
  964. assert num_parent_seqs == 1, (
  965. "Prompt input should have only one seq.")
  966. parent_ids = [0] * (2 * beam_width)
  967. _, next_token_ids = torch.topk(seq_group_logprobs[0],
  968. 2 * beam_width)
  969. next_token_ids = next_token_ids.tolist()
  970. else:
  971. # Generation phase.
  972. cumulative_logprobs = [
  973. seq_group.seq_data[seq_id].cumulative_logprob
  974. for seq_id in seq_ids
  975. ]
  976. cumulative_logprobs = torch.tensor(
  977. cumulative_logprobs,
  978. dtype=torch.float,
  979. device=seq_group_logprobs.device)
  980. seq_group_logprobs = (seq_group_logprobs +
  981. cumulative_logprobs.unsqueeze(dim=1))
  982. _, topk_ids = torch.topk(seq_group_logprobs.flatten(),
  983. 2 * beam_width)
  984. topk_ids = topk_ids.tolist()
  985. vocab_size = seq_group_logprobs.size(-1)
  986. parent_ids = [i // vocab_size for i in topk_ids]
  987. next_token_ids = [i % vocab_size for i in topk_ids]
  988. results.append((next_token_ids, parent_ids))
  989. sample_idx += num_parent_seqs
  990. assert sample_idx == logprobs.size(0)
  991. return results
  992. # torch.multinomial forces a GPU<->CPU sync.
  993. # Therefore, we use an optimized implementation instead.
  994. # Note that we always sample with replacement.
  995. # probs will be modified in place, but this is fine, as we pass
  996. # in a copy already.
  997. def _multinomial(
  998. probs: torch.Tensor,
  999. num_samples: int,
  1000. seq_groups: Optional[List[SequenceGroupToSample]] = None,
  1001. ) -> torch.Tensor:
  1002. if num_samples > 1:
  1003. probs = probs.repeat_interleave(num_samples, dim=0)
  1004. q = torch.empty_like(probs)
  1005. if seq_groups is None:
  1006. q.exponential_()
  1007. else:
  1008. sample_idx = 0
  1009. for seq_group in seq_groups:
  1010. seq_ids = seq_group.seq_ids
  1011. stride = len(seq_ids) * num_samples
  1012. assert seq_group.generator is not None
  1013. q[sample_idx:sample_idx +
  1014. stride].exponential_(generator=seq_group.generator)
  1015. sample_idx += stride
  1016. return probs.div_(q).argmax(dim=1).view(-1, num_samples)
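# Sketch of why the division trick above samples correctly (hedged
# explanation, not original source commentary): dividing each probability
# p_i by an independent Exponential(1) draw q_i and taking the argmax is the
# exponential-races form of the Gumbel-max trick, so argmax_i(p_i / q_i)
# selects token i with probability proportional to p_i, avoiding the
# GPU<->CPU sync that torch.multinomial would force.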
  1017. def _top_k_top_p_multinomial_with_kernels(
  1018. probs: torch.Tensor, top_ks: torch.Tensor, top_ps: torch.Tensor,
  1019. num_samples: int, seq_groups: Optional[List[SequenceGroupToSample]]):
  1020. max_top_k_round = 32
  1021. if num_samples > 1:
  1022. probs = probs.repeat_interleave(num_samples, dim=0)
  1023. top_ks = top_ks.repeat_interleave(num_samples)
  1024. top_ps = top_ps.repeat_interleave(num_samples)
  1025. batch_size = probs.shape[0]
  1026. uniform_samples = torch.empty((max_top_k_round, batch_size),
  1027. device=probs.device)
  1028. if seq_groups is None:
  1029. uniform_samples.uniform_()
  1030. else:
  1031. sample_idx = 0
  1032. for seq_group in seq_groups:
  1033. seq_ids = seq_group.seq_ids
  1034. stride = len(seq_ids) * num_samples
  1035. assert seq_group.generator is not None
  1036. uniform_samples[:, sample_idx:sample_idx +
  1037. stride].uniform_(generator=seq_group.generator)
  1038. sample_idx += stride
  1039. batch_next_token_ids, success = ops.top_k_top_p_sampling_from_probs(
  1040. probs,
  1041. uniform_samples,
  1042. top_ks,
  1043. top_ps,
  1044. )
  1045. if not success.all():
  1046. warnings.warn("CUDA rejection sampling failed, fallback.",
  1047. stacklevel=1)
  1048. probs = ops.top_k_renorm_prob(probs, top_ks)
  1049. probs = ops.top_p_renorm_prob(probs, top_ps)
  1050. batch_next_token_ids = ops.sampling_from_probs(
  1051. probs, uniform_samples[0])
  1052. return batch_next_token_ids.view(-1, num_samples)
  1053. def _sample_with_torch(
  1054. probs: torch.Tensor,
  1055. logprobs: torch.Tensor,
  1056. sampling_metadata: SamplingMetadata,
  1057. sampling_tensors: SamplingTensors,
  1058. include_gpu_probs_tensor: bool,
  1059. modify_greedy_probs: bool,
  1060. ) -> Tuple[List[Tuple[List[int], List[int]]], Optional[torch.Tensor]]:
  1061. categorized_seq_group_ids = {t: [] for t in SamplingType}
  1062. categorized_sample_indices = sampling_metadata.categorized_sample_indices
  1063. for i, seq_group in enumerate(sampling_metadata.seq_groups):
  1064. sampling_params = seq_group.sampling_params
  1065. sampling_type = sampling_params.sampling_type
  1066. categorized_seq_group_ids[sampling_type].append(i)
  1067. sample_results_dict: Dict[int, Tuple[List[int], List[int]]] = {}
  1068. sample_metadata = {}
  1069. multinomial_samples = {}
  1070. # Create output tensor for sampled token ids.
  1071. if include_gpu_probs_tensor:
  1072. sampled_token_ids_tensor = torch.empty(logprobs.shape[0],
  1073. 1,
  1074. dtype=torch.long,
  1075. device=logprobs.device)
  1076. else:
  1077. sampled_token_ids_tensor = None
  1078. # Counterintuitively, having two loops here is actually faster.
  1079. # The first loop can run without waiting on GPU<->CPU sync.
  1080. for sampling_type in SamplingType:
  1081. sample_indices = categorized_sample_indices[sampling_type][:, 0]
  1082. num_tokens = len(sample_indices)
  1083. if num_tokens == 0:
  1084. continue
  1085. seq_group_id = categorized_seq_group_ids[sampling_type]
  1086. seq_groups = [sampling_metadata.seq_groups[i] for i in seq_group_id]
  1087. sample_metadata[sampling_type] = (seq_group_id, seq_groups)
  1088. long_sample_indices = sample_indices.long()
  1089. if sampling_type == SamplingType.GREEDY:
  1090. greedy_samples = torch.argmax(logprobs[long_sample_indices],
  1091. dim=-1)
  1092. if include_gpu_probs_tensor:
  1093. # Store sampled tokens in output tensor.
  1094. sampled_token_ids_tensor[
  1095. long_sample_indices] = greedy_samples.unsqueeze(-1)
  1096. if modify_greedy_probs:
  1097. # If required, modify the probabilities such that sampling from
  1098. # the modified distribution would always sample the argmax
  1099. # token id.
  1100. _modify_greedy_probs_inplace(logprobs, probs,
  1101. long_sample_indices,
  1102. greedy_samples)
  1103. elif sampling_type in (SamplingType.RANDOM, SamplingType.RANDOM_SEED):
  1104. max_best_of_in_batch = 1
  1105. for seq_group in seq_groups:
  1106. if seq_group.is_prompt:
  1107. sampling_params = seq_group.sampling_params
  1108. max_best_of_in_batch = max(max_best_of_in_batch,
  1109. sampling_params.best_of)
  1110. seq_groups_arg = (None if sampling_type == SamplingType.RANDOM else
  1111. seq_groups)
  1112. if APHRODITE_USE_SAMPLING_KERNELS:
  1113. multinomial_samples[
  1114. sampling_type] = _top_k_top_p_multinomial_with_kernels(
  1115. probs[long_sample_indices],
  1116. sampling_tensors.top_ks[long_sample_indices],
  1117. sampling_tensors.top_ps[long_sample_indices],
  1118. max_best_of_in_batch,
  1119. seq_groups_arg,
  1120. )
  1121. else:
  1122. multinomial_samples[sampling_type] = _multinomial(
  1123. probs[long_sample_indices],
  1124. max_best_of_in_batch,
  1125. seq_groups=seq_groups_arg)
  1126. if sampled_token_ids_tensor is not None:
  1127. # Store sampled tokens in output tensor.
  1128. sampled_token_ids_tensor[long_sample_indices] = \
  1129. multinomial_samples[sampling_type].to(torch.long)
  1130. elif sampling_type == SamplingType.BEAM:
  1131. beam_search_logprobs = logprobs[sample_indices]
  1132. else:
  1133. raise ValueError(f"Unsupported sampling type: {sampling_type}")
  1134. # GPU<->CPU sync happens in the loop below.
  1135. # This also converts the sample output to Python objects.
  1136. if not sampling_metadata.skip_sampler_cpu_output:
  1137. for sampling_type in SamplingType:
  1138. if sampling_type not in sample_metadata:
  1139. continue
  1140. (seq_group_id, seq_groups) = sample_metadata[sampling_type]
  1141. if sampling_type == SamplingType.GREEDY:
  1142. sample_results = _greedy_sample(seq_groups, greedy_samples)
  1143. elif sampling_type in (SamplingType.RANDOM,
  1144. SamplingType.RANDOM_SEED):
  1145. sample_results = _random_sample(
  1146. seq_groups, multinomial_samples[sampling_type])
  1147. elif sampling_type == SamplingType.BEAM:
  1148. sample_results = _beam_search_sample(seq_groups,
  1149. beam_search_logprobs)
  1150. sample_results_dict.update(zip(seq_group_id, sample_results))
  1151. sample_results = [
  1152. sample_results_dict.get(i, ([], []))
  1153. for i in range(len(sampling_metadata.seq_groups))
  1154. ]
  1155. else:
  1156. sample_results = []
  1157. return sample_results, sampled_token_ids_tensor


def _sample_with_triton_kernel(
    probs: torch.Tensor,
    logprobs: torch.Tensor,
    sampling_metadata: SamplingMetadata,
    sampling_tensors: SamplingTensors,
) -> List[Tuple[List[int], List[int]]]:
    categorized_seq_group_ids = {t: [] for t in SamplingType}
    categorized_sample_indices = sampling_metadata.categorized_sample_indices
    for i, seq_group in enumerate(sampling_metadata.seq_groups):
        sampling_params = seq_group.sampling_params
        sampling_type = sampling_params.sampling_type
        categorized_seq_group_ids[sampling_type].append(i)

    sample_results_dict: Dict[int, Tuple[List[int], List[int]]] = {}
    sample_metadata = {}
    max_best_of_in_batch = 1

    # Counterintuitively, having two loops here is actually faster.
    # The first loop can run without waiting on GPU<->CPU sync.
    for sampling_type in SamplingType:
        sample_indices = categorized_sample_indices[sampling_type][:, 0]
        sampled_token_indices = categorized_sample_indices[sampling_type][:, 1]
        num_tokens = len(sample_indices)
        if num_tokens == 0:
            continue

        seq_group_id = categorized_seq_group_ids[sampling_type]
        seq_groups = [sampling_metadata.seq_groups[i] for i in seq_group_id]
        sample_metadata[sampling_type] = (seq_group_id, seq_groups,
                                          sample_indices,
                                          sampled_token_indices)
        if sampling_type in (SamplingType.GREEDY, SamplingType.RANDOM,
                             SamplingType.RANDOM_SEED):
            for seq_group in seq_groups:
                if seq_group.is_prompt:
                    sampling_params = seq_group.sampling_params
                    max_best_of_in_batch = max(max_best_of_in_batch,
                                               sampling_params.best_of)
        elif sampling_type == SamplingType.BEAM:
            beam_search_logprobs = logprobs[sample_indices]
        else:
            raise ValueError(f"Unsupported sampling type: {sampling_type}")

    sampled_tokens, _, _ = sample_triton(
        probs=probs,
        seeds=sampling_tensors.sampling_seeds,
        max_best_of=max_best_of_in_batch,
        sample_indices=sampling_tensors.sample_indices,
        logprobs=logprobs,
        # don't save logprobs because we have logic for that below
        # TODO: use this instead of the CPU-based logic below
        save_logprobs=False,
    )

    # GPU<->CPU sync happens in the loop below.
    for sampling_type in SamplingType:
        if sampling_type not in sample_metadata:
            continue
        (seq_group_id, seq_groups, sample_indices,
         sampled_token_indices) = sample_metadata[sampling_type]
        if sampling_type == SamplingType.GREEDY:
            sample_results = _greedy_sample(
                seq_groups, sampled_tokens[sampled_token_indices][:, 0])
        elif sampling_type in (SamplingType.RANDOM, SamplingType.RANDOM_SEED):
            sample_results = _random_sample(
                seq_groups, sampled_tokens[sampled_token_indices])
        elif sampling_type == SamplingType.BEAM:
            sample_results = _beam_search_sample(seq_groups,
                                                 beam_search_logprobs)
        sample_results_dict.update(zip(seq_group_id, sample_results))

    sample_results = [
        sample_results_dict.get(i, ([], []))
        for i in range(len(sampling_metadata.seq_groups))
    ]
    return sample_results


def _sample(
    probs: torch.Tensor, logprobs: torch.Tensor,
    sampling_metadata: SamplingMetadata, sampling_tensors: SamplingTensors,
    include_gpu_probs_tensor: bool, modify_greedy_probs: bool
) -> Tuple[List[Tuple[List[int], List[int]]], Optional[torch.Tensor]]:
    """
    Args:
        probs: (num_query_tokens_in_batch, num_vocab)
        logprobs: (num_query_tokens_in_batch, num_vocab)
        sampling_metadata: The metadata for a batch for sampling.
        sampling_tensors: Tensors that include sampling related metadata.

    Returns:
        (next_token_ids, parent_seq_ids) for each seq group in a batch.
            If sampling is skipped, it returns ([], [])
        sampled_token_ids_tensor: A tensor of sampled token ids.
    """
    return _sample_with_torch(
        probs,
        logprobs,
        sampling_metadata,
        sampling_tensors,
        include_gpu_probs_tensor=include_gpu_probs_tensor,
        modify_greedy_probs=modify_greedy_probs,
    )

    # TODO: Enable once Triton kernel & associated code is faster.
    # return _sample_with_triton_kernel(probs, logprobs, sampling_metadata,
    #                                   sampling_tensors)


def _get_ranks(x: torch.Tensor, indices: torch.Tensor) -> torch.Tensor:
    """
    This function calculates the ranks of the chosen tokens in a logprob tensor.

    Args:
        x (torch.Tensor): 2D logprob tensor of shape (N, M)
            where N is the no. of tokens and M is the vocab dim.
        indices (torch.Tensor): List of chosen token indices.

    Returns:
        torch.Tensor: 1D tensor of shape (N,) where N is the no. of tokens.
            Each element in the returned tensor represents the rank
            of the chosen token in the input logprob tensor.
    """
    vals = x[torch.arange(0, len(x), device=x.device, dtype=indices.dtype),
             indices]
    return (x > vals[:, None]).long().sum(1).add_(1)


def _get_logprobs(
    logprobs: torch.Tensor,
    sampling_metadata: SamplingMetadata,
    sample_results: List[Tuple[List[int], List[int]]],
) -> Tuple[List[Optional[PromptLogprobs]], List[SampleLogprobs]]:
    """Return sample logprobs and prompt logprobs.

    The logic consists of 3 parts.
    - Select indices to compute logprob from, ranks of token ids, and
        the top k token ids from logprobs.
    - Compute prompt logprobs if required.
    - Compute sample logprobs if required.

    Args:
        logprobs: (num_query_tokens_across_batch, num_vocab). Each query token's
            logprob per vocab. Sequence groups' query tokens are batched in a
            single flattened tensor. For example, assuming there are N
            seq groups, it is sorted by prefill tokens for seq_group_1 (if
            prompt logprob is enabled), decode tokens for seq_group_1 (if
            sampling is required), prefill tokens for seq_group_2, ...
        sampling_metadata: The sampling metadata.
        sample_results: (num_seq_groups) The tuple of (next_token_ids,
            parent_ids) for each sequence group. When beam search is enabled,
            sample_results can contain a different number of seq_ids than
            sampling_metadata.seq_groups, because beam search creates
            2 * BEAM_WIDTH samples (whereas there are only up to BEAM_WIDTH
            seq_ids).

    Returns:
        A tuple of prompt and sample logprobs per sequence group in a batch.
    """
    # The index of query token to calculate logprobs. It includes both
    # prompt and sample logprob indices.
    query_indices: List[int] = []
    # The next token ids to get the logprob value from.
    next_token_ids: List[int] = []
    # The largest requested number of logprobs. We compute as many logprobs as
    # the largest value requested across the batch. If no sequence requests
    # logprobs, it stays -1.
    largest_num_logprobs = -1
    # If beam search is enabled.
    use_beam_search = False

    # Select indices to compute logprob from, ranks of token ids, and the top
    # k token ids from logprobs.
    for (seq_group, sample_result) in zip(sampling_metadata.seq_groups,
                                          sample_results):
        sampling_params = seq_group.sampling_params

        # Update indices and tokens for prompt logprobs.
        if (seq_group.is_prompt
                and sampling_params.prompt_logprobs is not None):
            largest_num_logprobs = max(largest_num_logprobs,
                                       sampling_params.prompt_logprobs)
            next_prompt_tokens = _get_next_prompt_tokens(seq_group)
            query_indices.extend(seq_group.prompt_logprob_indices)
            next_token_ids.extend(next_prompt_tokens)

        # Update indices and next tokens for sample logprob.
        if seq_group.do_sample:
            token_ids, parent_seq_ids = sample_result
            # NOTE: We cannot directly use sample_indices because
            # sample_indices only contain parent seq_ids of a previous step.
            # The current step may have a different number of seq_ids, and
            # we can obtain it from `sample_result[1]`.
            query_idx = seq_group.sample_indices[0]
            query_indices.extend(
                [query_idx + parent_id for parent_id in parent_seq_ids])
            next_token_ids.extend(token_ids)

            if sampling_params.logprobs is not None:
                largest_num_logprobs = max(largest_num_logprobs,
                                           sampling_params.logprobs)

            use_beam_search = use_beam_search or sampling_params.use_beam_search

    assert len(next_token_ids) == len(query_indices)

    if len(query_indices) == 0:
        empty_sampled_logprob = []
        empty_prompt_logprob = None
        return [empty_prompt_logprob], [empty_sampled_logprob]

    selected_logprobs, ranks = None, None
    top_logprobs, top_token_ids = None, None

    # If largest_num_logprobs == -1, i.e. no logprobs are requested, we can
    # skip the whole logprob calculation.
    if largest_num_logprobs >= 0 or use_beam_search:
        query_indices_gpu = torch.tensor(query_indices, device=logprobs.device)
        next_token_ids_gpu = torch.tensor(next_token_ids,
                                          device=logprobs.device)

        # (num_selected_query_tokens, num_logprobs). Note that query_indices
        # can contain duplicates if beam search is enabled.
        selected_logprobs = logprobs[[
            query_indices_gpu,
            next_token_ids_gpu,
        ]]
        ranks = _get_ranks(
            logprobs[query_indices_gpu],
            next_token_ids_gpu,
        )
        assert selected_logprobs.shape[0] == ranks.shape[0]

        # We need to compute top k only if there exists logprobs > 0.
        if largest_num_logprobs > 0:
            # Logprobs of topk tokens for a batch of sequence groups.
            # (num_query_tokens_across_batch).
            top_logprobs, top_token_ids = torch.topk(logprobs,
                                                     largest_num_logprobs,
                                                     dim=-1)
            top_logprobs = top_logprobs.to('cpu')
            top_token_ids = top_token_ids.to('cpu')

        selected_logprobs = selected_logprobs.to('cpu')
        ranks = ranks.to('cpu')

    # Find prompt/sample logprobs.
    prompt_logprobs_per_seq_group: List[Optional[PromptLogprobs]] = []
    sample_logprobs_per_seq_group: List[SampleLogprobs] = []
    top_logprob_idx = 0
    selected_logprobs_idx = 0

    for seq_group, sample_result in zip(sampling_metadata.seq_groups,
                                        sample_results):
        (prompt_logprobs, top_logprob_idx,
         selected_logprobs_idx) = _get_prompt_logprob_if_needed(
             seq_group, selected_logprobs, ranks, top_token_ids, top_logprobs,
             selected_logprobs_idx, top_logprob_idx)
        prompt_logprobs_per_seq_group.append(prompt_logprobs)

        (sampled_logprobs, top_logprob_idx,
         selected_logprobs_idx) = _get_sampled_logprob_if_needed(
             seq_group, sample_result, selected_logprobs, ranks, top_token_ids,
             top_logprobs, selected_logprobs_idx, top_logprob_idx)
        sample_logprobs_per_seq_group.append(sampled_logprobs)

    return prompt_logprobs_per_seq_group, sample_logprobs_per_seq_group


def _get_prompt_logprob_if_needed(
    seq_group: SequenceGroupToSample,
    selected_logprobs: torch.Tensor,
    ranks: torch.Tensor,
    top_token_ids: torch.Tensor,
    top_logprobs: torch.Tensor,
    selected_logprobs_idx: int,
    top_logprob_idx: int,
):
    """Compute the prompt logprob from a sequence group if needed."""
    sampling_params = seq_group.sampling_params
    is_prompt = seq_group.is_prompt

    # Find prompt logprobs
    prompt_logprobs: Optional[PromptLogprobs] = None
    if is_prompt and sampling_params.prompt_logprobs is not None:
        prompt_logprobs = []
        num_logprobs = sampling_params.prompt_logprobs
        next_prompt_tokens = _get_next_prompt_tokens(seq_group)
        # Pre-select indexes and create a list. It is faster than calling
        # .item repetitively.
        selected_logprob_items = selected_logprobs[
            selected_logprobs_idx:selected_logprobs_idx +
            len(next_prompt_tokens)].tolist()
        rank_items = ranks[selected_logprobs_idx:selected_logprobs_idx +
                           len(next_prompt_tokens)].tolist()

        for idx, token_id in enumerate(next_prompt_tokens):
            # Calculate the prompt logprob of the real prompt tokens.
            # {token_id: (logprob, rank_from_vocab)}
            prompt_logprobs_dict: Dict[int, Tuple[float, int]] = {
                token_id: (selected_logprob_items[idx], rank_items[idx])
            }

            # Add top K prompt logprobs along with its rank.
            if num_logprobs > 0:
                top_ids = top_token_ids[
                    top_logprob_idx, :num_logprobs].tolist()
                top_probs = top_logprobs[
                    top_logprob_idx, :num_logprobs].tolist()
                # Top K is already sorted by rank, so we can use 1 ~
                # num_logprobs + 1 for rank.
                top_ranks = range(1, num_logprobs + 1)
                prompt_logprobs_dict.update({
                    top_id: (top_prob, rank)
                    for top_id, top_prob, rank in zip(top_ids, top_probs,
                                                      top_ranks)
                })

            prompt_logprobs.append({
                token_id: Logprob(*logprob_and_rank)
                for token_id, logprob_and_rank in prompt_logprobs_dict.items()
            })
            # + 1 to go to the next prompt token.
            top_logprob_idx += 1

        # + len(next_prompt_tokens) to go to the next prompt.
        selected_logprobs_idx += len(next_prompt_tokens)

    return prompt_logprobs, top_logprob_idx, selected_logprobs_idx


def _get_sampled_logprob_if_needed(
    seq_group: SequenceGroupToSample,
    sample_result: Tuple[List[int], List[int]],
    selected_logprobs: torch.Tensor,
    ranks: torch.Tensor,
    top_token_ids: torch.Tensor,
    top_logprobs: torch.Tensor,
    selected_logprobs_idx: int,
    top_logprob_idx: int,
):
    """Compute the sample logprob if needed."""
    seq_ids = seq_group.seq_ids
    num_logprobs = seq_group.sampling_params.logprobs
    use_beam_search = seq_group.sampling_params.use_beam_search
    sampled_logprobs: SampleLogprobs = []
    next_token_ids, parent_seq_ids = sample_result

    if seq_group.do_sample:
        assert len(next_token_ids) > 0
        if num_logprobs is None and not use_beam_search:
            for next_token_id in next_token_ids:
                # Use a dummy logprob
                sampled_logprobs.append({next_token_id: Logprob(inf)})
        else:
            # Pre-select items from tensor. tolist() is faster than repetitive
            # `.item()` calls.
            selected_logprob_items = selected_logprobs[
                selected_logprobs_idx:selected_logprobs_idx +
                len(next_token_ids)].tolist()
            rank_items = ranks[selected_logprobs_idx:selected_logprobs_idx +
                               len(next_token_ids)].tolist()
            for idx, (next_token_id, parent_id) in enumerate(
                    zip(next_token_ids, parent_seq_ids)):
                # Get the logprob of a sampled token.
                sampled_logprobs_dict = {
                    next_token_id:
                    (selected_logprob_items[idx], rank_items[idx])
                }
                if num_logprobs is not None and num_logprobs > 0:
                    # Get top K logprobs.
                    top_ids = top_token_ids[top_logprob_idx +
                                            parent_id, :num_logprobs].tolist()
                    top_probs = top_logprobs[
                        top_logprob_idx + parent_id, :num_logprobs].tolist()
                    # Top K is already sorted by rank, so we can use 1 ~
                    # num_logprobs + 1 for rank.
                    top_ranks = range(1, num_logprobs + 1)
                    sampled_logprobs_dict.update({
                        top_id: (top_prob, rank)
                        for top_id, top_prob, rank in zip(
                            top_ids, top_probs, top_ranks)
                    })

                sampled_logprobs.append({
                    token_id: Logprob(*logprob_and_rank)
                    for token_id, logprob_and_rank in
                    sampled_logprobs_dict.items()
                })

        # NOTE: This part of code is not intuitive. `selected_logprobs` include
        # logprobs for the current step, which has len(next_token_ids) tokens
        # per sequence group. `logprobs` includes logprobs from the previous
        # steps, which has len(seq_ids) tokens per sequence group.

        # Iterate to the next sequence group in a batch.
        selected_logprobs_idx += len(next_token_ids)
        # Iterate to the next sequence group in a batch.
        top_logprob_idx += len(seq_ids)
    return sampled_logprobs, top_logprob_idx, selected_logprobs_idx


def _modify_greedy_probs_inplace(logprobs: torch.Tensor, probs: torch.Tensor,
                                 sample_indices: torch.Tensor,
                                 greedy_samples: torch.Tensor) -> None:
    """Modify the probability distributions of the greedily-sampled tokens such
    that each sampled token has a "probability" of 1.0. This is required by
    speculative decoding, which depends on the sampling method being encoded
    within the probability distribution for correctness.

    # Why do we only need to do this for greedy sampling?

    Aphrodite's sampler performs the following steps for greedy or multinomial
    (random) sampling:
        1. Get logits from model.
        2. Modify logits according to per-sequence sampling parameters.
            - Multiply by temperature, top-k and top-p masking, penalize tokens
              according to their frequency, etc.
        3. Sample a token.
            - Random sampling simply samples from the modified probability
              distribution.
            - Greedy sampling performs `argmax` to obtain the token with the
              highest likelihood.

    Ignoring greedy sampling for a moment, we find that the computed
    probability distribution has the following property: we can sample from it
    independently and find that the token sampled by the Sampler has a
    frequency corresponding to how often we see it in our sampling. In other
    words, for tokens sampled with Aphrodite's random SamplingType, the
    computed probability distribution encodes the sampling methodology
    completely.

    Greedy sampling does not normally have this property. Aphrodite modifies
    logits according to sampling params, then performs `argmax`, then returns
    the sampled token and the computed probability distribution. If we sample
    from the distribution, we'll find the likelihood of the greedily-sampled
    token is not always 1.0.

    Since lossless speculative decoding requires that the sampling methodology
    be encoded within the probability distribution, we are motivated to modify
    the probability distribution such that the sampled token has probability 1
    when speculative decoding is used.

    NOTE: Alternatively, we could use an extremely low temperature to achieve
    greedy sampling using multinomial computation and unite the codepaths. This
    has implications on the overall design of the sampler, e.g. how to record
    accurate logprobs for the user, so this improvement is deferred to later.
    """
    # NOTE: logprobs are not modified so they can be returned to the user.
    probs[sample_indices, :] = 0
    probs[sample_indices, greedy_samples] = 1.0


def _build_sampler_output(
    sample_results: SampleResultType,
    sampling_metadata: SamplingMetadata,
    prompt_logprobs: Optional[List[Optional[PromptLogprobs]]],
    sample_logprobs: Optional[List[SampleLogprobs]],
    on_device_tensors: Optional[Tuple[torch.Tensor, torch.Tensor,
                                      torch.Tensor]],
    skip_sampler_cpu_output: bool = False,
) -> SamplerOutput:
    """Construct Python objects with the output of sampling.

    Args:
        on_device_tensors: Tuple containing on-device tensors with the
            probabilities used in sampling and the sampled token ids. This
            allows post-processing without copies to CPU/serialization, e.g. in
            speculative decoding rejection sampling.
    """
    sampler_output: List[CompletionSequenceGroupOutput] = []
    if not skip_sampler_cpu_output:
        assert prompt_logprobs is not None
        assert sample_logprobs is not None

        for (seq_group, sample_result, group_prompt_logprobs,
             group_sample_logprobs) in zip(sampling_metadata.seq_groups,
                                           sample_results, prompt_logprobs,
                                           sample_logprobs):
            seq_ids = seq_group.seq_ids
            next_token_ids, parent_ids = sample_result
            seq_outputs: List[SequenceOutput] = []
            for parent_id, next_token_id, logprobs in zip(
                    parent_ids, next_token_ids, group_sample_logprobs):
                seq_outputs.append(
                    SequenceOutput(seq_ids[parent_id], next_token_id,
                                   logprobs))
            sampler_output.append(
                CompletionSequenceGroupOutput(seq_outputs,
                                              group_prompt_logprobs))

    # If not specified, store None values in SamplerOutput.
    if on_device_tensors is not None:
        (sampled_token_probs, logprobs_tensor,
         sampled_token_ids) = on_device_tensors
    else:
        sampled_token_probs, logprobs_tensor, sampled_token_ids = (None, None,
                                                                   None)

    return SamplerOutput(
        outputs=sampler_output,
        sampled_token_probs=sampled_token_probs,
        sampled_token_ids=sampled_token_ids,
        logprobs=logprobs_tensor,
    )


def _get_next_prompt_tokens(seq_group: SequenceGroupToSample) -> List[int]:
    """Get a list of next prompt tokens to compute logprob from a
    given sequence group.

    It is used to compute prompt logprob. Imagine you have logprob for each
    query token. Query token needs to know the next prompt token id to compute
    prompt logprob. This is a helper to obtain next prompt token ids.

    This API has to be used only when the caller knows seq_group is in the
    prefill stage.

    Returns:
        A list of next prompt tokens to compute logprob.
    """
    assert seq_group.is_prompt, (
        "Caller should ensure the sequence group is in a prefill stage.")
    seq_ids = seq_group.seq_ids
    query_len = seq_group.query_len
    assert query_len is not None
    # prompt has only 1 seq id.
    assert len(seq_ids) == 1
    seq_data = seq_group.seq_data[seq_ids[0]]
    computed_len = seq_data.get_num_computed_tokens()
    prompt_tokens = seq_data.prompt_token_ids
    # +1 because we are looking for a next prompt token.
    next_token_index_start = computed_len + 1
    next_token_index_end = min(computed_len + query_len + 1,
                               len(prompt_tokens))
    next_prompt_tokens = prompt_tokens[
        next_token_index_start:next_token_index_end]
    return next_prompt_tokens


def _get_ngrams(
    ngram_size: int,
    prev_input_ids: torch.Tensor
) -> Dict[Tuple[int, ...], List[int]]:
    """Get dictionary of ngrams and the tokens that followed them.

    Args:
        ngram_size: Size of ngrams to track
        prev_input_ids: 1D tensor of previous token ids

    Returns:
        Dictionary mapping each (ngram_size - 1)-token prefix to the list of
        tokens that followed it
    """
    generated_ngrams = {}
    gen_tokens = prev_input_ids.tolist()

    for i in range(len(gen_tokens) - ngram_size + 1):
        ngram = tuple(gen_tokens[i:i + ngram_size - 1])
        next_token = gen_tokens[i + ngram_size - 1]
        if ngram in generated_ngrams:
            generated_ngrams[ngram].append(next_token)
        else:
            generated_ngrams[ngram] = [next_token]

    return generated_ngrams


def _get_generated_ngrams(
    banned_ngrams: Dict[Tuple[int, ...], List[int]],
    prev_input_ids: torch.Tensor,
    ngram_size: int,
    cur_len: int
) -> List[int]:
    """Get list of tokens that would create a repeated ngram if generated next.

    Args:
        banned_ngrams: Dictionary of previously seen ngrams and their next
            tokens
        prev_input_ids: Previous token ids
        ngram_size: Size of ngrams to check
        cur_len: Current position in sequence

    Returns:
        List of token ids that would create a repeat ngram
    """
    start_idx = cur_len + 1 - ngram_size
    current_ngram = tuple(prev_input_ids[start_idx:cur_len].tolist())
    return banned_ngrams.get(current_ngram, [])


def _calc_banned_ngram_tokens(
    ngram_size: int,
    prev_input_ids: torch.Tensor,
    cur_len: int
) -> List[int]:
    """Calculate tokens that would create repeated ngrams if generated next.

    Args:
        ngram_size: Size of ngrams to prevent repeating
        prev_input_ids: Previous token ids in sequence
        cur_len: Current position in sequence

    Returns:
        List of token ids that should be banned to prevent ngram repetition
    """
    if cur_len + 1 < ngram_size:
        return []

    generated_ngrams = _get_ngrams(ngram_size, prev_input_ids)

    banned_tokens = _get_generated_ngrams(
        generated_ngrams,
        prev_input_ids,
        ngram_size,
        cur_len
    )

    return banned_tokens


# def _apply_mirostat_v2(logits: torch.Tensor,
#                        sampling_tensors: SamplingTensors) -> torch.Tensor:
#     # Reduce our view to just the affected logits
#     logit_view = logits[sampling_tensors.miro_indices]
#     # Calculate surprise value per token
#     # Convert nats to bits for compatibility with ooba/kobold parameters.
#     logit_surprise = torch.log_softmax(logit_view, dim=-1) / -math.log(2)
#     # Mask out "too-surprising" tokens (surprisal > mu)
#     mus = sampling_tensors.miro_mus
#     miro_mask = logit_surprise > mus.unsqueeze(dim=-1)
#     # Unmask most-likely logit to guarantee a selection.
#     maxinds = torch.argmax(logit_view, dim=-1, keepdim=True)
#     miro_mask.scatter_(dim=1, index=maxinds, value=False)
#     # Apply logit mask (effectively a top-k filter).
#     logit_view[miro_mask] = -float("inf")
#     # Project logit changes made to the view onto the original.
#     # I think this step might be redundant.
#     logits[sampling_tensors.miro_indices] = logit_view
#     return logits


# def _mirostat_store_args(logits: torch.Tensor, args: SamplingTensors,
#                          sample_results: List[Tuple[List[int], List[int]]],
#                          sampling_metadata: SamplingMetadata,
#                          output_metadata: OutputMetadata) -> None:
#     """Based on whichever token was finally sampled, we calculate the
#     final surprisal values to update the mus.
#     Because a single sequence can have multiple samples, we must fork
#     the mu accordingly."""
#     assert sampling_metadata.seq_groups is not None
#     seqid_to_tokens = {}
#     seqid_to_indices = {}
#     for (sids, _), (toks, parents) in zip(sampling_metadata.seq_groups,
#                                           sample_results):
#         for idx, (token, parent) in enumerate(zip(toks, parents)):
#             seqid_to_tokens.setdefault(sids[parent], []).append(token)
#             seqid_to_indices.setdefault(sids[parent], []).append(idx)
#     seqids = args.miro_seqids
#     picked_tokens = torch.tensor([seqid_to_tokens[x] for x in seqids],
#                                  device=logits.device,
#                                  dtype=torch.long)
#     # Clumsily, we recalculate token surprisals.
#     logits_view = logits[args.miro_indices]
#     picked_surprise = torch.gather(torch.log_softmax(logits_view, dim=-1),
#                                    dim=-1,
#                                    index=picked_tokens) / -math.log(2)
#     taus = args.miro_taus.unsqueeze(dim=-1)  # AKA target surprisals
#     etas = args.miro_etas.unsqueeze(dim=-1)  # AKA accumulation rates
#     mus = args.miro_mus.unsqueeze(dim=-1)  # AKA surprisal accumulators
#     nu_mus = mus - (picked_surprise - taus) * etas
#     # Record updated mu values for use in the next iteration
#     # Note how each mu is split into multiple based on the number of samples.
#     for seqid, seq_mus in zip(seqids, nu_mus):
#         for sample_idx, mu in zip(seqid_to_indices[seqid], seq_mus):
#             output_metadata.add(seqid, sample_idx, "miro_mu", mu)