  1. """Sampling parameters for text generation."""
  2. import copy
  3. from enum import Enum, IntEnum
  4. from functools import cached_property
  5. from typing import Any, Callable, Dict, List, Optional, Set, Union
  6. import msgspec
  7. import torch
  8. from loguru import logger
  9. from typing_extensions import Annotated
  10. import aphrodite.common.envs as envs
  11. from aphrodite.common.config import SchedulerConfig
  12. _SAMPLING_EPS = 1e-5
  13. _MAX_TEMP = 1e-2
  14. APHRODITE_NO_DEPRECATION_WARNING = envs.APHRODITE_NO_DEPRECATION_WARNING
class SamplingType(IntEnum):
    GREEDY = 0
    RANDOM = 1
    RANDOM_SEED = 2
    BEAM = 3


class RequestOutputKind(Enum):
    # Return entire output so far in every RequestOutput
    CUMULATIVE = 0
    # Return only deltas in each RequestOutput
    DELTA = 1
    # Do not return intermediate RequestOutputs
    FINAL_ONLY = 2

class SamplerID(IntEnum):
    # Mirror these in aphrodite/modeling/layers/sampler.py
    # Values out of order to keep backwards compatibility
    # with Koboldcpp values
    DRY = 7
    PENALTIES = 6
    NO_REPEAT_NGRAM = 8
    TEMPERATURE = 5
    TOP_NSIGMA = 9
    TOP_P_TOP_K = 0
    TOP_A = 1
    MIN_P = 2
    TFS = 3
    ETA_CUTOFF = 10
    EPSILON_CUTOFF = 11
    TYPICAL_P = 4
    QUADRATIC = 12
    XTC = 13

    @classmethod
    def from_str(cls, value: Union[str, int]) -> "SamplerID":
        """Convert a string or int to a SamplerID enum.

        Args:
            value: String name (case-insensitive) or integer value.

        Returns:
            SamplerID enum value.

        Raises:
            ValueError: If value cannot be converted to a SamplerID.
        """
        if isinstance(value, int):
            return cls(value)
        try:
            return cls[value.upper()]
        except KeyError as e:
            valid_names = [x.name for x in cls]
            raise ValueError(
                f"Invalid sampler name '{value}'. Must be one of: {valid_names}"
            ) from e

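# Illustrative SamplerID.from_str usage (a sketch, kept in comments so it does
# not execute at import time):
#
#     SamplerID.from_str("xtc")    # -> SamplerID.XTC
#     SamplerID.from_str(7)        # -> SamplerID.DRY
#     SamplerID.from_str("bogus")  # raises ValueError listing valid names
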
LogitsProcessorFunc = Union[Callable[[List[int], torch.Tensor], torch.Tensor],
                            Callable[[List[int], List[int], torch.Tensor],
                                     torch.Tensor]]
"""LogitsProcessor is a function that takes a list
of previously generated tokens, the logits tensor
for the next token and, optionally, prompt tokens as a
first argument, and returns a modified tensor of logits
to sample from."""

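# The two accepted signatures, illustrated (a sketch; the function names are
# hypothetical and not part of this module):
#
#     def ban_token(generated: List[int],
#                   logits: torch.Tensor) -> torch.Tensor:
#         logits[42] = -float("inf")  # forbid sampling token id 42
#         return logits
#
#     def prompt_aware(prompt: List[int], generated: List[int],
#                      logits: torch.Tensor) -> torch.Tensor:
#         return logits  # three-argument form also receives prompt tokens
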
class SamplingParams(
        msgspec.Struct,
        omit_defaults=True,
        dict=True):
    """Sampling parameters for text generation.

    Overall, we follow the sampling parameters from the OpenAI text completion
    API (https://platform.openai.com/docs/api-reference/completions/create).
    In addition, we support several additional samplers that are not
    supported by the OpenAI API.

    Args:
        n: Number of output sequences to return for the given prompt.
        best_of: Number of output sequences that are generated from the
            prompt. From these `best_of` sequences, the top `n` sequences are
            returned. `best_of` must be greater than or equal to `n`. This is
            treated as the beam width when `use_beam_search` is True. By
            default, `best_of` is set to `n`.
        presence_penalty: Float that penalizes new tokens based on whether
            they appear in the generated text so far. Values > 0 encourage
            the model to use new tokens, while values < 0 encourage the model
            to repeat tokens.
        frequency_penalty: Float that penalizes new tokens based on their
            frequency in the generated text so far. Values > 0 encourage the
            model to use new tokens, while values < 0 encourage the model to
            repeat tokens.
        repetition_penalty: Float that penalizes new tokens based on their
            frequency in the generated text so far. Unlike frequency_penalty,
            which is applied additively, repetition_penalty is applied
            multiplicatively. Must be in [1, inf). Set to 1 to disable the
            effect.
        no_repeat_ngram_size: Size of the n-grams to prevent repeating.
            1 would mean no token can appear twice.
            2 would mean no pair of consecutive tokens can appear twice.
        temperature: Float that controls the randomness of the sampling. Lower
            values make the model more deterministic, while higher values make
            the model more random. Zero means greedy sampling.
        top_p: Float that controls the cumulative probability of the top
            tokens to consider. Must be in (0, 1]. Set to 1 to consider all
            tokens.
        top_k: Integer that controls the number of top tokens to consider. Set
            to -1 to consider all tokens.
        top_a: Float that controls the cutoff for Top-A sampling.
            Exact cutoff is top_a*max_prob**2. Must be in [0, inf), 0 to
            disable.
        min_p: Float that controls the cutoff for min-p sampling.
            Exact cutoff is min_p*max_prob. Must be in [0, 1], 0 to disable.
        tfs: Float that controls the cumulative approximate curvature of the
            distribution to retain for Tail Free Sampling.
            Must be in (0, 1]. Set to 1 to disable.
        eta_cutoff: Float that controls the cutoff threshold for Eta sampling
            (a form of entropy-adaptive truncation sampling). The threshold
            is computed as min(eta, sqrt(eta)*entropy(probs)).
            Specified in units of 1e-4. Set to 0 to disable.
        epsilon_cutoff: Float that controls the cutoff threshold for
            Epsilon sampling (simple probability threshold truncation).
            Specified in units of 1e-4. Set to 0 to disable.
        typical_p: Float that controls the cumulative probability of tokens
            closest in surprise to the expected surprise to consider.
            Must be in (0, 1]. Set to 1 to disable.
        mirostat_mode: Can either be 0 (disabled) or 2 (Mirostat v2).
        mirostat_tau: Target "surprisal" that Mirostat works towards.
            Range [0, inf).
        mirostat_eta: Rate at which Mirostat updates its internal surprisal
            value. Range [0, inf).
        dynatemp_min: Minimum temperature for dynatemp sampling.
            Range [0, inf).
        dynatemp_max: Maximum temperature for dynatemp sampling.
            Range [0, inf).
        dynatemp_exponent: Exponent for dynatemp sampling. Range [0, inf).
        smoothing_factor: Smoothing factor for Quadratic Sampling.
        smoothing_curve: Smoothing curve for Quadratic (Cubic) Sampling.
        seed: Random seed to use for the generation.
        use_beam_search: Whether to use beam search instead of sampling.
        length_penalty: Float that penalizes sequences based on their length.
            Used in beam search.
        early_stopping: Controls the stopping condition for beam search. It
            accepts the following values: `True`, where the generation stops
            as soon as there are `best_of` complete candidates; `False`, where
            a heuristic is applied and the generation stops when it is very
            unlikely to find better candidates; `"never"`, where the beam
            search procedure only stops when there cannot be better candidates
            (canonical beam search algorithm).
        stop: List of strings that stop the generation when they are
            generated. The returned output will not contain the stop strings.
        stop_token_ids: List of tokens that stop the generation when they are
            generated. The returned output will contain the stop tokens unless
            the stop tokens are special tokens.
        include_stop_str_in_output: Whether to include the stop strings in
            output text. Defaults to False.
        ignore_eos: Whether to ignore the EOS token and continue generating
            tokens after the EOS token is generated.
        max_tokens: Maximum number of tokens to generate per output sequence.
        min_tokens: Minimum number of tokens to generate per output sequence
            before EOS or stop tokens are generated.
        logprobs: Number of log probabilities to return per output token.
            When set to None, no probability is returned. If set to a non-None
            value, the result includes the log probabilities of the specified
            number of most likely tokens, as well as the chosen tokens.
            Note that the implementation follows the OpenAI API: the API will
            always return the log probability of the sampled token, so there
            may be up to `logprobs+1` elements in the response.
        prompt_logprobs: Number of log probabilities to return per prompt
            token.
        detokenize: Whether to detokenize the output. Defaults to True.
        custom_token_bans: List of token IDs to ban from being generated.
        skip_special_tokens: Whether to skip special tokens in the output.
            Defaults to True.
        spaces_between_special_tokens: Whether to add spaces between special
            tokens in the output. Defaults to True.
        logits_processors: List of functions that modify logits based on
            previously generated tokens, and optionally prompt tokens as
            a first argument.
        truncate_prompt_tokens: If set to an integer k, will use only the last
            k tokens from the prompt (i.e. left-truncation). Defaults to None
            (i.e. no truncation).
        xtc_threshold: In XTC sampling, if 2 or more tokens have probability
            above this threshold, consider removing all but the last one.
        xtc_probability: Probability that the removal will actually happen.
            0 disables the sampler, 1 makes it always happen.
        nsigma: Number of standard deviations from the maximum logit to use
            as a cutoff threshold. Tokens with logits below
            (max_logit - nsigma * std_dev) are filtered out. Higher values
            (e.g. 3.0) keep more tokens, lower values (e.g. 1.0) are more
            selective. Must be positive. 0 to disable.
        dry_multiplier: Float that controls the magnitude of the DRY sampling
            penalty. Higher values create stronger penalties against
            repetition. The penalty is multiplied by this value before being
            applied. Must be non-negative. 0 disables the sampler.
        dry_base: Base for the exponential growth of the DRY sampling penalty.
            Controls how quickly the penalty increases with longer repeated
            sequences. Must be greater than 1. Higher values (e.g. 2.0) create
            more aggressive penalties for longer repetitions. Defaults to
            1.75.
        dry_allowed_length: Maximum number of tokens that can be repeated
            without incurring a DRY sampling penalty. Sequences longer than
            this will be penalized exponentially. Must be at least 1.
            Defaults to 2.
        dry_sequence_breaker_ids: List of token IDs that stop
            the matching of repeated content. These tokens will break up the
            input into sections where repetition is evaluated separately.
            Common examples are newlines, quotes, and other structural
            tokens. Defaults to an empty list.
        dry_range: The range of tokens (input + output) to apply the DRY
            sampler.
        skew: Bias the token selection towards higher or lower probability
            tokens. Defaults to 0 (disabled).
        sampler_priority: A list of integers or strings to control the order
            in which samplers are applied.
    """
    n: int = 1
    best_of: Optional[int] = None
    presence_penalty: float = 0.0
    frequency_penalty: float = 0.0
    repetition_penalty: float = 1.0
    no_repeat_ngram_size: int = 0
    temperature: float = 1.0
    dynatemp_min: float = 0.0
    dynatemp_max: float = 0.0
    dynatemp_exponent: float = 1.0
    temperature_last: bool = False
    top_p: float = 1.0
    top_k: int = -1
    top_a: float = 0.0
    min_p: float = 0.0
    tfs: float = 1.0
    eta_cutoff: float = 0.0
    epsilon_cutoff: float = 0.0
    typical_p: float = 1.0
    smoothing_factor: float = 0.0
    smoothing_curve: float = 1.0
    seed: Optional[int] = None
    use_beam_search: bool = False
    length_penalty: float = 1.0
    early_stopping: Union[bool, str] = False
    stop: Union[None, str, List[str]] = None
    stop_token_ids: Optional[List[int]] = None
    include_stop_str_in_output: bool = False
    ignore_eos: bool = False
    max_tokens: Optional[int] = 16
    min_tokens: int = 0
    logprobs: Optional[int] = None
    prompt_logprobs: Optional[int] = None
    detokenize: bool = True
    custom_token_bans: Optional[List[int]] = None
    skip_special_tokens: bool = True
    spaces_between_special_tokens: bool = True
    # Optional[List[LogitsProcessorFunc]] type.
    # We use Any here because the type above
    # is not supported by msgspec.
    logits_processors: Optional[Any] = None
    truncate_prompt_tokens: Optional[Annotated[int, msgspec.Meta(ge=1)]] = None
    xtc_threshold: float = 0.1
    xtc_probability: float = 0
    nsigma: float = 0.0
    dry_multiplier: float = 0.0
    dry_base: float = 1.75
    dry_allowed_length: int = 2
    dry_sequence_breaker_ids: List[int] = []
    dry_range: int = 0
    skew: float = 0.0
    sampler_priority: Optional[List[int]] = []
    output_kind: RequestOutputKind = RequestOutputKind.CUMULATIVE

    # The below fields are not supposed to be used as an input.
    # They are set in post_init.
    output_text_buffer_length: int = 0
    _all_stop_token_ids: Set[int] = msgspec.field(default_factory=set)

    # Canonical defaults; __repr__ uses this mapping to print only the
    # fields that differ from their default values.
    default_values = {
        "n": 1,
        "best_of": 1,
        "presence_penalty": 0.0,
        "frequency_penalty": 0.0,
        "repetition_penalty": 1.0,
        "no_repeat_ngram_size": 0,
        "temperature": 1.0,
        "dynatemp_min": 0.0,
        "dynatemp_max": 0.0,
        "dynatemp_exponent": 1.0,
        "temperature_last": False,
        "top_p": 1.0,
        "top_k": -1,
        "top_a": 0.0,
        "min_p": 0.0,
        "tfs": 1.0,
        "eta_cutoff": 0.0,
        "epsilon_cutoff": 0.0,
        "typical_p": 1.0,
        "smoothing_factor": 0.0,
        "smoothing_curve": 1.0,
        "seed": None,
        "use_beam_search": False,
        "length_penalty": 1.0,
        "early_stopping": False,
        "stop": [],
        "stop_token_ids": [],
        "ignore_eos": False,
        "max_tokens": 16,
        "min_tokens": 0,
        "logprobs": None,
        "prompt_logprobs": None,
        "detokenize": True,
        "custom_token_bans": None,
        "skip_special_tokens": True,
        "spaces_between_special_tokens": True,
        "include_stop_str_in_output": False,
        "truncate_prompt_tokens": None,
        "xtc_threshold": 0.1,
        "xtc_probability": 0,
        "nsigma": 0.0,
        "dry_multiplier": 0.0,
        "dry_base": 1.75,
        "dry_allowed_length": 2,
        "dry_sequence_breaker_ids": [],
        "dry_range": 0,
        "skew": 0.0,
        "sampler_priority": [],
        "output_kind": RequestOutputKind.CUMULATIVE,
    }

    def __post_init__(self) -> None:
        self.best_of = self.best_of or self.n
        if 0 < self.temperature < _MAX_TEMP:
            logger.warning(
                f"temperature {self.temperature} is less than {_MAX_TEMP}, "
                "which may cause numerical errors (NaN or inf) in tensors. "
                f"It has been clamped to {_MAX_TEMP}.")
            self.temperature = max(self.temperature, _MAX_TEMP)
        if self.seed == -1:
            self.seed = None
        if self.stop is None:
            self.stop = []
        elif isinstance(self.stop, str):
            self.stop = [self.stop]
        else:
            self.stop = list(self.stop)
        if self.stop_token_ids is None:
            self.stop_token_ids = []
        else:
            self.stop_token_ids = list(self.stop_token_ids)
        self.logprobs = 1 if self.logprobs is True else self.logprobs
        self.prompt_logprobs = (1 if self.prompt_logprobs is True else
                                self.prompt_logprobs)
        # Number of characters to hold back for stop string evaluation
        # until sequence is finished.
        if self.stop and not self.include_stop_str_in_output:
            self.output_text_buffer_length = max(len(s) for s in self.stop) - 1

        self._verify_args()
        if self.use_beam_search:
            if not APHRODITE_NO_DEPRECATION_WARNING:
                logger.warning(
                    "[IMPORTANT] We plan to discontinue the support for beam "
                    "search in the next major release. Set "
                    "APHRODITE_NO_DEPRECATION_WARNING=1 to "
                    "suppress this warning.")
            self._verify_beam_search()
        else:
            self._verify_non_beam_search()
            if self.temperature < _SAMPLING_EPS:
                # Zero temperature means greedy sampling; neutralize the
                # truncation samplers so they cannot interfere.
                self.top_p = 1.0
                self.top_k = -1
                self.min_p = 0.0
                self.top_a = 0.0
                self._verify_greedy_sampling()

        # eos_token_id is added to this by the engine
        self._all_stop_token_ids = set(self.stop_token_ids)

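    # Normalization examples (illustrative; the values are arbitrary):
    #
    #     SamplingParams(stop="###").stop         # -> ["###"]
    #     SamplingParams(seed=-1).seed            # -> None
    #     SamplingParams(logprobs=True).logprobs  # -> 1
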
    def _verify_args(self) -> None:
        if not isinstance(self.n, int):
            raise ValueError(f"n must be an int, but is of "
                             f"type {type(self.n)}")
        if self.n < 1:
            raise ValueError(f"n must be at least 1, got {self.n}.")
        if not isinstance(self.best_of, int):
            raise ValueError(f"best_of must be an int, but is of "
                             f"type {type(self.best_of)}")
        if self.best_of < self.n:
            raise ValueError(f"best_of must be greater than or equal to n, "
                             f"got n={self.n} and best_of={self.best_of}.")
        if not -2.0 <= self.presence_penalty <= 2.0:
            raise ValueError("presence_penalty must be in [-2, 2], got "
                             f"{self.presence_penalty}.")
        if not -2.0 <= self.frequency_penalty <= 2.0:
            raise ValueError("frequency_penalty must be in [-2, 2], got "
                             f"{self.frequency_penalty}.")
        if self.repetition_penalty < 1.0:
            raise ValueError("repetition_penalty must be in [1, inf), got "
                             f"{self.repetition_penalty}.")
        if self.temperature < 0.0:
            raise ValueError(
                f"temperature must be non-negative, got {self.temperature}.")
        if not 0.0 < self.top_p <= 1.0:
            raise ValueError(f"top_p must be in (0, 1], got {self.top_p}.")
        if self.top_k < -1 or self.top_k == 0:
            raise ValueError(f"top_k must be -1 (disable), or at least 1, "
                             f"got {self.top_k}.")
        if self.top_a < 0:
            raise ValueError(f"top_a must be non-negative, got {self.top_a}.")
        if not 0.0 <= self.min_p <= 1.0:
            raise ValueError(f"min_p must be in [0, 1], got {self.min_p}.")
        if not 0.0 < self.tfs <= 1.0:
            raise ValueError(f"tfs must be in (0, 1], got {self.tfs}.")
        if self.epsilon_cutoff < 0.0 or self.epsilon_cutoff > 1000.0:
            raise ValueError("epsilon_cutoff must be in [0, 1000], got "
                             f"{self.epsilon_cutoff}.")
        # pylint: disable=unneeded-not
        if not self.eta_cutoff >= 0:
            raise ValueError(
                f"eta_cutoff must be non-negative, got {self.eta_cutoff}.")
        if not 0.0 < self.typical_p <= 1.0:
            raise ValueError(
                f"typical_p must be in (0, 1], got {self.typical_p}.")
        if self.max_tokens is not None and self.max_tokens < 1:
            raise ValueError(
                f"max_tokens must be at least 1, got {self.max_tokens}.")
        if self.min_tokens < 0:
            raise ValueError(f"min_tokens must be greater than or equal to 0, "
                             f"got {self.min_tokens}.")
        if self.max_tokens is not None and self.min_tokens > self.max_tokens:
            raise ValueError(
                f"min_tokens must be less than or equal to "
                f"max_tokens={self.max_tokens}, got {self.min_tokens}.")
        if self.logprobs is not None and self.logprobs < 0:
            raise ValueError(
                f"logprobs must be non-negative, got {self.logprobs}.")
        if self.prompt_logprobs is not None and self.prompt_logprobs < 0:
            raise ValueError("prompt_logprobs must be non-negative, got "
                             f"{self.prompt_logprobs}.")
        if (self.truncate_prompt_tokens is not None
                and self.truncate_prompt_tokens < 1):
            raise ValueError(f"truncate_prompt_tokens must be >= 1, "
                             f"got {self.truncate_prompt_tokens}")
        assert isinstance(self.stop, list)
        if any(not stop_str for stop_str in self.stop):
            raise ValueError("stop cannot contain an empty string.")
        if self.stop and not self.detokenize:
            raise ValueError(
                "stop strings are only supported when detokenize is True. "
                "Set detokenize=True to use stop.")
        if self.xtc_threshold < 0.0:
            raise ValueError("xtc_threshold must be non-negative, got "
                             f"{self.xtc_threshold}.")
        if not 0.0 <= self.xtc_probability <= 1.0:
            raise ValueError("xtc_probability must be in [0, 1], got "
                             f"{self.xtc_probability}.")
        if self.nsigma < 0.0:
            raise ValueError(f"nsigma must be non-negative, got "
                             f"{self.nsigma}.")
        if self.dry_multiplier < 0.0:
            raise ValueError("dry_multiplier must be non-negative, got "
                             f"{self.dry_multiplier}.")
        if self.dry_base <= 1.0:
            raise ValueError(f"dry_base must be greater than 1, got "
                             f"{self.dry_base}.")
        if self.dry_allowed_length < 0:
            raise ValueError("dry_allowed_length must be non-negative, got "
                             f"{self.dry_allowed_length}.")
        if self.dry_range < 0:
            raise ValueError(f"dry_range must be non-negative, got "
                             f"{self.dry_range}.")
        if self.skew < 0.0:
            raise ValueError(f"skew must be non-negative, got {self.skew}.")
        if self.sampler_priority is not None:
            if not self.sampler_priority:
                # An empty list means "no custom ordering". Fall through so
                # the remaining checks below still run.
                self.sampler_priority = None
            else:
                if not isinstance(self.sampler_priority, list):
                    raise ValueError(
                        "sampler_priority must be a list of integers or "
                        "strings")
                try:
                    self.sampler_priority = [
                        SamplerID.from_str(x) for x in self.sampler_priority
                    ]
                    provided_samplers = set(self.sampler_priority)
                except ValueError as e:
                    raise ValueError(
                        f"Invalid sampler ID in priority list: {e}") from e
                required_samplers = set(SamplerID)
                if not required_samplers.issubset(provided_samplers):
                    missing = required_samplers - provided_samplers
                    missing_names = [s.name for s in missing]
                    raise ValueError(
                        "Missing required samplers in priority list: "
                        f"{missing_names}")
        if self.best_of != self.n and self.output_kind == (
                RequestOutputKind.DELTA):
            raise ValueError("best_of must equal n to use output_kind=DELTA")

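    # Illustrative sampler_priority usage (a sketch): entries may mix
    # case-insensitive names and integer IDs, but every SamplerID member
    # must appear:
    #
    #     order = [s.name for s in SamplerID]      # declaration order
    #     order[0], order[3] = order[3], order[0]  # run TEMPERATURE before DRY
    #     params = SamplingParams(sampler_priority=order)
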
    def _verify_beam_search(self) -> None:
        if self.best_of == 1:
            raise ValueError("best_of must be greater than 1 when using beam "
                             f"search. Got {self.best_of}.")
        if self.temperature > _SAMPLING_EPS:
            raise ValueError("temperature must be 0 when using beam search.")
        if self.top_p < 1.0 - _SAMPLING_EPS:
            raise ValueError("top_p must be 1 when using beam search.")
        if self.top_k != -1:
            raise ValueError("top_k must be -1 when using beam search.")
        if self.early_stopping not in [True, False, "never"]:
            raise ValueError(
                f"early_stopping must be True, False, or 'never', "
                f"got {self.early_stopping}.")

    def _verify_non_beam_search(self) -> None:
        if self.early_stopping is not False:
            raise ValueError("early_stopping is not effective and must be "
                             "False when not using beam search.")
        if (self.length_penalty < 1.0 - _SAMPLING_EPS
                or self.length_penalty > 1.0 + _SAMPLING_EPS):
            raise ValueError(
                "length_penalty is not effective and must be the "
                "default value of 1.0 when not using beam search.")

    def _verify_greedy_sampling(self) -> None:
        assert isinstance(self.best_of, int)
        if self.best_of > 1:
            raise ValueError("best_of must be 1 when using greedy sampling. "
                             f"Got {self.best_of}.")
        if self.top_p < 1.0 - _SAMPLING_EPS:
            raise ValueError("top_p must be 1 when using greedy sampling.")
        if self.top_k != -1:
            raise ValueError("top_k must be -1 when using greedy sampling.")

    def _verify_with_scheduler_config(
            self, scheduler_config: "SchedulerConfig") -> None:
        if scheduler_config.single_user_mode:
            if self.n > 1:
                raise ValueError("n must be 1 in single user mode.")
            if self.use_beam_search:
                raise ValueError(
                    "beam search is not supported in single user mode.")

    def update_from_generation_config(
            self,
            generation_config: Dict[str, Any],
            model_eos_token_id: Optional[int] = None) -> None:
        """Update if there are non-default values from generation_config."""
        if model_eos_token_id is not None:
            # Add the eos token id into the sampling_params to support
            # min_tokens processing.
            self._all_stop_token_ids.add(model_eos_token_id)

        # Update eos_token_id for generation
        if (eos_ids := generation_config.get("eos_token_id")) is not None:
            # it can be either int or list of int
            eos_ids = {eos_ids} if isinstance(eos_ids, int) else set(eos_ids)
            if model_eos_token_id is not None:
                # We don't need to include the primary eos_token_id in
                # stop_token_ids since it's handled separately for stopping
                # purposes.
                eos_ids.discard(model_eos_token_id)
            if eos_ids:
                self._all_stop_token_ids.update(eos_ids)
                if not self.ignore_eos:
                    assert isinstance(self.stop_token_ids, list)
                    eos_ids.update(self.stop_token_ids)
                    self.stop_token_ids = list(eos_ids)

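    # Illustrative merge (a sketch; the token ids are arbitrary): with
    # generation_config={"eos_token_id": [2, 32000]} and model_eos_token_id=2,
    # the secondary id 32000 is folded into stop_token_ids (unless ignore_eos
    # is set), while 2 is tracked only via _all_stop_token_ids.
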
    @cached_property
    def sampling_type(self) -> SamplingType:
        if self.use_beam_search:
            return SamplingType.BEAM
        if self.temperature < _SAMPLING_EPS:
            return SamplingType.GREEDY
        if self.seed is not None:
            return SamplingType.RANDOM_SEED
        return SamplingType.RANDOM
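
    # Note the precedence above: beam search wins over greedy, and greedy
    # wins over seeding, e.g. (illustrative)
    # SamplingParams(temperature=0.0, seed=42).sampling_type is
    # SamplingType.GREEDY, not RANDOM_SEED.
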
    @property
    def all_stop_token_ids(self) -> Set[int]:
        return self._all_stop_token_ids

    def clone(self) -> "SamplingParams":
        """Deep copy excluding LogitsProcessor objects.

        LogitsProcessor objects are excluded because they may contain an
        arbitrary, nontrivial amount of data.
        """
        logit_processor_refs = None if self.logits_processors is None else {
            id(lp): lp
            for lp in self.logits_processors
        }
        return copy.deepcopy(self, memo=logit_processor_refs)
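
    # Pre-seeding deepcopy's memo with {id(lp): lp} makes each logits
    # processor be shared by reference rather than deep-copied.
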
    def __repr__(self) -> str:
        repr_str = "SamplingParams("
        for param, default_value in self.default_values.items():
            current_value = getattr(self, param)
            if current_value != default_value:
                repr_str += f"{param}={current_value}, "
        repr_str = repr_str.rstrip(", ") + ")"
        return repr_str
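

# A minimal smoke test (an illustrative sketch, not part of the library API):
# it exercises the __post_init__ normalization and the sampling_type property.
if __name__ == "__main__":
    params = SamplingParams(temperature=0.0, seed=-1, stop="###",
                            max_tokens=32)
    assert params.seed is None                # seed=-1 is normalized to None
    assert params.stop == ["###"]             # a bare string becomes a list
    assert params.top_k == -1                 # greedy mode resets truncation
    assert params.sampling_type == SamplingType.GREEDY
    print(params)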