import enum
import os
import random
import time
from collections import deque
from dataclasses import dataclass, field
from typing import Deque, Dict, Iterable, List, Optional, Set, Tuple, Union

from loguru import logger

from aphrodite.common.config import CacheConfig, LoRAConfig, SchedulerConfig
from aphrodite.common.sequence import (Sequence, SequenceData, SequenceGroup,
                                       SequenceGroupMetadata, SequenceStatus)
from aphrodite.lora.request import LoRARequest
from aphrodite.processing.interfaces import AllocStatus, BlockSpaceManager
from aphrodite.processing.policy import Policy, PolicyFactory

# Test-only. If configured, decode requests are preempted with probability
# ARTIFICIAL_PREEMPTION_PROB. Note that bool() treats any non-empty string
# as truthy, so any set value enables this flag.
ENABLE_ARTIFICIAL_PREEMPT = bool(
    os.getenv("APHRODITE_TEST_ENABLE_ARTIFICIAL_PREEMPT", False))  # noqa
ARTIFICIAL_PREEMPTION_PROB = 0.5
ARTIFICIAL_PREEMPTION_MAX_CNT = 500
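
# For example, a hypothetical test invocation that enables artificial
# preemption (any non-empty value works because of the bool() conversion
# above):
#
#   APHRODITE_TEST_ENABLE_ARTIFICIAL_PREEMPT=1 pytest tests/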


class PreemptionMode(enum.Enum):
    """Preemption modes.

    1. Swapping: Swap out the blocks of the preempted sequences to CPU memory
       and swap them back in when the sequences are resumed.
    2. Recomputation: Discard the blocks of the preempted sequences and
       recompute them when the sequences are resumed, treating the sequences
       as new prompts.
    """
    SWAP = enum.auto()
    RECOMPUTE = enum.auto()


@dataclass
class SchedulingBudget:
    """The available slots for scheduling.

    TODO: Right now, the budget is request_id-aware, meaning it can ignore
    budget updates from the same request_id. This is because, in the normal
    scheduling path, we update the RUNNING num_seqs ahead of time, meaning it
    could be updated more than once when scheduling RUNNING requests. Since
    this won't happen if we only have chunked prefill scheduling, we can
    remove this feature from the API when chunked prefill is enabled by
    default.
    """
    token_budget: int
    max_num_seqs: int
    _request_ids_num_batched_tokens: Set[str] = field(default_factory=set)
    _request_ids_num_curr_seqs: Set[str] = field(default_factory=set)
    _num_batched_tokens: int = 0
    _num_curr_seqs: int = 0

    def can_schedule(self, *, num_new_tokens: int, num_new_seqs: int):
        assert num_new_tokens != 0
        assert num_new_seqs != 0
        return (self.num_batched_tokens + num_new_tokens <= self.token_budget
                and self.num_curr_seqs + num_new_seqs <= self.max_num_seqs)

    def remaining_token_budget(self):
        return self.token_budget - self.num_batched_tokens

    def add_num_batched_tokens(self, req_id: str, num_batched_tokens: int):
        if req_id in self._request_ids_num_batched_tokens:
            return

        self._request_ids_num_batched_tokens.add(req_id)
        self._num_batched_tokens += num_batched_tokens

    def subtract_num_batched_tokens(self, req_id: str,
                                    num_batched_tokens: int):
        if req_id in self._request_ids_num_batched_tokens:
            self._request_ids_num_batched_tokens.remove(req_id)
            self._num_batched_tokens -= num_batched_tokens

    def add_num_seqs(self, req_id: str, num_curr_seqs: int):
        if req_id in self._request_ids_num_curr_seqs:
            return

        self._request_ids_num_curr_seqs.add(req_id)
        self._num_curr_seqs += num_curr_seqs

    def subtract_num_seqs(self, req_id: str, num_curr_seqs: int):
        if req_id in self._request_ids_num_curr_seqs:
            self._request_ids_num_curr_seqs.remove(req_id)
            self._num_curr_seqs -= num_curr_seqs

    @property
    def num_batched_tokens(self):
        return self._num_batched_tokens

    @property
    def num_curr_seqs(self):
        return self._num_curr_seqs
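

# Illustrative sketch (not used by the scheduler itself): the budget is
# idempotent per request id, so repeated updates for the same id are counted
# only once. The request id and numbers below are hypothetical.
def _example_scheduling_budget() -> None:
    budget = SchedulingBudget(token_budget=2048, max_num_seqs=256)
    if budget.can_schedule(num_new_tokens=512, num_new_seqs=1):
        budget.add_num_batched_tokens("req-0", 512)
        budget.add_num_seqs("req-0", 1)
    # A second update with the same request id is a no-op.
    budget.add_num_batched_tokens("req-0", 512)
    assert budget.num_batched_tokens == 512
    assert budget.remaining_token_budget() == 1536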


@dataclass
class ScheduledSequenceGroup:
    # A sequence group that's scheduled.
    seq_group: SequenceGroup
    # The total chunk size (number of tokens) to process for the next
    # iteration. 1 for decoding. Same as the number of prompt tokens for
    # prefill, but if the prefill is chunked, it can be smaller than that.
    token_chunk_size: int


@dataclass
class SchedulerOutputs:
    """The scheduling decision made from a scheduler."""
    # Scheduled sequence groups.
    scheduled_seq_groups: Iterable[ScheduledSequenceGroup]
    # Number of prefill groups scheduled.
    num_prefill_groups: int
    # Total number of batched tokens.
    num_batched_tokens: int
    # Blocks to swap in. List of (CPU block number, GPU block number).
    blocks_to_swap_in: List[Tuple[int, int]]
    # Blocks to swap out. List of (GPU block number, CPU block number).
    blocks_to_swap_out: List[Tuple[int, int]]
    # Blocks to copy. List of (source block number, destination block number).
    blocks_to_copy: List[Tuple[int, int]]
    # Sequence groups that are going to be ignored.
    ignored_seq_groups: List[SequenceGroup]
    # The number of slots for lookahead decoding.
    num_lookahead_slots: int
    # The number of requests in the running queue.
    running_queue_size: int
    # The number of preempted requests.
    preempted: int

    def __post_init__(self):
        # Swap in and swap out should never happen at the same time.
        assert not (self.blocks_to_swap_in and self.blocks_to_swap_out)

        self.num_loras: int = len(self.lora_requests)
        if self.num_loras > 0:
            self._sort_by_lora_ids()

    def is_empty(self) -> bool:
        # NOTE: We do not consider the ignored sequence groups.
        return (not self.scheduled_seq_groups and not self.blocks_to_swap_in
                and not self.blocks_to_swap_out and not self.blocks_to_copy)

    def _sort_by_lora_ids(self):
        self.scheduled_seq_groups = sorted(
            self.scheduled_seq_groups,
            key=lambda g: (g.seq_group.lora_int_id, g.seq_group.request_id))

    @property
    def lora_requests(self) -> Set[LoRARequest]:
        return {
            g.seq_group.lora_request
            for g in self.scheduled_seq_groups
            if g.seq_group.lora_request is not None
        }


@dataclass
class SchedulerRunningOutputs:
    """The requests that are scheduled from a running queue.

    Could contain prefills (if the prefill is chunked) or decodes. If there's
    not enough memory, requests can be preempted (for recompute) or swapped
    out.
    """
    # Selected sequences that are running and in a decoding phase.
    decode_seq_groups: List[ScheduledSequenceGroup]
    # Selected sequences that are running and in a prefill phase,
    # i.e. the prefill has been chunked.
    prefill_seq_groups: List[ScheduledSequenceGroup]
    # The preempted sequences.
    preempted: List[SequenceGroup]
    # Sequences that are swapped out.
    swapped_out: List[SequenceGroup]
    # The blocks to swap out.
    blocks_to_swap_out: List[Tuple[int, int]]
    # The blocks to copy.
    blocks_to_copy: List[Tuple[int, int]]
    # The number of slots for lookahead decoding.
    num_lookahead_slots: int

    @classmethod
    def create_empty(cls) -> "SchedulerRunningOutputs":
        return SchedulerRunningOutputs(
            decode_seq_groups=[],
            prefill_seq_groups=[],
            preempted=[],
            swapped_out=[],
            blocks_to_swap_out=[],
            blocks_to_copy=[],
            num_lookahead_slots=0,
        )


@dataclass
class SchedulerSwappedInOutputs:
    """The requests that are scheduled from a swap queue.

    Could contain prefills (if the prefill is chunked) or decodes.
    """
    # Selected sequences that are going to be swapped in and are in a
    # decoding phase.
    decode_seq_groups: List[ScheduledSequenceGroup]
    # Selected sequences that are going to be swapped in and are in a prefill
    # phase, i.e. the prefill has been chunked.
    prefill_seq_groups: List[ScheduledSequenceGroup]
    # The blocks to swap in.
    blocks_to_swap_in: List[Tuple[int, int]]
    # The blocks to copy.
    blocks_to_copy: List[Tuple[int, int]]
    # The number of slots for lookahead decoding.
    num_lookahead_slots: int
    # Infeasible sequence groups.
    infeasible_seq_groups: List[SequenceGroup]

    @classmethod
    def create_empty(cls) -> "SchedulerSwappedInOutputs":
        return SchedulerSwappedInOutputs(
            decode_seq_groups=[],
            prefill_seq_groups=[],
            blocks_to_swap_in=[],
            blocks_to_copy=[],
            num_lookahead_slots=0,
            infeasible_seq_groups=[],
        )


@dataclass
class SchedulerPrefillOutputs:
    """The requests that are scheduled from a waiting queue.

    Could contain fresh prefill requests or preempted requests that need
    to be recomputed from scratch.
    """
    # Selected sequences for prefill.
    seq_groups: List[ScheduledSequenceGroup]
    # Ignored sequence groups.
    ignored_seq_groups: List[SequenceGroup]
    num_lookahead_slots: int

    @classmethod
    def create_empty(cls) -> "SchedulerPrefillOutputs":
        return SchedulerPrefillOutputs(
            seq_groups=[],
            ignored_seq_groups=[],
            num_lookahead_slots=0,
        )


class Scheduler:

    def __init__(
        self,
        scheduler_config: SchedulerConfig,
        cache_config: CacheConfig,
        lora_config: Optional[LoRAConfig],
        pipeline_parallel_size: int = 1,
    ) -> None:
        self.scheduler_config = scheduler_config
        self.cache_config = cache_config
        # Note for LoRA scheduling: the current policy is extremely
        # simple and NOT fair. It can lead to starvation of some
        # LoRAs. This should be improved in the future.
        self.lora_config = lora_config

        version = "v1"
        if self.scheduler_config.use_v2_block_manager:
            version = "v2"
        if self.scheduler_config.embedding_mode:
            version = "embedding"

        BlockSpaceManagerImpl = BlockSpaceManager.get_block_space_manager_class(
            version)

        num_gpu_blocks = cache_config.num_gpu_blocks
        if num_gpu_blocks:
            num_gpu_blocks //= pipeline_parallel_size
        num_cpu_blocks = cache_config.num_cpu_blocks
        if num_cpu_blocks:
            num_cpu_blocks //= pipeline_parallel_size

        # Create the block space manager.
        self.block_manager = BlockSpaceManagerImpl(
            block_size=self.cache_config.block_size,
            num_gpu_blocks=num_gpu_blocks,
            num_cpu_blocks=num_cpu_blocks,
            sliding_window=self.cache_config.sliding_window,
            enable_caching=self.cache_config.enable_prefix_caching)

        # Sequence groups in the WAITING state.
        # Contains new prefill or preempted requests.
        self.waiting: Deque[SequenceGroup] = deque()
        # Sequence groups in the RUNNING state.
        # Contains decode requests.
        self.running: Deque[SequenceGroup] = deque()
        # Sequence groups in the SWAPPED state.
        # Contains decode requests that are swapped out.
        self.swapped: Deque[SequenceGroup] = deque()

        # Request ids of sequence groups that have finished since the last
        # scheduling step. It lets the model know that any state associated
        # with these requests can and must be released after the current step.
        self._finished_requests_ids: List[str] = list()
        # Time at the previous scheduling step.
        self.prev_time = 0.0
        # Did we schedule a prompt at the previous step?
        self.prev_prompt = False
        # Latency of the last prompt step.
        self.last_prompt_latency = 0.0
        # Preemption mode: RECOMPUTE or SWAP.
        self.user_specified_preemption_mode = scheduler_config.preemption_mode

        # The following field is test-only. It is used to inject artificial
        # preemption.
        self.enable_artificial_preemption = ENABLE_ARTIFICIAL_PREEMPT
        self.artificial_preempt_cnt = (ARTIFICIAL_PREEMPTION_MAX_CNT
                                       if self.enable_artificial_preemption
                                       else 0)
        self.num_cumulative_preemption: int = 0

    @property
    def lora_enabled(self) -> bool:
        return bool(self.lora_config)

    @property
    def num_decoding_tokens_per_seq(self) -> int:
        """The number of new tokens generated per sequence per decode step."""
        return 1

    def add_seq_group(self, seq_group: SequenceGroup) -> None:
        # Add sequence groups to the waiting queue.
        self.waiting.append(seq_group)

    def abort_seq_group(self, request_id: Union[str, Iterable[str]]) -> None:
        """Aborts a sequence group with the given ID.

        Check if the sequence group with the given ID
        is present in any of the state queues.
        If present, remove the sequence group from the state queue.
        Also, if any of the sequences in the sequence group is not finished,
        free the sequence with status `FINISHED_ABORTED`.
        Otherwise, do nothing.

        Args:
            request_id: The ID(s) of the sequence group to abort.
        """
        if isinstance(request_id, str):
            request_id = (request_id, )
        request_ids = set(request_id)
        for state_queue in [self.waiting, self.running, self.swapped]:
            aborted_groups: List[SequenceGroup] = []
            for seq_group in state_queue:
                if not request_ids:
                    # Using 'break' here may add two extra iterations,
                    # but is acceptable to reduce complexity.
                    break
                if seq_group.request_id in request_ids:
                    # Appending aborted group into pending list.
                    aborted_groups.append(seq_group)
                    request_ids.remove(seq_group.request_id)
            for aborted_group in aborted_groups:
                # Remove the sequence group from the state queue.
                state_queue.remove(aborted_group)
                for seq in aborted_group.get_seqs():
                    if seq.is_finished():
                        continue
                    seq.status = SequenceStatus.FINISHED_ABORTED
                    self.free_seq(seq)
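
    # Illustrative sketch (hypothetical request ids): a caller may abort by
    # a single request id or by any iterable of ids.
    #
    #     scheduler.abort_seq_group("req-0")
    #     scheduler.abort_seq_group(["req-1", "req-2"])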

    def has_unfinished_seqs(self) -> bool:
        return len(self.waiting) != 0 or len(self.running) != 0 or len(
            self.swapped) != 0

    def get_num_unfinished_seq_groups(self) -> int:
        return len(self.waiting) + len(self.running) + len(self.swapped)

    def get_and_reset_finished_requests_ids(self) -> List[str]:
        """Flushes the list of request ids of previously finished seq_groups."""
        finished_requests_ids = self._finished_requests_ids
        self._finished_requests_ids = list()
        return finished_requests_ids

    def _schedule_running(
        self,
        running_queue: deque,
        budget: SchedulingBudget,
        curr_loras: Optional[Set[int]],
        policy: Policy,
        enable_chunking: bool = False,
    ) -> Tuple[deque, SchedulerRunningOutputs]:
        """Schedule sequence groups that are running.

        The running queue should include decode and chunked prefill requests.

        Args:
            running_queue: The queue that contains running requests (i.e.,
                decodes). The given arguments are NOT in-place modified.
            budget: The scheduling budget. The argument is in-place updated
                when any decodes are preempted.
            curr_loras: Currently batched lora request ids. The argument is
                in-place updated when any decodes are preempted.
            policy: The sorting policy to sort running_queue.
            enable_chunking: If True, seq group can be chunked and only a
                chunked number of tokens are scheduled if
                `budget.num_batched_tokens` does not have enough capacity to
                schedule all tokens.

        Returns:
            A tuple of the remaining running queue (which should be empty)
            after scheduling and SchedulerRunningOutputs.
        """
        # Blocks that need to be swapped or copied before model execution.
        blocks_to_swap_out: List[Tuple[int, int]] = []
        blocks_to_copy: List[Tuple[int, int]] = []

        decode_seq_groups: List[ScheduledSequenceGroup] = []
        prefill_seq_groups: List[ScheduledSequenceGroup] = []
        preempted: List[SequenceGroup] = []
        swapped_out: List[SequenceGroup] = []

        # NOTE: Preemption happens only when there is no available slot
        # to keep all the sequence groups in the RUNNING state.
        # In this case, the policy is responsible for deciding which sequence
        # groups to preempt.
        now = time.time()
        running_queue = policy.sort_by_priority(now, running_queue)
        while running_queue:
            seq_group = running_queue[0]
            num_running_tokens = self._get_num_new_tokens(
                seq_group, SequenceStatus.RUNNING, enable_chunking, budget)

            if num_running_tokens == 0:
                break

            running_queue.popleft()
            while not self._can_append_slots(seq_group):
                budget.subtract_num_batched_tokens(seq_group.request_id,
                                                   num_running_tokens)
                num_running_seqs = seq_group.get_max_num_running_seqs()
                budget.subtract_num_seqs(seq_group.request_id,
                                         num_running_seqs)
                if (curr_loras is not None and seq_group.lora_int_id > 0
                        and seq_group.lora_int_id in curr_loras):
                    curr_loras.remove(seq_group.lora_int_id)

                if running_queue:
                    # Preempt the lowest-priority sequence group.
                    victim_seq_group = running_queue.pop()
                    preempted_mode = self._preempt(victim_seq_group,
                                                   blocks_to_swap_out)
                    if preempted_mode == PreemptionMode.RECOMPUTE:
                        preempted.append(victim_seq_group)
                    else:
                        swapped_out.append(victim_seq_group)
                else:
                    # No other sequence group can be preempted.
                    # Preempt the current sequence group.
                    preempted_mode = self._preempt(seq_group,
                                                   blocks_to_swap_out)
                    if preempted_mode == PreemptionMode.RECOMPUTE:
                        preempted.append(seq_group)
                    else:
                        swapped_out.append(seq_group)
                    break
            else:
                # The while-else branch runs when slots could be appended
                # without preempting the current sequence group.
                self._append_slots(seq_group, blocks_to_copy)
                is_prefill = seq_group.is_prefill()
                if is_prefill:
                    prefill_seq_groups.append(
                        ScheduledSequenceGroup(
                            seq_group=seq_group,
                            token_chunk_size=num_running_tokens))
                else:
                    decode_seq_groups.append(
                        ScheduledSequenceGroup(seq_group=seq_group,
                                               token_chunk_size=1))
                budget.add_num_batched_tokens(seq_group.request_id,
                                              num_running_tokens)
                # OPTIMIZATION: Note that get_max_num_running_seqs is
                # expensive. For the default scheduling case where
                # enable_chunking is False, num_seqs are updated before
                # running this method, so we don't have to update it again
                # here.
                if enable_chunking:
                    num_running_seqs = seq_group.get_max_num_running_seqs()
                    budget.add_num_seqs(seq_group.request_id,
                                        num_running_seqs)
                if curr_loras is not None and seq_group.lora_int_id > 0:
                    curr_loras.add(seq_group.lora_int_id)

        return running_queue, SchedulerRunningOutputs(
            decode_seq_groups=decode_seq_groups,
            prefill_seq_groups=prefill_seq_groups,
            preempted=preempted,
            swapped_out=swapped_out,
            blocks_to_swap_out=blocks_to_swap_out,
            blocks_to_copy=blocks_to_copy,
            num_lookahead_slots=self._get_num_lookahead_slots(
                is_prefill=False))

    def _schedule_swapped(
        self,
        swapped_queue: deque,
        budget: SchedulingBudget,
        curr_loras: Optional[Set[int]],
        policy: Policy,
        enable_chunking: bool = False,
    ) -> Tuple[deque, SchedulerSwappedInOutputs]:
        """Schedule sequence groups that are swapped out.

        It schedules swapped requests as long as they fit `budget` and
        curr_loras <= max_lora from the scheduling config. The input arguments
        `budget` and `curr_loras` are updated based on scheduled seq_groups.

        Args:
            swapped_queue: The queue that contains swapped out requests.
                The given arguments are NOT in-place modified.
            budget: The scheduling budget. The argument is in-place updated
                when any requests are swapped in.
            curr_loras: Currently batched lora request ids. The argument is
                in-place updated when any requests are swapped in.
            policy: The sorting policy to sort swapped_queue.
            enable_chunking: If True, seq group can be chunked and only a
                chunked number of tokens are scheduled if
                `budget.num_batched_tokens` does not have enough capacity to
                schedule all tokens.

        Returns:
            A tuple of the remaining swapped_queue after scheduling and
            SchedulerSwappedInOutputs.
        """
        # Blocks that need to be swapped or copied before model execution.
        blocks_to_swap_in: List[Tuple[int, int]] = []
        blocks_to_copy: List[Tuple[int, int]] = []
        decode_seq_groups: List[ScheduledSequenceGroup] = []
        prefill_seq_groups: List[ScheduledSequenceGroup] = []
        now = time.time()
        swapped_queue = policy.sort_by_priority(now, swapped_queue)
        infeasible_seq_groups: List[SequenceGroup] = []

        leftover_swapped: Deque[SequenceGroup] = deque()
        while swapped_queue:
            seq_group = swapped_queue[0]

            # If the sequence group cannot be swapped in, stop.
            is_prefill = seq_group.is_prefill()
            alloc_status = self.block_manager.can_swap_in(
                seq_group, self._get_num_lookahead_slots(is_prefill))
            if alloc_status == AllocStatus.LATER:
                break
            elif alloc_status == AllocStatus.NEVER:
                logger.warning(f"Failing the request {seq_group.request_id} "
                               "because there are not enough KV cache blocks "
                               "to run the entire sequence.")
                for seq in seq_group.get_seqs():
                    seq.status = SequenceStatus.FINISHED_IGNORED
                infeasible_seq_groups.append(seq_group)
                swapped_queue.popleft()
                continue

            lora_int_id = 0
            if self.lora_enabled:
                lora_int_id = seq_group.lora_int_id
                assert curr_loras is not None
                assert self.lora_config is not None
                if (lora_int_id > 0 and (lora_int_id not in curr_loras)
                        and len(curr_loras) >= self.lora_config.max_loras):
                    # We don't have space for another LoRA, so
                    # we ignore this request for now.
                    leftover_swapped.appendleft(seq_group)
                    swapped_queue.popleft()
                    continue

            # The total number of sequences in the RUNNING state should not
            # exceed the maximum number of sequences.
            num_new_seqs = seq_group.get_max_num_running_seqs()
            num_new_tokens = self._get_num_new_tokens(seq_group,
                                                      SequenceStatus.SWAPPED,
                                                      enable_chunking, budget)

            if (num_new_tokens == 0
                    or not budget.can_schedule(num_new_tokens=num_new_tokens,
                                               num_new_seqs=num_new_seqs)):
                break

            if lora_int_id > 0 and curr_loras is not None:
                curr_loras.add(lora_int_id)
            swapped_queue.popleft()
            self._swap_in(seq_group, blocks_to_swap_in)
            self._append_slots(seq_group, blocks_to_copy)
            is_prefill = seq_group.is_prefill()
            if is_prefill:
                prefill_seq_groups.append(
                    ScheduledSequenceGroup(seq_group,
                                           token_chunk_size=num_new_tokens))
            else:
                decode_seq_groups.append(
                    ScheduledSequenceGroup(seq_group, token_chunk_size=1))
            budget.add_num_batched_tokens(seq_group.request_id,
                                          num_new_tokens)
            budget.add_num_seqs(seq_group.request_id, num_new_seqs)

        swapped_queue.extendleft(leftover_swapped)

        return swapped_queue, SchedulerSwappedInOutputs(
            decode_seq_groups=decode_seq_groups,
            prefill_seq_groups=prefill_seq_groups,
            blocks_to_swap_in=blocks_to_swap_in,
            blocks_to_copy=blocks_to_copy,
            num_lookahead_slots=self._get_num_lookahead_slots(
                is_prefill=False),
            infeasible_seq_groups=infeasible_seq_groups,
        )

    def _get_prompt_limit(self, seq_group: SequenceGroup) -> int:
        if self.scheduler_config.chunked_prefill_enabled:
            prompt_limit = self.scheduler_config.max_model_len
        else:
            prompt_limit = min(self.scheduler_config.max_model_len,
                               self.scheduler_config.max_num_batched_tokens)

        # Model is fine-tuned with long context. Return the fine-tuned
        # max_len.
        if (seq_group.lora_request
                and seq_group.lora_request.long_lora_max_len):
            assert prompt_limit <= seq_group.lora_request.long_lora_max_len
            return seq_group.lora_request.long_lora_max_len
        else:
            return prompt_limit
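
    # Worked example (hypothetical values): without chunked prefill, a prompt
    # is capped by both limits, e.g. min(max_model_len=4096,
    # max_num_batched_tokens=2560) == 2560, so a 3000-token prompt would be
    # ignored as too long. With chunked prefill, the cap is max_model_len
    # alone, since the prompt can be processed across several chunks.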

    def _schedule_prefills(
        self,
        waiting_queue: deque,
        budget: SchedulingBudget,
        curr_loras: Optional[Set[int]],
        enable_chunking: bool = False,
    ) -> Tuple[deque, SchedulerPrefillOutputs]:
        """Schedule sequence groups that are in prefill stage.

        Note that the current scheduler treats PREEMPTED_FOR_RECOMPUTE
        as a new prefill (one that starts from the beginning and runs up to
        the most recently generated tokens).

        It schedules waiting requests as long as they fit `budget` and
        curr_loras <= max_lora from the scheduling config. The input arguments
        `budget` and `curr_loras` are updated based on scheduled seq_groups.

        Args:
            waiting_queue: The queue that contains prefill requests.
                The given arguments are NOT in-place modified.
            budget: The scheduling budget. The argument is in-place updated
                when any requests are scheduled.
            curr_loras: Currently batched lora request ids. The argument is
                in-place updated when any requests are scheduled.
            enable_chunking: If True, seq group can be chunked and only a
                chunked number of tokens are scheduled if
                `budget.num_batched_tokens` does not have enough capacity to
                schedule all tokens.

        Returns:
            A tuple of the remaining waiting_queue after scheduling and
            SchedulerPrefillOutputs.
        """
        ignored_seq_groups: List[SequenceGroup] = []
        seq_groups: List[ScheduledSequenceGroup] = []
        # We don't sort the waiting queue because we assume it is sorted.
        # Copy the queue so that the input queue is not modified.
        waiting_queue = deque([s for s in waiting_queue])

        leftover_waiting_sequences: Deque[SequenceGroup] = deque()
        while self._passed_delay(time.time()) and waiting_queue:
            seq_group = waiting_queue[0]

            waiting_seqs = seq_group.get_seqs(status=SequenceStatus.WAITING)
            assert len(waiting_seqs) == 1, (
                "Waiting sequence group should have only one prompt "
                "sequence.")
            num_new_tokens = self._get_num_new_tokens(seq_group,
                                                      SequenceStatus.WAITING,
                                                      enable_chunking, budget)
            if not enable_chunking:
                num_prompt_tokens = waiting_seqs[0].get_len()
                assert num_new_tokens == num_prompt_tokens

            prompt_limit = self._get_prompt_limit(seq_group)
            if num_new_tokens > prompt_limit:
                logger.warning(
                    f"Input prompt ({num_new_tokens} tokens) is too long"
                    f" and exceeds limit of {prompt_limit}")
                for seq in waiting_seqs:
                    seq.status = SequenceStatus.FINISHED_IGNORED
                ignored_seq_groups.append(seq_group)
                waiting_queue.popleft()
                continue

            # If the sequence group cannot be allocated, stop.
            can_allocate = self.block_manager.can_allocate(seq_group)
            if can_allocate == AllocStatus.LATER:
                break
            elif can_allocate == AllocStatus.NEVER:
                logger.warning(
                    f"Input prompt ({num_new_tokens} tokens) is too long"
                    " and exceeds the capacity of the block_manager")
                for seq in waiting_seqs:
                    seq.status = SequenceStatus.FINISHED_IGNORED
                ignored_seq_groups.append(seq_group)
                waiting_queue.popleft()
                continue

            lora_int_id = 0
            if self.lora_enabled:
                lora_int_id = seq_group.lora_int_id
                assert curr_loras is not None
                assert self.lora_config is not None
                if (self.lora_enabled and lora_int_id > 0
                        and lora_int_id not in curr_loras
                        and len(curr_loras) >= self.lora_config.max_loras):
                    # We don't have space for another LoRA, so
                    # we ignore this request for now.
                    leftover_waiting_sequences.appendleft(seq_group)
                    waiting_queue.popleft()
                    continue

            num_new_seqs = seq_group.get_max_num_running_seqs()
            if (num_new_tokens == 0
                    or not budget.can_schedule(num_new_tokens=num_new_tokens,
                                               num_new_seqs=num_new_seqs)):
                break

            # Can schedule this request.
            if curr_loras is not None and lora_int_id > 0:
                curr_loras.add(lora_int_id)
            waiting_queue.popleft()
            self._allocate_and_set_running(seq_group)
            seq_groups.append(
                ScheduledSequenceGroup(seq_group=seq_group,
                                       token_chunk_size=num_new_tokens))
            budget.add_num_batched_tokens(seq_group.request_id,
                                          num_new_tokens)
            budget.add_num_seqs(seq_group.request_id, num_new_seqs)

        # Queue requests that couldn't be scheduled.
        waiting_queue.extendleft(leftover_waiting_sequences)
        if len(seq_groups) > 0:
            self.prev_prompt = True

        return waiting_queue, SchedulerPrefillOutputs(
            seq_groups=seq_groups,
            ignored_seq_groups=ignored_seq_groups,
            num_lookahead_slots=self._get_num_lookahead_slots(
                is_prefill=True))

    def _schedule_default(self) -> SchedulerOutputs:
        """Schedule queued requests.

        The current policy is designed to optimize throughput. First,
        it batches as many prefill requests as possible. Then it schedules
        decodes. If there's pressure on GPU memory, decode requests can
        be swapped or preempted.
        """
        # Include running requests in the budget.
        budget = SchedulingBudget(
            token_budget=self.scheduler_config.max_num_batched_tokens,
            max_num_seqs=self.scheduler_config.max_num_seqs,
        )
        # Make sure we include num running seqs before scheduling prefill,
        # so that we don't schedule beyond max_num_seqs for prefill.
        for seq_group in self.running:
            budget.add_num_seqs(seq_group.request_id,
                                seq_group.get_max_num_running_seqs())
        curr_loras = set(
            seq_group.lora_int_id for seq_group in self.running
            if seq_group.lora_int_id > 0) if self.lora_enabled else None

        remaining_waiting, prefills = (self.waiting,
                                       SchedulerPrefillOutputs.create_empty())
        remaining_running, running_scheduled = (
            self.running, SchedulerRunningOutputs.create_empty())
        remaining_swapped, swapped_in = (
            self.swapped, SchedulerSwappedInOutputs.create_empty())

        # If any requests are swapped, prioritize swapped requests.
        if not self.swapped:
            remaining_waiting, prefills = self._schedule_prefills(
                self.waiting, budget, curr_loras, enable_chunking=False)

        fcfs_policy = PolicyFactory.get_policy(policy_name="fcfs")
        # Don't schedule decodes if prefills are scheduled.
        # NOTE: If `_schedule_prefills` doesn't enable chunking, self.running
        # only contains decode requests, not chunked prefills.
        if len(prefills.seq_groups) == 0:
            remaining_running, running_scheduled = self._schedule_running(
                self.running,
                budget,
                curr_loras,
                fcfs_policy,
                enable_chunking=False)

            # If any sequence group is preempted, do not swap in any sequence
            # group, because it means there's no slot for new running
            # requests.
            if len(running_scheduled.preempted) + len(
                    running_scheduled.swapped_out) == 0:
                remaining_swapped, swapped_in = self._schedule_swapped(
                    self.swapped, budget, curr_loras, fcfs_policy)

        assert (budget.num_batched_tokens <=
                self.scheduler_config.max_num_batched_tokens)
        assert budget.num_curr_seqs <= self.scheduler_config.max_num_seqs

        # Update waiting requests.
        self.waiting = remaining_waiting
        self.waiting.extendleft(running_scheduled.preempted)
        # Update new running requests.
        self.running = remaining_running
        self.running.extend([s.seq_group for s in prefills.seq_groups])
        self.running.extend(
            [s.seq_group for s in running_scheduled.decode_seq_groups])
        self.running.extend(
            [s.seq_group for s in swapped_in.decode_seq_groups])
        # Update swapped requests.
        self.swapped = remaining_swapped
        self.swapped.extend(running_scheduled.swapped_out)
        preempted = (len(running_scheduled.preempted) +
                     len(running_scheduled.swapped_out))

        # There should be no prefill from the running queue because this
        # policy doesn't allow chunked prefills.
        assert len(running_scheduled.prefill_seq_groups) == 0
        assert len(swapped_in.prefill_seq_groups) == 0
        return SchedulerOutputs(
            scheduled_seq_groups=(prefills.seq_groups +
                                  running_scheduled.decode_seq_groups +
                                  swapped_in.decode_seq_groups),
            num_prefill_groups=len(prefills.seq_groups),
            num_batched_tokens=budget.num_batched_tokens,
            blocks_to_swap_in=swapped_in.blocks_to_swap_in,
            blocks_to_swap_out=running_scheduled.blocks_to_swap_out,
            blocks_to_copy=running_scheduled.blocks_to_copy +
            swapped_in.blocks_to_copy,
            ignored_seq_groups=prefills.ignored_seq_groups +
            swapped_in.infeasible_seq_groups,
            num_lookahead_slots=running_scheduled.num_lookahead_slots,
            running_queue_size=len(self.running),
            preempted=preempted,
        )

    def _schedule_chunked_prefill(self) -> SchedulerOutputs:
        """Schedule queued requests.

        Chunked prefill allows chunking prefill requests and batching them
        together with decode requests. This policy: 1. schedules as many
        decode requests as possible; 2. schedules chunked prefill requests
        that are not finished; 3. schedules swapped requests; 4. schedules
        new prefill requests.

        The policy can sustain high GPU utilization because it can put
        prefill and decode requests in the same batch, and it improves
        inter-token latency because decode requests don't need to be blocked
        by prefill requests.
        """
        budget = SchedulingBudget(
            token_budget=self.scheduler_config.max_num_batched_tokens,
            max_num_seqs=self.scheduler_config.max_num_seqs,
        )
        curr_loras: Set[int] = set()

        remaining_waiting, prefills = (self.waiting,
                                       SchedulerPrefillOutputs.create_empty())
        remaining_running, running_scheduled = (
            self.running, SchedulerRunningOutputs.create_empty())
        remaining_swapped, swapped_in = (
            self.swapped, SchedulerSwappedInOutputs.create_empty())

        # Decoding should always be scheduled first by fcfs.
        fcfs_policy = PolicyFactory.get_policy(policy_name="fcfs")
        remaining_running, running_scheduled = self._schedule_running(
            self.running,
            budget,
            curr_loras,
            fcfs_policy,
            enable_chunking=True)

        # Schedule swapped out requests.
        # If preemption happens, it means we don't have space for swap-in.
        if len(running_scheduled.preempted) + len(
                running_scheduled.swapped_out) == 0:
            remaining_swapped, swapped_in = self._schedule_swapped(
                self.swapped, budget, curr_loras, fcfs_policy)

        # Schedule new prefills.
        remaining_waiting, prefills = self._schedule_prefills(
            self.waiting, budget, curr_loras, enable_chunking=True)

        assert (budget.num_batched_tokens <=
                self.scheduler_config.max_num_batched_tokens)
        assert budget.num_curr_seqs <= self.scheduler_config.max_num_seqs

        # Update waiting requests.
        self.waiting = remaining_waiting
        self.waiting.extendleft(running_scheduled.preempted)
        # Update new running requests.
        self.running = remaining_running
        self.running.extend([s.seq_group for s in prefills.seq_groups])
        self.running.extend(
            [s.seq_group for s in running_scheduled.decode_seq_groups])
        self.running.extend(
            [s.seq_group for s in running_scheduled.prefill_seq_groups])
        self.running.extend(
            [s.seq_group for s in swapped_in.decode_seq_groups])
        self.running.extend(
            [s.seq_group for s in swapped_in.prefill_seq_groups])
        # Update swapped requests.
        self.swapped = remaining_swapped
        self.swapped.extend(running_scheduled.swapped_out)
        return SchedulerOutputs(
            scheduled_seq_groups=(prefills.seq_groups +
                                  running_scheduled.prefill_seq_groups +
                                  swapped_in.prefill_seq_groups +
                                  running_scheduled.decode_seq_groups +
                                  swapped_in.decode_seq_groups),
            num_prefill_groups=(len(prefills.seq_groups) +
                                len(swapped_in.prefill_seq_groups) +
                                len(running_scheduled.prefill_seq_groups)),
            num_batched_tokens=budget.num_batched_tokens,
            blocks_to_swap_in=swapped_in.blocks_to_swap_in,
            blocks_to_swap_out=running_scheduled.blocks_to_swap_out,
            blocks_to_copy=running_scheduled.blocks_to_copy +
            swapped_in.blocks_to_copy,
            ignored_seq_groups=prefills.ignored_seq_groups +
            swapped_in.infeasible_seq_groups,
            num_lookahead_slots=running_scheduled.num_lookahead_slots,
            running_queue_size=len(self.running),
            preempted=(len(running_scheduled.preempted) +
                       len(running_scheduled.swapped_out)),
        )

    def _schedule(self) -> SchedulerOutputs:
        """Schedule queued requests."""
        if self.scheduler_config.chunked_prefill_enabled:
            return self._schedule_chunked_prefill()
        else:
            return self._schedule_default()

    def _can_append_slots(self, seq_group: SequenceGroup) -> bool:
        """Determine whether or not we have enough space in the KV cache to
        continue generation of the sequence group.
        """
        # This branch returns False only in the testing case, to trigger
        # artificial preemption.
        if (self.enable_artificial_preemption
                and random.uniform(0, 1) < ARTIFICIAL_PREEMPTION_PROB
                and self.artificial_preempt_cnt > 0):
            self.artificial_preempt_cnt -= 1
            return False

        # Appending slots only occurs in decoding.
        is_prefill = False

        return self.block_manager.can_append_slots(
            seq_group=seq_group,
            num_lookahead_slots=self._get_num_lookahead_slots(is_prefill),
        )

    def schedule(
            self) -> Tuple[List[SequenceGroupMetadata], SchedulerOutputs]:
        # Schedule sequence groups.
        # This function call changes the internal states of the scheduler
        # such as self.running, self.swapped, and self.waiting.
        scheduler_outputs = self._schedule()
        now = time.time()

        # Create input data structures.
        seq_group_metadata_list: List[SequenceGroupMetadata] = []
        for i, scheduled_seq_group in enumerate(
                scheduler_outputs.scheduled_seq_groups):
            seq_group = scheduled_seq_group.seq_group
            token_chunk_size = scheduled_seq_group.token_chunk_size
            seq_group.maybe_set_first_scheduled_time(now)

            # seq_id -> SequenceData
            seq_data: Dict[int, SequenceData] = {}
            # seq_id -> physical block numbers
            block_tables: Dict[int, List[int]] = {}

            for seq in seq_group.get_seqs(status=SequenceStatus.RUNNING):
                seq_id = seq.seq_id
                seq_data[seq_id] = seq.data
                block_tables[seq_id] = self.block_manager.get_block_table(seq)
                self.block_manager.access_all_blocks_in_seq(seq, now)

            common_computed_block_nums = (
                self.block_manager.get_common_computed_block_ids(
                    seq_group.get_seqs(status=SequenceStatus.RUNNING)))

            do_sample = True
            if seq_group.is_prefill():
                seqs = seq_group.get_seqs()
                # Prefill has only 1 sequence.
                assert len(seqs) == 1
                # If not all prompt tokens will have been computed after the
                # next iteration, the prefill is chunked and we don't need
                # sampling.
                # NOTE: We use get_len instead of get_prompt_len because when
                # a sequence is preempted, prefill includes previously
                # generated output tokens.
                if (token_chunk_size +
                        seqs[0].data.get_num_computed_tokens() <
                        seqs[0].data.get_len()):
                    do_sample = False

            # This assumes scheduled_seq_groups is ordered with prefills
            # before decodes.
            is_prompt = seq_group.is_prefill()
            seq_group_metadata = SequenceGroupMetadata(
                request_id=seq_group.request_id,
                is_prompt=is_prompt,
                seq_data=seq_data,
                sampling_params=seq_group.sampling_params,
                block_tables=block_tables,
                do_sample=do_sample,
                pooling_params=seq_group.pooling_params,
                token_chunk_size=token_chunk_size,
                lora_request=seq_group.lora_request,
                computed_block_nums=common_computed_block_nums,
                state=seq_group.state,
                # `multi_modal_data` will only be present for the 1st comm
                # between engine and worker.
                # The subsequent comms can still use delta, but
                # `multi_modal_data` will be None.
                multi_modal_data=seq_group.multi_modal_data
                if scheduler_outputs.num_prefill_groups > 0 else None,
            )
            seq_group_metadata_list.append(seq_group_metadata)

        # Now that the batch has been created, we can assume all blocks in the
        # batch will have been computed before the next scheduling invocation.
        # This is because the engine assumes that a failure in model execution
        # will crash the Aphrodite instance / will not retry.
        for scheduled_seq_group in scheduler_outputs.scheduled_seq_groups:
            self.block_manager.mark_blocks_as_computed(
                scheduled_seq_group.seq_group)

        return seq_group_metadata_list, scheduler_outputs
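
    # Illustrative engine-loop sketch (an assumption about how a caller
    # drives the scheduler; `execute_model` is a hypothetical model-runner
    # call, not part of this file):
    #
    #     seq_group_metadata_list, scheduler_outputs = scheduler.schedule()
    #     if not scheduler_outputs.is_empty():
    #         execute_model(seq_group_metadata_list,
    #                       scheduler_outputs.blocks_to_swap_in,
    #                       scheduler_outputs.blocks_to_swap_out,
    #                       scheduler_outputs.blocks_to_copy)
    #     scheduler.free_finished_seq_groups()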

    def fork_seq(self, parent_seq: Sequence, child_seq: Sequence) -> None:
        self.block_manager.fork(parent_seq, child_seq)

    def free_seq(self, seq: Sequence) -> None:
        """Free a sequence from a block table."""
        self.block_manager.free(seq)

    def free_finished_seq_groups(self) -> None:
        for queue in [self.running, self.swapped, self.waiting]:
            self._finished_requests_ids += [
                seq_group.request_id for seq_group in queue
                if seq_group.is_finished()
            ]
        self.running = deque(seq_group for seq_group in self.running
                             if not seq_group.is_finished())

    def _allocate_and_set_running(self, seq_group: SequenceGroup) -> None:
        self.block_manager.allocate(seq_group)
        for seq in seq_group.get_seqs(status=SequenceStatus.WAITING):
            seq.status = SequenceStatus.RUNNING

    def _append_slots(
        self,
        seq_group: SequenceGroup,
        blocks_to_copy: List[Tuple[int, int]],
    ) -> None:
        """Appends new slots to the sequences in the given sequence group.

        Args:
            seq_group (SequenceGroup): The sequence group containing the
                sequences to append slots to.
            blocks_to_copy (List[Tuple[int, int]]): A list of tuples of two
                ints, where the first int is the source block index and the
                second int is the destination block index. This list is
                updated with the new source and destination block indices
                for the appended slots.
        """
        num_lookahead_slots = self._get_num_lookahead_slots(is_prefill=False)

        for seq in seq_group.get_seqs(status=SequenceStatus.RUNNING):
            cows = self.block_manager.append_slots(seq, num_lookahead_slots)
            blocks_to_copy.extend(cows)

    def _preempt(
        self,
        seq_group: SequenceGroup,
        blocks_to_swap_out: List[Tuple[int, int]],
        preemption_mode: Optional[PreemptionMode] = None,
    ) -> PreemptionMode:
        # If preemption mode is not specified, we determine the mode as
        # follows: we use recomputation by default since it incurs lower
        # overhead than swapping. However, when the sequence group has
        # multiple sequences (e.g., beam search), recomputation is not
        # currently supported. In such a case, we use swapping instead.
        # FIXME: This makes our scheduling policy a bit bizarre.
        # As swapped sequences are prioritized over waiting sequences,
        # sequence groups with multiple sequences are implicitly prioritized
        # over sequence groups with a single sequence.
        # TODO: Support recomputation for sequence groups with multiple
        # sequences. This may require a more sophisticated CUDA kernel.
        if self.user_specified_preemption_mode is None:
            if seq_group.get_max_num_running_seqs() == 1:
                preemption_mode = PreemptionMode.RECOMPUTE
            else:
                preemption_mode = PreemptionMode.SWAP
        elif self.user_specified_preemption_mode == "swap":
            preemption_mode = PreemptionMode.SWAP
        else:
            preemption_mode = PreemptionMode.RECOMPUTE

        if self.num_cumulative_preemption % 50 == 0:
            logger.warning(
                f"Sequence group {seq_group.request_id} is preempted by "
                f"{preemption_mode} mode because there is "
                "not enough KV cache space. This can affect the end-to-end "
                "performance. Increase gpu_memory_utilization or "
                "tensor_parallel_size to provide more KV cache memory. "
                "total_num_cumulative_preemption="
                f"{self.num_cumulative_preemption + 1}")
        self.num_cumulative_preemption += 1

        if preemption_mode == PreemptionMode.RECOMPUTE:
            self._preempt_by_recompute(seq_group)
        elif preemption_mode == PreemptionMode.SWAP:
            self._preempt_by_swap(seq_group, blocks_to_swap_out)
        else:
            raise AssertionError("Invalid preemption mode.")
        return preemption_mode

    def _preempt_by_recompute(
        self,
        seq_group: SequenceGroup,
    ) -> None:
        seqs = seq_group.get_seqs(status=SequenceStatus.RUNNING)
        assert len(seqs) == 1
        for seq in seqs:
            seq.status = SequenceStatus.WAITING
            self.free_seq(seq)
            seq.reset_state_for_recompute()

    def _preempt_by_swap(
        self,
        seq_group: SequenceGroup,
        blocks_to_swap_out: List[Tuple[int, int]],
    ) -> None:
        self._swap_out(seq_group, blocks_to_swap_out)

    def _swap_in(
        self,
        seq_group: SequenceGroup,
        blocks_to_swap_in: List[Tuple[int, int]],
    ) -> None:
        mapping = self.block_manager.swap_in(seq_group)
        blocks_to_swap_in.extend(mapping)
        for seq in seq_group.get_seqs(status=SequenceStatus.SWAPPED):
            seq.status = SequenceStatus.RUNNING

    def _swap_out(
        self,
        seq_group: SequenceGroup,
        blocks_to_swap_out: List[Tuple[int, int]],
    ) -> None:
        if not self.block_manager.can_swap_out(seq_group):
            # FIXME: Abort the sequence group instead of aborting the
            # entire engine.
            raise RuntimeError(
                "Aborted due to the lack of CPU swap space. Please increase "
                "the swap space to avoid this error.")
        mapping = self.block_manager.swap_out(seq_group)
        blocks_to_swap_out.extend(mapping)
        for seq in seq_group.get_seqs(status=SequenceStatus.RUNNING):
            seq.status = SequenceStatus.SWAPPED

    def _passed_delay(self, now: float) -> bool:
        if self.prev_prompt:
            self.last_prompt_latency = now - self.prev_time
        self.prev_time, self.prev_prompt = now, False
        # Delay scheduling prompts to let the waiting queue fill up.
        if self.scheduler_config.delay_factor > 0 and self.waiting:
            earliest_arrival_time = min(
                [e.metrics.arrival_time for e in self.waiting])
            passed_delay = (
                (now - earliest_arrival_time) >
                (self.scheduler_config.delay_factor *
                 self.last_prompt_latency) or not self.running)
        else:
            passed_delay = True
        return passed_delay
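
    # Worked example (hypothetical values): with delay_factor=0.5 and
    # last_prompt_latency=0.2s, new prompts are scheduled once the oldest
    # waiting request has aged more than 0.5 * 0.2 = 0.1s, or immediately
    # when nothing is running.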

    def _get_num_lookahead_slots(self, is_prefill: bool) -> int:
        """The number of slots to allocate per sequence per step, beyond known
        token ids. Speculative decoding uses these slots to store KV
        activations of tokens which may or may not be accepted.

        Speculative decoding does not yet support prefill, so we do not
        perform lookahead allocation for prefill.
        """
        if is_prefill:
            return 0

        return self.scheduler_config.num_lookahead_slots

    def _get_num_new_tokens(self, seq_group: SequenceGroup,
                            status: SequenceStatus, enable_chunking: bool,
                            budget: SchedulingBudget) -> int:
        """Get the number of new tokens to compute for a given sequence group
        that's in the given `status`.

        The API can chunk the number of tokens to compute based on `budget`
        if `enable_chunking` is True. If a sequence group has multiple
        sequences (e.g., running beam search), it means it is in the decoding
        phase, so chunking doesn't happen.

        Returns 0 if the new tokens cannot be computed due to the token
        budget.
        """
        num_new_tokens = 0
        seqs = seq_group.get_seqs(status=status)
        for seq in seqs:
            num_new_tokens += seq.get_num_new_tokens()
        assert num_new_tokens > 0
        # Chunk if a running request cannot fit in the budget.
        # If the number of seqs > 1, it means it is doing beam search
        # in a decode phase. Do not chunk in that case.
        if enable_chunking and len(seqs) == 1:
            num_new_tokens = min(num_new_tokens,
                                 budget.remaining_token_budget())
        return num_new_tokens
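
    # Illustrative sketch of the chunking rule above (hypothetical numbers):
    # with 100 prompt tokens left to compute and only 64 tokens of budget
    # remaining, a chunked prefill schedules min(100, 64) == 64 tokens this
    # step and the remaining 36 tokens in a later step.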