block_manager.py

  1. """A block manager that manages token blocks."""
  2. from typing import Dict, List, Optional, Set, Tuple
  3. from aphrodite.common.block import PhysicalTokenBlock
  4. from aphrodite.common.sequence import Sequence, SequenceGroup, SequenceStatus
  5. from aphrodite.common.utils import Device
class BlockAllocator:
    """Manages free physical token blocks for a device.

    The allocator maintains a list of free blocks and allocates a block when
    requested. When a block is freed, its reference count is decremented. If
    the reference count becomes zero, the block is added back to the free list.
    """
    def __init__(
        self,
        device: Device,
        block_size: int,
        num_blocks: int,
    ) -> None:
        self.device = device
        self.block_size = block_size
        self.num_blocks = num_blocks

        # Initialize the free blocks.
        self.free_blocks: List[PhysicalTokenBlock] = []
        for i in range(num_blocks):
            block = PhysicalTokenBlock(device=device,
                                       block_number=i,
                                       block_size=block_size)
            self.free_blocks.append(block)

    def allocate(self) -> PhysicalTokenBlock:
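        """Pop a free block and return it with its ref_count set to 1."""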
        if not self.free_blocks:
            raise ValueError("Out of memory! No free blocks are available.")
        block = self.free_blocks.pop()
        block.ref_count = 1
        return block

    def free(self, block: PhysicalTokenBlock) -> None:
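        """Decrement ref_count and recycle the block once it reaches zero."""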
        if block.ref_count == 0:
            raise ValueError(f"Double free! {block} is already freed.")
        block.ref_count -= 1
        if block.ref_count == 0:
            self.free_blocks.append(block)

    def get_num_free_blocks(self) -> int:
        return len(self.free_blocks)

# Mapping: logical block number -> physical block.
BlockTable = List[PhysicalTokenBlock]


class BlockSpaceManager:
    """Manages the mapping between logical and physical token blocks."""
    def __init__(
        self,
        block_size: int,
        num_gpu_blocks: int,
        num_cpu_blocks: int,
        watermark: float = 0.01,
        sliding_window: Optional[int] = None,
    ) -> None:
        self.block_size = block_size
        self.num_total_gpu_blocks = num_gpu_blocks
        self.num_total_cpu_blocks = num_cpu_blocks

        self.block_sliding_window = None
        if sliding_window is not None:
            assert sliding_window % block_size == 0, (sliding_window,
                                                      block_size)
            self.block_sliding_window = sliding_window // block_size

        self.watermark = watermark
        assert watermark >= 0.0
        self.watermark_blocks = int(watermark * num_gpu_blocks)

        self.gpu_allocator = BlockAllocator(Device.GPU, block_size,
                                            num_gpu_blocks)
        self.cpu_allocator = BlockAllocator(Device.CPU, block_size,
                                            num_cpu_blocks)
        # Mapping: seq_id -> BlockTable.
        self.block_tables: Dict[int, BlockTable] = {}

    def can_allocate(self, seq_group: SequenceGroup) -> bool:
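        """Check whether the group's prompt fits in free GPU blocks while
        keeping the watermark of free blocks intact."""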
        # FIXME: Here we assume that all sequences in the group share
        # the same prompt. This may not be true for preempted sequences.
        seq = seq_group.get_seqs()[0]
        num_required_blocks = len(seq.logical_token_blocks)
        if self.block_sliding_window is not None:
            num_required_blocks = min(num_required_blocks,
                                      self.block_sliding_window)
        num_free_gpu_blocks = self.gpu_allocator.get_num_free_blocks()
        # Use watermark to avoid frequent cache eviction.
        return (num_free_gpu_blocks - num_required_blocks >=
                self.watermark_blocks)

    def allocate(self, seq_group: SequenceGroup) -> None:
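        """Allocate GPU blocks for the group's prompt and share them across
        all sequences in the group."""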
        # NOTE: Here we assume that all sequences in the group have the same
        # prompt.
        seq = seq_group.get_seqs()[0]

        # Allocate new physical token blocks that will store the prompt tokens.
        block_table: BlockTable = []
        for logical_idx in range(len(seq.logical_token_blocks)):
            if (self.block_sliding_window is not None
                    and logical_idx >= self.block_sliding_window):
                block = block_table[logical_idx % self.block_sliding_window]
            else:
                block = self.gpu_allocator.allocate()
            # Set the reference counts of the token blocks.
            block.ref_count = seq_group.num_seqs()
            block_table.append(block)

        # Assign the block table for each sequence.
        for seq in seq_group.get_seqs():
            self.block_tables[seq.seq_id] = block_table.copy()

    def can_append_slot(self, seq_group: SequenceGroup) -> bool:
        # Simple heuristic: If there is at least one free block
        # for each sequence, we can append.
        num_free_gpu_blocks = self.gpu_allocator.get_num_free_blocks()
        num_seqs = seq_group.num_seqs(status=SequenceStatus.RUNNING)
        return num_seqs <= num_free_gpu_blocks

    def append_slot(self, seq: Sequence) -> Optional[Tuple[int, int]]:
        """Allocate a physical slot for a new token."""
        logical_blocks = seq.logical_token_blocks
        block_table = self.block_tables[seq.seq_id]

        if len(block_table) < len(logical_blocks):
            # The sequence has a new logical block.
            # Allocate a new physical block.
            if (self.block_sliding_window
                    and len(block_table) >= self.block_sliding_window):
                block_table.append(block_table[len(block_table) %
                                               self.block_sliding_window])
            else:
                block = self.gpu_allocator.allocate()
                block_table.append(block)
                return None

        # We want to append the token to the last physical block.
        last_block = block_table[-1]
        assert last_block.device == Device.GPU
        if last_block.ref_count == 1:
            # Not shared with other sequences. Appendable.
            return None
        else:
            # The last block is shared with other sequences.
            # Copy on Write: Allocate a new block and copy the tokens.
            new_block = self.gpu_allocator.allocate()
            block_table[-1] = new_block
            self.gpu_allocator.free(last_block)
            return last_block.block_number, new_block.block_number

    def fork(self, parent_seq: Sequence, child_seq: Sequence) -> None:
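        """Share the parent's block table with the forked child sequence."""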
        # NOTE: fork does not allocate a new physical block.
        # Thus, it is always safe from OOM.
        src_block_table = self.block_tables[parent_seq.seq_id]
        self.block_tables[child_seq.seq_id] = src_block_table.copy()
        for block in src_block_table:
            block.ref_count += 1

    def _get_physical_blocks(
            self, seq_group: SequenceGroup) -> List[PhysicalTokenBlock]:
        # NOTE: Here, we assume that the physical blocks are only shared by
        # the sequences in the same group.
        blocks: Set[PhysicalTokenBlock] = set()
        for seq in seq_group.get_seqs():
            if seq.is_finished():
                continue
            blocks.update(self.block_tables[seq.seq_id])
        return list(blocks)

    def can_swap_in(self, seq_group: SequenceGroup) -> bool:
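        """Check whether the group's blocks can be swapped back to the GPU
        without going below the watermark."""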
        blocks = self._get_physical_blocks(seq_group)
        num_swapped_seqs = seq_group.num_seqs(status=SequenceStatus.SWAPPED)
        num_free_blocks = self.gpu_allocator.get_num_free_blocks()
        # NOTE: Conservatively, we assume that every sequence will allocate
        # at least one free block right after the swap-in.
        # NOTE: This should match the logic in can_append_slot().
        num_required_blocks = len(blocks) + num_swapped_seqs
        return num_free_blocks - num_required_blocks >= self.watermark_blocks

    def swap_in(self, seq_group: SequenceGroup) -> Dict[int, int]:
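        """Swap the group's blocks from CPU to GPU and return the mapping of
        CPU block numbers to GPU block numbers."""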
        # CPU block -> GPU block.
        mapping: Dict[PhysicalTokenBlock, PhysicalTokenBlock] = {}
        for seq in seq_group.get_seqs(status=SequenceStatus.SWAPPED):
            new_block_table: BlockTable = []
            block_table = self.block_tables[seq.seq_id]

            for cpu_block in block_table:
                if cpu_block in mapping:
                    gpu_block = mapping[cpu_block]
                    gpu_block.ref_count += 1
                else:
                    gpu_block = self.gpu_allocator.allocate()
                    mapping[cpu_block] = gpu_block
                new_block_table.append(gpu_block)
                # Free the CPU block swapped in to GPU.
                self.cpu_allocator.free(cpu_block)
            self.block_tables[seq.seq_id] = new_block_table

        block_number_mapping = {
            cpu_block.block_number: gpu_block.block_number
            for cpu_block, gpu_block in mapping.items()
        }
        return block_number_mapping

    def can_swap_out(self, seq_group: SequenceGroup) -> bool:
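        """Check there are enough free CPU blocks to hold the group."""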
        blocks = self._get_physical_blocks(seq_group)
        return len(blocks) <= self.cpu_allocator.get_num_free_blocks()

    def swap_out(self, seq_group: SequenceGroup) -> Dict[int, int]:
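        """Swap the group's blocks from GPU to CPU and return the mapping of
        GPU block numbers to CPU block numbers."""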
        # GPU block -> CPU block.
        mapping: Dict[PhysicalTokenBlock, PhysicalTokenBlock] = {}
        for seq in seq_group.get_seqs(status=SequenceStatus.RUNNING):
            new_block_table: BlockTable = []
            block_table = self.block_tables[seq.seq_id]

            for gpu_block in block_table:
                if gpu_block in mapping:
                    cpu_block = mapping[gpu_block]
                    cpu_block.ref_count += 1
                else:
                    cpu_block = self.cpu_allocator.allocate()
                    mapping[gpu_block] = cpu_block
                new_block_table.append(cpu_block)
                # Free the GPU block swapped out to CPU.
                self.gpu_allocator.free(gpu_block)
            self.block_tables[seq.seq_id] = new_block_table

        block_number_mapping = {
            gpu_block.block_number: cpu_block.block_number
            for gpu_block, cpu_block in mapping.items()
        }
        return block_number_mapping

    def _free_block_table(self, block_table: BlockTable) -> None:
        for block in set(block_table):
            if block.device == Device.GPU:
                self.gpu_allocator.free(block)
            else:
                self.cpu_allocator.free(block)

    def free(self, seq: Sequence) -> None:
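        """Release all physical blocks owned by the sequence."""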
        if seq.seq_id not in self.block_tables:
            # Already freed or hasn't been scheduled yet.
            return
        block_table = self.block_tables[seq.seq_id]
        self._free_block_table(block_table)
        del self.block_tables[seq.seq_id]

    def reset(self) -> None:
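        """Free every block table and clear all sequence mappings."""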
        for block_table in self.block_tables.values():
            self._free_block_table(block_table)
        self.block_tables.clear()

    def get_block_table(self, seq: Sequence) -> List[int]:
        block_table = self.block_tables[seq.seq_id]
        return [block.block_number for block in block_table]

    def get_num_free_gpu_blocks(self) -> int:
        return self.gpu_allocator.get_num_free_blocks()

    def get_num_free_cpu_blocks(self) -> int:
        return self.cpu_allocator.get_num_free_blocks()