# metadata.py

from typing import Dict, List, Tuple

import torch
from xformers.ops import AttentionBias

from aphrodite.common.sampling_params import SamplingParams
from aphrodite.common.sequence import SequenceData


class InputMetadata:
    """Metadata for input sequences. Used for PagedAttention.

    Args:
        seq_groups: List of (seq_ids, sampling_params) pairs.
        seq_data: Mapping from seq_id to SequenceData.
        prompt_lens: Lengths of the prompts.
        slot_mapping: The physical KV-cache slot that each token's key and
            value are written to.
        context_lens: The length of the attention context for each
            generation token.
        max_context_len: The maximum context length.
        block_tables: The block tables (seq_id -> list of physical block
            numbers).
    """

    def __init__(
        self,
        seq_groups: List[Tuple[List[int], SamplingParams]],
        seq_data: Dict[int, SequenceData],
        prompt_lens: List[int],
        slot_mapping: torch.Tensor,
        context_lens: torch.Tensor,
        max_context_len: int,
        block_tables: torch.Tensor,
    ) -> None:
        self.seq_groups = seq_groups
        self.seq_data = seq_data
        self.prompt_lens = prompt_lens
        self.slot_mapping = slot_mapping
        self.context_lens = context_lens
        self.max_context_len = max_context_len
        self.block_tables = block_tables

        # Derived counts: prompt tokens are processed in the prefill phase,
        # generation tokens in the decode phase.
        self.num_prompts = len(prompt_lens)
        self.num_prompt_tokens = sum(prompt_lens)
        self.num_generation_tokens = context_lens.shape[0]
        self.num_valid_tokens = slot_mapping.shape[0]
        if block_tables.numel() > 0:
            self.max_num_blocks_per_seq = block_tables.shape[1]
        else:
            self.max_num_blocks_per_seq = 0
        assert block_tables.shape[0] == self.num_generation_tokens
        assert context_lens.shape[0] == self.num_generation_tokens

        # Set during the execution of the first attention op.
        self.attn_bias: List[AttentionBias] = []
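
    # Illustrative sketch, not part of the original file: `slot_mapping`
    # stores, per token, the flat index of the physical KV-cache slot the
    # token's key/value is written to. Assuming the usual PagedAttention
    # layout (contiguous slots within fixed-size blocks), the slot for the
    # token at `position` in a sequence would be computed like this:
    @staticmethod
    def _slot_for_token(block_table: List[int], position: int,
                        block_size: int) -> int:
        block_number = block_table[position // block_size]
        block_offset = position % block_size
        return block_number * block_size + block_offset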

    def __repr__(self) -> str:
        # Print only the useful metadata.
        return (f'InputMetadata('
                f'num_valid_tokens={self.num_valid_tokens}, '
                f'num_prompt_tokens={self.num_prompt_tokens}, '
                f'num_prompts={self.num_prompts}, '
                f'prompt_lens={self.prompt_lens}, '
                f'num_generation_tokens={self.num_generation_tokens}, '
                f'context_lens={self.context_lens}, '
                f'max_context_len={self.max_context_len}, '
                f'max_num_blocks_per_seq={self.max_num_blocks_per_seq}, '
                f'block_tables={self.block_tables}, '
                f'slot_mapping={self.slot_mapping})')
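

if __name__ == '__main__':
    # Minimal usage sketch, not part of the original module: builds the
    # metadata for a single decode step of one sequence with a 4-token
    # prompt. Assumes SamplingParams() accepts defaults and SequenceData
    # takes the prompt token ids; the block size of 16 is a hypothetical
    # choice for illustration.
    prompt_token_ids = [1, 2, 3, 4]
    seq_id = 0
    context_len = len(prompt_token_ids) + 1  # prompt plus the new token
    # The new token sits at position 4, which maps to slot 4 of block 0.
    slot = InputMetadata._slot_for_token(block_table=[0], position=4,
                                         block_size=16)

    metadata = InputMetadata(
        seq_groups=[([seq_id], SamplingParams())],
        seq_data={seq_id: SequenceData(prompt_token_ids)},
        prompt_lens=[],  # a pure decode batch carries no prompt tokens
        slot_mapping=torch.tensor([slot], dtype=torch.int),
        context_lens=torch.tensor([context_len], dtype=torch.int),
        max_context_len=context_len,
        block_tables=torch.tensor([[0]], dtype=torch.int),
    )
    print(metadata)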