util.py

from contextlib import contextmanager
from itertools import chain
from typing import List, Tuple

import torch

from aphrodite.common.sequence import SamplerOutput, SequenceGroupMetadata

SeqId = int


def get_all_seq_ids(
        seq_group_metadata_list: List[SequenceGroupMetadata]) -> List[SeqId]:
    """Given a list of SequenceGroupMetadata, create a list of all
    sequence ids.
    """
    return list(
        chain.from_iterable([
            seq_group_metadata.seq_data.keys()
            for seq_group_metadata in seq_group_metadata_list
        ]))
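
# A minimal usage sketch (hypothetical data): SimpleNamespace stands in for
# SequenceGroupMetadata, which only needs a `seq_data` mapping here. Sequence
# ids are flattened in batch order:
#
#     from types import SimpleNamespace
#     groups = [SimpleNamespace(seq_data={0: None, 1: None}),
#               SimpleNamespace(seq_data={2: None})]
#     assert get_all_seq_ids(groups) == [0, 1, 2]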


def split_batch_by_proposal_len(
    seq_group_metadata_list: List[SequenceGroupMetadata],
    proposal_lens: List[int], select_proposal_len_zero: bool
) -> Tuple[List[SequenceGroupMetadata], List[int]]:
    """Utility function that splits a batch based on whether the proposal
    length is zero or not. We should remove this once Aphrodite supports
    per-sequence proposal lengths in a batch.
    """
    if select_proposal_len_zero:
        predicate = lambda proposal_len: proposal_len == 0
    else:
        predicate = lambda proposal_len: proposal_len != 0

    indices = [
        i for i, (_, proposal_len
                  ) in enumerate(zip(seq_group_metadata_list, proposal_lens))
        if predicate(proposal_len)
    ]
    seq_groups = [
        seq_group for seq_group, proposal_len in zip(
            seq_group_metadata_list, proposal_lens) if predicate(proposal_len)
    ]
    return seq_groups, indices
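
# Example: with proposal_lens == [0, 5, 0], select_proposal_len_zero=True
# returns the first and third groups with indices == [0, 2], while
# select_proposal_len_zero=False returns the second group with
# indices == [1].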


def sampler_output_to_torch(
        sampler_output_list: List[SamplerOutput],
        sampler_transposed: bool) -> Tuple[torch.Tensor, torch.Tensor]:
    """Utility function which converts a list of SamplerOutput to tensors.

    sampler_transposed indicates whether the resulting tensors should be
    transposed so that the batch dimension comes first; the shapes below
    assume it is True.

    Returns:
        sampled_token_ids: torch.Tensor
            shape: [batch_size, len(sampler_output_list)]
        sampled_token_probs: torch.Tensor
            shape: [batch_size, len(sampler_output_list), vocab_size]
    """
    # shape: [num_sampler_output, batch_size, vocab_size]
    sampled_token_probs = torch.stack(
        [
            sampler_output.sampled_token_probs
            for sampler_output in sampler_output_list
        ],
        dim=0,
    )
    if sampler_transposed:
        # shape: [batch_size, num_sampler_output, vocab_size]
        sampled_token_probs = sampled_token_probs.transpose(0, 1)

    # shape: [num_sampler_output, batch_size]
    sampled_token_ids = torch.stack(
        [
            sampler_output.sampled_token_ids.flatten()
            for sampler_output in sampler_output_list
        ],
        dim=0,
    )
    if sampler_transposed:
        # shape: [batch_size, num_sampler_output]
        sampled_token_ids = sampled_token_ids.transpose(0, 1)

    return sampled_token_ids, sampled_token_probs
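
# Shape sketch (hypothetical helper names): stacking k SamplerOutputs that
# each cover a batch of size b yields ids of shape [k, b] and probs of shape
# [k, b, vocab_size]; sampler_transposed=True swaps the first two dims:
#
#     outputs = [run_draft_step() for _ in range(k)]  # hypothetical
#     ids, probs = sampler_output_to_torch(outputs, sampler_transposed=True)
#     # ids: [b, k], probs: [b, k, vocab_size]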


def maybe_mock_device_tensors(sampler_output: SamplerOutput, batch_size: int,
                              vocab_size: int, device: str) -> None:
    """Helper method which mocks out the GPU tensors in SamplerOutput with
    dummy values.
    """
    values = [
        sampler_output.sampled_token_probs, sampler_output.sampled_token_ids
    ]
    # The tensors must either all be set or all be missing.
    assert all(v is None for v in values) or not any(v is None for v in values)
    if not any(v is None for v in values):
        # Do nothing if the tensors are already created (usually in unit
        # tests).
        return

    # Softmax to ensure valid probs.
    sampler_output.sampled_token_probs = torch.nn.functional.softmax(
        torch.rand(batch_size, vocab_size, dtype=torch.float32, device=device),
        dim=-1)

    sampler_output.sampled_token_ids = torch.randint(low=10,
                                                     high=100,
                                                     size=(batch_size, ),
                                                     dtype=torch.long,
                                                     device=device)
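
# Minimal sketch (hypothetical values): fill an empty SamplerOutput with
# valid dummy tensors on the CPU:
#
#     maybe_mock_device_tensors(sampler_output, batch_size=4,
#                               vocab_size=32000, device="cpu")
#     assert sampler_output.sampled_token_probs.shape == (4, 32000)
#     assert sampler_output.sampled_token_ids.shape == (4,)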


@contextmanager
def nvtx_range(msg, *args, **kwargs):
    """
    Context manager / decorator that pushes an NVTX range at the beginning
    of its scope, and pops it at the end. If extra arguments are given,
    they are passed as arguments to msg.format().

    If running with CUDA graphs, you must enable nsys CUDA graph profiling.

    Arguments:
        msg (string): message to associate with the range
    """
    torch.cuda.nvtx.range_push(msg.format(*args, **kwargs))
    try:
        yield
    finally:
        torch.cuda.nvtx.range_pop()
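
# Usage sketch: as a context manager, the NVTX range brackets the `with`
# body; because @contextmanager yields a ContextDecorator, the same helper
# can also wrap a function, pushing/popping a range around each call:
#
#     with nvtx_range("score batch {}", batch_idx):  # hypothetical names
#         scores = scorer.score(batch)
#
#     @nvtx_range("verify_step")
#     def verify_step():
#         ...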