# logits_processor.py

  1. """A layer that compute logits from hidden_stats."""
from typing import Optional

import torch
import torch.nn as nn

from aphrodite.distributed import tensor_model_parallel_gather
from aphrodite.modeling.sampling_metadata import SamplingMetadata

class LogitsProcessor(nn.Module):
    """Process logits and apply logits processors from sampling metadata.

    This layer does the following:
    1. Gather logits from model hidden_states.
    2. Scale logits if needed.
    3. Apply logits processors (if any).
    """

    def __init__(self,
                 vocab_size: int,
                 org_vocab_size: Optional[int] = None,
                 scale: Optional[float] = 1.0,
                 logits_as_input: bool = False) -> None:
        """
        Args:
            scale: A scaling factor to apply to the logits.
        """
        super().__init__()
        self.scale = scale
        self.vocab_size = vocab_size
        # Whether the input is logits (default is hidden states).
        self.logits_as_input = logits_as_input
        # Original vocabulary size (without LoRA).
        if org_vocab_size is not None:
            self.org_vocab_size = min(org_vocab_size, vocab_size)
        else:
            self.org_vocab_size = vocab_size
    def forward(
        self,
        lm_head: nn.Module,
        hidden_states: torch.Tensor,
        sampling_metadata: SamplingMetadata,
        embedding_bias: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        if self.logits_as_input:
            logits = hidden_states
        else:
            hidden_states = _prune_hidden_states(hidden_states,
                                                 sampling_metadata)
            # Get the logits for the next tokens.
            logits = self._get_logits(hidden_states, lm_head, embedding_bias)

        if logits is not None:
            logits *= self.scale

            # Apply logits processors (if any).
            logits = _apply_logits_processors(logits, sampling_metadata)

        return logits
    def _get_logits(self, hidden_states: torch.Tensor, lm_head: nn.Module,
                    embedding_bias: Optional[torch.Tensor]) -> torch.Tensor:
        # Get the logits for the next tokens.
        logits = lm_head(hidden_states)
        if embedding_bias is not None:
            logits += embedding_bias
        # Gather logits across tensor-parallel ranks; ranks other than the
        # destination rank receive None, hence the check below.
        logits = tensor_model_parallel_gather(logits)
        # Remove paddings in vocab (if any).
        if logits is not None:
            logits = logits[:, :self.org_vocab_size]
        return logits
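

# Illustrative usage sketch, not part of the original module: the surrounding
# model code in Aphrodite is assumed to provide `lm_head`, `hidden_states`, and
# `sampling_metadata`; the layer is then invoked like any nn.Module, and the
# vocab size below is only an example value.
#
#     logits_processor = LogitsProcessor(vocab_size=32000, scale=1.0)
#     logits = logits_processor(lm_head, hidden_states, sampling_metadata)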
def _prune_hidden_states(
    hidden_states: torch.Tensor,
    sampling_metadata: SamplingMetadata,
) -> torch.Tensor:
    return hidden_states.index_select(0,
                                      sampling_metadata.selected_token_indices)
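

# Hedged illustration of the pruning above (the numbers are made up): if two
# prompts of lengths 3 and 4 are batched into a [7, hidden_size] tensor and no
# prompt logprobs are requested, only the final position of each prompt is
# needed for sampling, so selected_token_indices would be torch.tensor([2, 6])
# and index_select returns a [2, hidden_size] tensor holding those two rows.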
def _apply_logits_processors(
    logits: torch.Tensor,
    sampling_metadata: SamplingMetadata,
) -> torch.Tensor:
    logits_row_idx = 0
    found_logits_processors = False
    for i, seq_group in enumerate(sampling_metadata.seq_groups):
        seq_ids, sampling_params = seq_group
        logits_processors = sampling_params.logits_processors

        # Handle prompt_logprobs by skipping the rows in logits that were
        # added for the prompt tokens (prompt logprobs are not processed).
        if (i < sampling_metadata.num_prompts
                and sampling_params.prompt_logprobs is not None):
            assert len(seq_ids) == 1
            logits_row_idx += sampling_metadata.prompt_lens[i] - 1

        if logits_processors:
            found_logits_processors = True
            for seq_id in seq_ids:
                logits_row = logits[logits_row_idx]
                token_ids = sampling_metadata.seq_data[seq_id].output_token_ids
                for logits_processor in logits_processors:
                    logits_row = logits_processor(token_ids, logits_row)
                logits[logits_row_idx] = logits_row
                logits_row_idx += 1
        else:
            logits_row_idx += len(seq_ids)

    if found_logits_processors:
        # Ensure that no rows in logits were unexpectedly skipped.
        assert logits_row_idx == logits.shape[0]

    return logits
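

# Minimal sketch of the per-request logits-processor protocol that
# _apply_logits_processors consumes above: any callable that takes the
# sequence's generated token ids and a 1-D logits row and returns the
# (possibly modified) row. Everything below is illustrative and not part of
# the original module; `ban_token` and the demo tensor shape are hypothetical.


def ban_token(banned_id: int):
    """Build an example processor that masks out one token id on every step."""

    def _processor(token_ids: list, logits_row: torch.Tensor) -> torch.Tensor:
        # Setting the logit to -inf gives the banned token zero probability.
        logits_row[banned_id] = float("-inf")
        return logits_row

    return _processor


if __name__ == "__main__":
    # Stand-in for one row of the [num_tokens, vocab_size] logits tensor.
    row = torch.randn(8)
    processed = ban_token(banned_id=3)(token_ids=[], logits_row=row)
    assert processed[3] == float("-inf")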