flashinfer.py

from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Set, Tuple, Type

try:
    from flashinfer import BatchDecodeWithPagedKVCacheWrapper
    from flashinfer.prefill import BatchPrefillWithPagedKVCacheWrapper

    import aphrodite.attention.backends.flash_attn  # noqa
except ImportError:
    BatchDecodeWithPagedKVCacheWrapper = None
    BatchPrefillWithPagedKVCacheWrapper = None

import torch

from aphrodite import _custom_ops as ops
from aphrodite.attention.backends.abstract import (AttentionBackend,
                                                   AttentionImpl,
                                                   AttentionMetadata,
                                                   AttentionMetadataBuilder,
                                                   AttentionType)
from aphrodite.attention.backends.utils import (PAD_SLOT_ID,
                                                compute_slot_mapping,
                                                compute_slot_mapping_start_idx,
                                                is_block_tables_empty)
from aphrodite.attention.ops.paged_attn import PagedAttention
from aphrodite.common.utils import (async_tensor_h2d, get_kv_cache_torch_dtype,
                                    make_tensor_with_pad)

if TYPE_CHECKING:
    from aphrodite.task_handler.model_runner import ModelInputForGPUBuilder


class FlashInferBackend(AttentionBackend):

    @staticmethod
    def get_name() -> str:
        return "flashinfer"

    @staticmethod
    def get_impl_cls() -> Type["FlashInferImpl"]:
        return FlashInferImpl

    @staticmethod
    def get_metadata_cls() -> Type["AttentionMetadata"]:
        return FlashInferMetadata

    @staticmethod
    def get_builder_cls() -> Type["FlashInferMetadataBuilder"]:
        return FlashInferMetadataBuilder

    @staticmethod
    def get_kv_cache_shape(
        num_blocks: int,
        block_size: int,
        num_kv_heads: int,
        head_size: int,
    ) -> Tuple[int, ...]:
        return (num_blocks, 2, block_size, num_kv_heads, head_size)
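
    # Illustrative example (assumed sizes, not from the original file): with
    # num_blocks=1024, block_size=16, num_kv_heads=8, head_size=128, the KV
    # cache for a layer has shape (1024, 2, 16, 8, 128); dimension 1 separates
    # the key plane (index 0) from the value plane (index 1), which is the
    # layout that reshape_and_cache_flash writes into in forward() below.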

    @staticmethod
    def swap_blocks(
        src_kv_cache: torch.Tensor,
        dst_kv_cache: torch.Tensor,
        src_to_dst: torch.Tensor,
    ) -> None:
        PagedAttention.swap_blocks(src_kv_cache, dst_kv_cache, src_to_dst)

    @staticmethod
    def copy_blocks(
        kv_caches: List[torch.Tensor],
        src_to_dists: torch.Tensor,
    ) -> None:
        PagedAttention.copy_blocks(kv_caches, src_to_dists)

    @staticmethod
    def get_supported_head_sizes() -> List[int]:
        return [64, 128, 256]


@dataclass
class FlashInferMetadata(AttentionMetadata):
    # Maximum sequence length among prefill batch. 0 if there are decoding
    # requests only.
    max_prefill_seq_len: int

    use_cuda_graph: bool = True

    prefill_wrapper: Optional[BatchPrefillWithPagedKVCacheWrapper] = None
    decode_wrapper: Optional[BatchDecodeWithPagedKVCacheWrapper] = None

    # Metadata for the prefill stage
    seq_start_loc: Optional[torch.Tensor] = None
    query_start_loc: Optional[torch.Tensor] = None
    block_tables: Optional[torch.Tensor] = None

    # An example for paged_kv_indices, paged_kv_indptr:
    # request 1, page indices [0, 5, 8]
    # request 2, page indices [1, 6, 7]
    # request 3, page indices [3, 4]
    # paged_kv_indices is a concatenation of page indices of all requests:
    # [0, 5, 8, 1, 6, 7, 3, 4]
    # paged_kv_indptr is used to index into paged_kv_indices:
    # [0, 3, 6, 8]

    # The indptr of the paged kv cache, shape: [batch_size + 1]
    paged_kv_indptr: Optional[torch.Tensor] = None
    # The page indices of the paged kv cache
    paged_kv_indices: Optional[torch.Tensor] = None
    # The number of entries in the last page of each request in
    # the paged kv cache, shape: [batch_size]
    paged_kv_last_page_len: Optional[torch.Tensor] = None
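    # Illustrative example (assumed values): with page_size=16 and a request
    # whose sequence length is 35, the request spans 3 pages and its
    # paged_kv_last_page_len entry is 35 % 16 = 3; a 32-token request would
    # instead report a full last page of 16.
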
    # The number of query/output heads
    num_qo_heads: Optional[int] = None
    # The number of key/value heads
    num_kv_heads: Optional[int] = None
    # The dimension of the attention heads
    head_dim: Optional[int] = None
    # Block size of Aphrodite
    page_size: Optional[int] = None
    # The data type of the paged kv cache
    data_type: Optional[torch.dtype] = None
    device: torch.device = torch.device("cuda")
    is_profile_run: bool = False

    def __post_init__(self):
        # Refer to
        # https://github.com/flashinfer-ai/flashinfer/blob/3d55c71a62052c590c130897d3a3db49b14fcc34/include/flashinfer/utils.cuh#L157
        supported_head_sizes = FlashInferBackend.get_supported_head_sizes()
        if self.head_dim is not None and self.head_dim \
                not in supported_head_sizes:
            raise ValueError(
                f"Only {supported_head_sizes} are supported for head_dim, "
                f"received {self.head_dim}.")

    def begin_forward(self):
        if self.num_prefill_tokens > 0:
            if self.paged_kv_indices is None:
                return

            assert self.prefill_wrapper is not None
            assert self.query_start_loc is not None
            assert self.paged_kv_indices is not None
            assert self.paged_kv_indptr is not None
            assert self.paged_kv_last_page_len is not None
            batch_size = self.query_start_loc.shape[0] - 1
            assert batch_size >= 0
            # We will use flash attention for profiling to
            # determine the number of blocks. Therefore,
            # we don't need to prepare the input for flashinfer for the
            # profile run.
            if not self.is_profile_run:
                self.paged_kv_indptr = self.paged_kv_indptr.to(self.device)
                self.paged_kv_last_page_len = self.paged_kv_last_page_len.to(
                    self.device)
                self.paged_kv_indices = self.paged_kv_indices.to(self.device)
                self.prefill_wrapper.end_forward()
                self.prefill_wrapper.begin_forward(
                    self.query_start_loc, self.paged_kv_indptr,
                    self.paged_kv_indices, self.paged_kv_last_page_len,
                    self.num_qo_heads, self.num_kv_heads, self.head_dim,
                    self.page_size)
        else:
            if not self.use_cuda_graph:
                assert self.paged_kv_indices is not None
                assert self.paged_kv_indptr is not None
                assert self.paged_kv_last_page_len is not None
                self.paged_kv_indices = self.paged_kv_indices.to(self.device)
                self.paged_kv_indptr = self.paged_kv_indptr.to(self.device)
                self.paged_kv_last_page_len = self.paged_kv_last_page_len.to(
                    self.device)

            assert self.decode_wrapper is not None
            self.decode_wrapper.end_forward()
            self.decode_wrapper.begin_forward(
                self.paged_kv_indptr,
                self.paged_kv_indices,
                self.paged_kv_last_page_len,
                self.num_qo_heads,
                self.num_kv_heads,
                self.head_dim,
                self.page_size,
                # Disable flashinfer's pos encoding and use Aphrodite's rope.
                pos_encoding_mode="NONE",
                data_type=self.data_type)
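
    # Worked example (illustrative values, not from the original file): for a
    # prefill batch with query_start_loc = [0, 5, 9, 12], the derived
    # batch_size is query_start_loc.shape[0] - 1 = 4 - 1 = 3, covering
    # requests with query lengths 5, 4 and 3.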

    def asdict_zerocopy(self,
                        skip_fields: Optional[Set[str]] = None
                        ) -> Dict[str, Any]:
        if skip_fields is None:
            skip_fields = set()
        # We need to skip the prefill/decode_wrapper field since it cannot be
        # broadcasted with nccl when TP is enabled.
        skip_fields.add('prefill_wrapper')
        skip_fields.add('decode_wrapper')
        return super().asdict_zerocopy(skip_fields)

    @property
    def prefill_metadata(self) -> Optional["FlashInferMetadata"]:
        # Currently chunked prefill is not supported
        if self.num_decode_tokens == 0:
            assert self.num_prefills > 0
            return self
        return None

    @property
    def decode_metadata(self) -> Optional["FlashInferMetadata"]:
        # Currently chunked prefill is not supported
        if self.num_prefills > 0:
            assert self.num_decode_tokens == 0
            return None
        return self


class FlashInferMetadataBuilder(AttentionMetadataBuilder[FlashInferMetadata]):

    def __init__(self, input_builder: "ModelInputForGPUBuilder"):
        self.slot_mapping: List[int] = []
        self.prefill_seq_lens: List[int] = []
        self.context_lens: List[int] = []
        self.block_tables: List[List[int]] = []
        self.curr_seq_lens: List[int] = []
        self.num_prefills = 0
        self.num_prefill_tokens = 0
        self.num_decode_tokens = 0

        self.input_builder = input_builder
        self.runner = input_builder.runner

        self.sliding_window = input_builder.sliding_window
        self.block_size = input_builder.block_size
        self.use_v2_block_manager = (
            input_builder.scheduler_config.use_v2_block_manager)

        # Please follow
        # https://docs.flashinfer.ai/tutorials/kv_layout.html#page-layout
        # for the precise definition of the following fields.
        # An example:
        # request 1, page indices [0, 5, 8]
        # request 2, page indices [1, 6, 7]
        # request 3, page indices [3, 4]
        # paged_kv_indices is a concatenation of page indices of all requests:
        # [0, 5, 8, 1, 6, 7, 3, 4]
        # paged_kv_indptr is used to index into paged_kv_indices:
        # [0, 3, 6, 8]
        self.paged_kv_indices: List[int] = []
        # 0 at the beginning of paged_kv_indptr indicates the start of the
        # first request's page indices in the paged_kv_indices list.
        self.paged_kv_indptr: List[int] = [0]
        # paged_kv_last_page_len is the length of the last page of each
        # request.
        self.paged_kv_last_page_len: List[int] = []

        self.is_profile_run: bool = False

    def _add_seq_group(
            self, inter_data: "ModelInputForGPUBuilder.InterDataForSeqGroup",
            chunked_prefill_enabled: bool):
        """Add a sequence group to the metadata. Specifically update/append
        1. context length.
        2. block table.
        3. slot mapping.
        """
        is_prompt = inter_data.is_prompt
        block_tables = inter_data.block_tables
        computed_block_nums = inter_data.computed_block_nums

        for (seq_id, token_len, seq_len, curr_seq_len, query_len, context_len,
             curr_sliding_window_block) in zip(
                 inter_data.seq_ids, [len(t) for t in inter_data.input_tokens],
                 inter_data.orig_seq_lens, inter_data.seq_lens,
                 inter_data.query_lens, inter_data.context_lens,
                 inter_data.curr_sliding_window_blocks):
            self.context_lens.append(context_len)
            if is_prompt:
                self.num_prefills += 1
                self.num_prefill_tokens += token_len
                self.prefill_seq_lens.append(seq_len)
            else:
                assert query_len == 1, (
                    "seq_len: {}, context_len: {}, query_len: {}".format(
                        seq_len, context_len, query_len))
                self.num_decode_tokens += query_len
                self.curr_seq_lens.append(curr_seq_len)

            # Compute block table.
            # TODO: Combine chunked prefill and prefix caching by
            # only allowing multiple of block_size chunk size.
            # NOTE: This only works for oooooooxxx style attention.
            block_table = []
            if inter_data.prefix_cache_hit:
                block_table = computed_block_nums
            elif ((chunked_prefill_enabled or not is_prompt)
                  and block_tables is not None):
                block_table = block_tables[seq_id][-curr_sliding_window_block:]
            self.block_tables.append(block_table)

            is_profile_run = is_block_tables_empty(block_tables)

            # Compute slot mapping.
            start_idx = compute_slot_mapping_start_idx(
                is_prompt, query_len, context_len, self.sliding_window,
                self.use_v2_block_manager)
            compute_slot_mapping(is_profile_run, self.slot_mapping, seq_id,
                                 seq_len, context_len, start_idx,
                                 self.block_size, inter_data.block_tables)

            # It is not necessary to add paged_kv_indices, paged_kv_indptr,
            # and paged_kv_last_page_len for profile run because we will
            # create dummy inputs.
            if is_profile_run:
                self.is_profile_run = is_profile_run
                return

            block_table = block_tables[seq_id]
            self._update_paged_kv_tensors(block_table, seq_len)

    def _update_paged_kv_tensors(self, block_table: List[int], seq_len: int):
        # Get the number of valid blocks based on sequence length.
        # If seq_len = 16, block_size = 16,
        # block_table_bound is 1 with 1 valid block.
        # If seq_len = 15, block_size = 16,
        # block_table_bound is 0 + 1 with 1 valid block.
        block_table_bound = seq_len // self.block_size + 1 \
            if seq_len % self.block_size != 0 \
            else seq_len // self.block_size

        self.paged_kv_indices.extend(block_table[:block_table_bound])
        self.paged_kv_indptr.append(self.paged_kv_indptr[-1] +
                                    block_table_bound)

        last_page_len = seq_len % self.block_size
        if last_page_len == 0:
            last_page_len = self.block_size
        self.paged_kv_last_page_len.append(last_page_len)
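
    # Worked example (illustrative numbers): for block_size=16 and a request
    # with seq_len=35 whose block table is [7, 2, 9, 4], block_table_bound is
    # 35 // 16 + 1 = 3, so pages [7, 2, 9] are appended to paged_kv_indices,
    # the last paged_kv_indptr entry grows by 3, and paged_kv_last_page_len
    # records 35 % 16 = 3 for this request.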

    def build(self, seq_lens: List[int], query_lens: List[int],
              cuda_graph_pad_size: int, batch_size: int):
        """Build attention metadata with on-device tensors.

        Args:
            seq_lens: The maybe padded sequence lengths of the input sequences.
            query_lens: The query lengths of the input sequences.
            cuda_graph_pad_size: The padding size for cuda graph.
                                 -1 if cuda graph is not used.
            batch_size: The maybe padded batch size.
        """
        for inter_data in self.input_builder.inter_data_list:
            self._add_seq_group(inter_data,
                                self.input_builder.chunked_prefill_enabled)

        device = self.runner.device
        use_captured_graph = cuda_graph_pad_size != -1

        max_query_len = max(query_lens)
        max_prefill_seq_len = max(self.prefill_seq_lens, default=0)
        num_decode_tokens = self.num_decode_tokens

        if use_captured_graph:
            self.slot_mapping.extend([PAD_SLOT_ID] * cuda_graph_pad_size)
            self.block_tables.extend([[]] * cuda_graph_pad_size)
            num_decode_tokens = batch_size

            # The shape of graph_block_tables is
            # [max batch size, max context len // block size].
            input_block_tables = self.runner.graph_block_tables[:batch_size]
            for i, block_table in enumerate(self.block_tables):
                if block_table:
                    input_block_tables[i, :len(block_table)] = block_table
            block_tables = torch.from_numpy(input_block_tables).to(
                device, non_blocking=True)

            last_paged_kv_indptr = self.paged_kv_indptr[-1]
            self.paged_kv_indptr.extend([last_paged_kv_indptr] *
                                        cuda_graph_pad_size)
            self.paged_kv_last_page_len.extend([0] * cuda_graph_pad_size)
        else:
            block_tables = make_tensor_with_pad(
                self.block_tables,
                pad=0,
                dtype=torch.int,
                device=device,
            )
        assert max_query_len > 0, ("query_lens: {}".format(query_lens))

        assert device is not None
        seq_lens_tensor = async_tensor_h2d(seq_lens, torch.int, device,
                                           self.runner.pin_memory)
        query_lens_tensor = async_tensor_h2d(query_lens, torch.long, device,
                                             self.runner.pin_memory)
        slot_mapping_tensor = async_tensor_h2d(self.slot_mapping, torch.long,
                                               device, self.runner.pin_memory)
        query_start_loc = torch.zeros(query_lens_tensor.shape[0] + 1,
                                      dtype=torch.int32,
                                      device=device)
        seq_start_loc = torch.zeros(seq_lens_tensor.shape[0] + 1,
                                    dtype=torch.int32,
                                    device=device)
        torch.cumsum(seq_lens_tensor,
                     dim=0,
                     dtype=seq_start_loc.dtype,
                     out=seq_start_loc[1:])
        torch.cumsum(query_lens_tensor,
                     dim=0,
                     dtype=query_start_loc.dtype,
                     out=query_start_loc[1:])
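
        # Illustrative example (assumed values): query_lens = [5, 4, 3] and
        # seq_lens = [35, 20, 3] yield query_start_loc = [0, 5, 9, 12] and
        # seq_start_loc = [0, 35, 55, 58], i.e. cumulative lengths with a
        # leading zero, which is the layout consumed below as qo_indptr /
        # cu_seqlens by the prefill paths.
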
        if len(self.paged_kv_indptr) > 0:
            paged_kv_indices_tensor = torch.tensor(self.paged_kv_indices,
                                                   device="cpu",
                                                   dtype=torch.int)
            paged_kv_indptr_tensor = torch.tensor(self.paged_kv_indptr,
                                                  device="cpu",
                                                  dtype=torch.int)
            paged_kv_last_page_len_tensor = torch.tensor(
                self.paged_kv_last_page_len, device="cpu", dtype=torch.int)
        else:
            paged_kv_indices_tensor = None
            paged_kv_indptr_tensor = None
            paged_kv_last_page_len_tensor = None

        kv_cache_dtype = get_kv_cache_torch_dtype(
            self.runner.kv_cache_dtype, self.runner.model_config.dtype)
        return FlashInferMetadata(
            num_prefills=self.num_prefills,
            slot_mapping=slot_mapping_tensor,
            num_prefill_tokens=self.num_prefill_tokens,
            num_decode_tokens=num_decode_tokens,
            max_prefill_seq_len=max_prefill_seq_len,
            block_tables=block_tables,
            paged_kv_indptr=paged_kv_indptr_tensor,
            paged_kv_indices=paged_kv_indices_tensor,
            paged_kv_last_page_len=paged_kv_last_page_len_tensor,
            num_qo_heads=self.runner.model_config.get_num_attention_heads(
                self.runner.parallel_config),
            num_kv_heads=self.runner.model_config.get_num_kv_heads(
                self.runner.parallel_config),
            head_dim=self.runner.model_config.get_head_size(),
            page_size=self.block_size,
            seq_start_loc=seq_start_loc,
            query_start_loc=query_start_loc,
            device=device,
            data_type=kv_cache_dtype,
            use_cuda_graph=use_captured_graph,
            is_profile_run=self.is_profile_run)
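
    # Note (editorial, not from the original file): the metadata returned here
    # still holds its paged_kv_* index tensors on the CPU. The model runner is
    # expected to attach the flashinfer prefill/decode wrappers and call
    # FlashInferMetadata.begin_forward(), which moves those tensors to the
    # target device, before FlashInferImpl.forward() runs.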


class FlashInferImpl(AttentionImpl):

    def __init__(
        self,
        num_heads: int,
        head_size: int,
        scale: float,
        num_kv_heads: int,
        alibi_slopes: Optional[List[float]],
        sliding_window: Optional[int],
        kv_cache_dtype: str,
        blocksparse_params: Optional[Dict[str, Any]] = None,
        logits_soft_cap: Optional[float] = None,
    ) -> None:
        assert blocksparse_params is None, (
            "FlashInfer does not support block-sparse attention.")
        self.num_heads = num_heads
        self.head_size = head_size
        self.scale = float(scale)
        self.num_kv_heads = num_kv_heads
        if alibi_slopes is not None:
            alibi_slopes = torch.tensor(alibi_slopes, dtype=torch.float32)
        self.alibi_slopes = alibi_slopes
        if sliding_window is not None:
            raise ValueError("Sliding window is not supported in FlashInfer.")
        self.sliding_window = (-1, -1)
        self.kv_cache_dtype = kv_cache_dtype
        self.logits_soft_cap = logits_soft_cap

        assert self.num_heads % self.num_kv_heads == 0
        self.num_queries_per_kv = self.num_heads // self.num_kv_heads
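
    # Illustrative example (assumed sizes): a model with num_heads=32 and
    # num_kv_heads=8 uses grouped-query attention, so num_queries_per_kv is
    # 32 // 8 = 4, i.e. every KV head is shared by four query heads.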

    def forward(
        self,
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        kv_cache: Optional[torch.Tensor],
        attn_metadata: FlashInferMetadata,
        k_scale: float = 1.0,
        v_scale: float = 1.0,
        attn_type: AttentionType = AttentionType.DECODER,
    ) -> torch.Tensor:
        assert k_scale == 1.0 and v_scale == 1.0, (
            "key/v_scale is not supported in FlashInfer.")
        if attn_type != AttentionType.DECODER:
            raise NotImplementedError("Encoder self-attention and "
                                      "encoder/decoder cross-attention "
                                      "are not implemented for "
                                      "FlashInferImpl")
        num_tokens, hidden_size = query.shape
        query = query.view(-1, self.num_heads, self.head_size)
        key = key.view(-1, self.num_kv_heads, self.head_size)
        value = value.view(-1, self.num_kv_heads, self.head_size)

        if attn_metadata.num_prefill_tokens > 0:
            assert attn_metadata.num_decode_tokens == 0, (
                "Chunked prefill is not supported with flashinfer yet.")
        if attn_metadata.num_decode_tokens > 0:
            assert attn_metadata.num_prefill_tokens == 0, (
                "Chunked prefill is not supported with flashinfer yet.")

        if kv_cache is not None:
            # Use the same reshape and cache kernel as flash attention.
            ops.reshape_and_cache_flash(
                key,
                value,
                kv_cache[:, 0],
                kv_cache[:, 1],
                attn_metadata.slot_mapping.flatten(),
                self.kv_cache_dtype,
                k_scale,
                v_scale,
            )

        # FlashInfer requires the query to be contiguous.
        query = query.contiguous()

        if prefill_meta := attn_metadata.prefill_metadata:
            # We will use flash attention for prefill
            # when kv_cache is not provided.
            # This happens when Aphrodite runs the profiling to
            # determine the number of blocks.
            if kv_cache is None:
                output = torch.ops.aphrodite.flash_attn_varlen_func(
                    q=query,
                    k=key,
                    v=value,
                    cu_seqlens_q=prefill_meta.seq_start_loc,
                    cu_seqlens_k=prefill_meta.seq_start_loc,
                    max_seqlen_q=prefill_meta.max_prefill_seq_len,
                    max_seqlen_k=prefill_meta.max_prefill_seq_len,
                    softmax_scale=self.scale,
                    causal=True,
                    window_size=self.sliding_window,
                    alibi_slopes=self.alibi_slopes,
                )
            else:
                assert prefill_meta is not None
                assert prefill_meta.prefill_wrapper is not None
                output = prefill_meta.prefill_wrapper.forward(
                    query,
                    kv_cache,
                    logits_soft_cap=self.logits_soft_cap,
                    causal=True)
        else:
            assert attn_metadata.decode_metadata is not None
            assert attn_metadata.decode_metadata.decode_wrapper is not None
            output = attn_metadata.decode_metadata.decode_wrapper.forward(
                query,
                kv_cache,
                sm_scale=self.scale,
                logits_soft_cap=self.logits_soft_cap)
        return output.view(num_tokens, hidden_size)
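

# Editorial summary (not part of the original module): per forward() above,
# each step first appends the new keys/values to the paged KV cache with
# ops.reshape_and_cache_flash, then answers the batch with either the
# flashinfer prefill wrapper, the flashinfer decode wrapper, or, during the
# block-count profiling run where kv_cache is None, the flash-attn varlen
# kernel.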