- import contextlib
- import functools
- from typing import List, Optional, Tuple, Union
- import torch
- from loguru import logger
- import aphrodite.common.envs as envs
- from aphrodite._core_ext import ScalarType
- from aphrodite.common.utils import is_hip
- from aphrodite.platforms import current_platform
- if not current_platform.is_tpu():
- try:
- import aphrodite._C
- except ImportError as e:
- logger.warning(f"Failed to import aphrodite._C: {e}")
- if current_platform.is_rocm():
- import aphrodite._rocm_C # noqa: F401
- with contextlib.suppress(ImportError):
- # ruff: noqa: F401
- import aphrodite._moe_C
- def hint_on_error(fn):
- @functools.wraps(fn)
- def wrapper(*args, **kwargs):
- try:
- return fn(*args, **kwargs)
- except AttributeError as e:
- msg = (
- f"Error in calling custom op {fn.__name__}: {e}\n"
- f"Possibly you have built or installed an obsolete version of aphrodite.\n"
- f"Please try a clean build and install of aphrodite,"
- f"or remove old built files such as aphrodite/*.so and build/ ."
- )
- logger.error(msg)
- raise e
- return wrapper
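- # The decorator above is applied automatically to every op wrapper in this
- # module by the globals() loop at the bottom of the file; an explicit,
- # illustrative equivalent would be:
- #
- #     silu_and_mul = hint_on_error(silu_and_mul)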
- # activation ops
- def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- torch.ops._C.silu_and_mul(out, x)
- def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- torch.ops._C.gelu_and_mul(out, x)
- def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- torch.ops._C.gelu_tanh_and_mul(out, x)
- def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
- torch.ops._C.gelu_fast(out, x)
- def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
- torch.ops._C.gelu_new(out, x)
- def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
- torch.ops._C.gelu_quick(out, x)
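- # Example (illustrative sketch; assumes the usual gated-MLP layout where the
- # last dimension of `x` holds the gate and up projections side by side, so
- # `out` has half the last dimension of `x`):
- #
- #     x = torch.randn(num_tokens, 2 * d, device="cuda", dtype=torch.float16)
- #     out = torch.empty(num_tokens, d, device="cuda", dtype=torch.float16)
- #     silu_and_mul(out, x)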
- # page attention ops
- def paged_attention_v1(
- out: torch.Tensor,
- query: torch.Tensor,
- key_cache: torch.Tensor,
- value_cache: torch.Tensor,
- num_kv_heads: int,
- scale: float,
- block_tables: torch.Tensor,
- seq_lens: torch.Tensor,
- block_size: int,
- max_seq_len: int,
- alibi_slopes: Optional[torch.Tensor],
- kv_cache_dtype: str,
- k_scale: float,
- v_scale: float,
- tp_rank: int = 0,
- blocksparse_local_blocks: int = 0,
- blocksparse_vert_stride: int = 0,
- blocksparse_block_size: int = 64,
- blocksparse_head_sliding_step: int = 0,
- ) -> None:
- torch.ops._C.paged_attention_v1(
- out, query, key_cache, value_cache, num_kv_heads, scale, block_tables,
- seq_lens, block_size, max_seq_len, alibi_slopes, kv_cache_dtype,
- k_scale, v_scale, tp_rank, blocksparse_local_blocks,
- blocksparse_vert_stride, blocksparse_block_size,
- blocksparse_head_sliding_step)
- def paged_attention_v2(
- out: torch.Tensor,
- exp_sum: torch.Tensor,
- max_logits: torch.Tensor,
- tmp_out: torch.Tensor,
- query: torch.Tensor,
- key_cache: torch.Tensor,
- value_cache: torch.Tensor,
- num_kv_heads: int,
- scale: float,
- block_tables: torch.Tensor,
- seq_lens: torch.Tensor,
- block_size: int,
- max_seq_len: int,
- alibi_slopes: Optional[torch.Tensor],
- kv_cache_dtype: str,
- k_scale: float,
- v_scale: float,
- tp_rank: int = 0,
- blocksparse_local_blocks: int = 0,
- blocksparse_vert_stride: int = 0,
- blocksparse_block_size: int = 64,
- blocksparse_head_sliding_step: int = 0,
- ) -> None:
- torch.ops._C.paged_attention_v2(
- out, exp_sum, max_logits, tmp_out, query, key_cache, value_cache,
- num_kv_heads, scale, block_tables, seq_lens, block_size, max_seq_len,
- alibi_slopes, kv_cache_dtype, k_scale, v_scale, tp_rank,
- blocksparse_local_blocks, blocksparse_vert_stride,
- blocksparse_block_size, blocksparse_head_sliding_step)
- def paged_attention_rocm(
- out: torch.Tensor,
- exp_sum: torch.Tensor,
- max_logits: torch.Tensor,
- tmp_out: torch.Tensor,
- query: torch.Tensor,
- key_cache: torch.Tensor,
- value_cache: torch.Tensor,
- num_kv_heads: int,
- scale: float,
- block_tables: torch.Tensor,
- seq_lens: torch.Tensor,
- block_size: int,
- max_seq_len: int,
- alibi_slopes: Optional[torch.Tensor],
- kv_cache_dtype: str,
- k_scale: float,
- v_scale: float,
- ) -> None:
- torch.ops._rocm_C.paged_attention(out, exp_sum, max_logits, tmp_out, query,
- key_cache, value_cache, num_kv_heads,
- scale, block_tables, seq_lens,
- block_size, max_seq_len, alibi_slopes,
- kv_cache_dtype, k_scale, v_scale)
- # pos encoding ops
- def rotary_embedding(
- positions: torch.Tensor,
- query: torch.Tensor,
- key: torch.Tensor,
- head_size: int,
- cos_sin_cache: torch.Tensor,
- is_neox: bool,
- ) -> None:
- torch.ops._C.rotary_embedding(positions, query, key, head_size,
- cos_sin_cache, is_neox)
- def batched_rotary_embedding(positions: torch.Tensor, query: torch.Tensor,
- key: torch.Tensor, head_size: int,
- cos_sin_cache: torch.Tensor, is_neox: bool,
- rot_dim: int,
- cos_sin_cache_offsets: torch.Tensor) -> None:
- torch.ops._C.batched_rotary_embedding(positions, query, key, head_size,
- cos_sin_cache, is_neox, rot_dim,
- cos_sin_cache_offsets)
- # layer norm ops
- def rms_norm(out: torch.Tensor, input: torch.Tensor, weight: torch.Tensor,
- epsilon: float) -> None:
- torch.ops._C.rms_norm(out, input, weight, epsilon)
- def fused_add_rms_norm(input: torch.Tensor, residual: torch.Tensor,
- weight: torch.Tensor, epsilon: float) -> None:
- torch.ops._C.fused_add_rms_norm(input, residual, weight, epsilon)
- def advance_step_flashattn(num_seqs: int, num_queries: int, block_size: int,
- input_tokens: torch.Tensor,
- sampled_token_ids: torch.Tensor,
- input_positions: torch.Tensor,
- seq_lens: torch.Tensor, slot_mapping: torch.Tensor,
- block_tables: torch.Tensor) -> None:
- """Advance a step on GPU for existing inputs for a multi-step runner"""
- return torch.ops._C.advance_step_flashattn(num_seqs, num_queries,
- block_size, input_tokens,
- sampled_token_ids,
- input_positions, seq_lens,
- slot_mapping, block_tables)
- def advance_step_flashinfer(num_seqs: int, num_queries: int, block_size: int,
- input_tokens: torch.Tensor,
- sampled_token_ids: torch.Tensor,
- input_positions: torch.Tensor,
- seq_lens: torch.Tensor, slot_mapping: torch.Tensor,
- block_tables: torch.Tensor,
- paged_kv_indices: torch.Tensor,
- paged_kv_indptr: torch.Tensor,
- paged_kv_last_page_len: torch.Tensor,
- block_table_bound: torch.Tensor) -> None:
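- """Advance a step on the GPU in-place for a multi-step runner (FlashInfer backend)."""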
- return torch.ops._C.advance_step_flashinfer(
- num_seqs, num_queries, block_size, input_tokens, sampled_token_ids,
- input_positions, seq_lens, slot_mapping, block_tables,
- paged_kv_indices, paged_kv_indptr, paged_kv_last_page_len,
- block_table_bound)
- # quantization ops
- # awq
- def awq_dequantize(qweight: torch.Tensor, scales: torch.Tensor,
- zeros: torch.Tensor, split_k_iters: int, thx: int,
- thy: int) -> torch.Tensor:
- if envs.APHRODITE_USE_TRITON_AWQ:
- from aphrodite.quantization.awq_triton import awq_dequantize_triton
- return awq_dequantize_triton(qweight, scales, zeros)
- return torch.ops._C.awq_dequantize(qweight, scales, zeros, split_k_iters,
- thx, thy)
- def awq_gemm(input: torch.Tensor, qweight: torch.Tensor, qzeros: torch.Tensor,
- scales: torch.Tensor, split_k_iters: int) -> torch.Tensor:
- if envs.APHRODITE_USE_TRITON_AWQ:
- from aphrodite.quantization.awq_triton import awq_gemm_triton
- return awq_gemm_triton(input, qweight, qzeros, scales, split_k_iters)
- return torch.ops._C.awq_gemm(input, qweight, qzeros, scales, split_k_iters)
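- # Note: when the APHRODITE_USE_TRITON_AWQ setting is enabled, the two AWQ
- # wrappers above dispatch to Triton kernels instead of the CUDA extension
- # (the split_k_iters/thx/thy arguments are ignored by the Triton dequantize
- # path).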
- # gptq
- def gptq_gemm(a: torch.Tensor, b_q_weight: torch.Tensor,
- b_gptq_qzeros: torch.Tensor, b_gptq_scales: torch.Tensor,
- b_g_idx: torch.Tensor, use_exllama: bool,
- bit: int) -> torch.Tensor:
- return torch.ops._C.gptq_gemm(a, b_q_weight, b_gptq_qzeros, b_gptq_scales,
- b_g_idx, use_exllama, bit)
- # TODO: there has to be a better way to do this. The fake (meta) kernels
- # registered below let torch.compile and meta-tensor tracing infer output
- # shapes without running the real extension kernels.
- try:
- torch.ops._C.gptq_gemm # noqa B018
- @torch.library.register_fake("_C::gptq_gemm")
- def _gptq_gemm_fake(a: torch.Tensor, b_q_weight: torch.Tensor,
- b_gptq_qzeros: torch.Tensor,
- b_gptq_scales: torch.Tensor, b_g_idx: torch.Tensor,
- use_exllama: bool, bit: int) -> torch.Tensor:
- return torch.empty((a.size(0), b_q_weight.size(1)),
- dtype=a.dtype,
- device=a.device)
- except Exception:
- pass
- def gptq_shuffle(q_weight: torch.Tensor, q_perm: torch.Tensor,
- bit: int) -> None:
- torch.ops._C.gptq_shuffle(q_weight, q_perm, bit)
- # squeezellm
- def squeezellm_gemm(vec: torch.Tensor, mat: torch.Tensor, mul: torch.Tensor,
- lookup_table: torch.Tensor) -> None:
- torch.ops._C.squeezellm_gemm(vec, mat, mul, lookup_table)
- # marlin
- def marlin_gemm(a: torch.Tensor, b_q_weight: torch.Tensor,
- b_scales: torch.Tensor, workspace: torch.Tensor, size_m: int,
- size_n: int, size_k: int) -> torch.Tensor:
- return torch.ops._C.marlin_gemm(a, b_q_weight, b_scales, workspace, size_m,
- size_n, size_k)
- # marlin_24
- def gptq_marlin_24_gemm(a: torch.Tensor, b_q_weight: torch.Tensor,
- b_meta: torch.Tensor, b_scales: torch.Tensor,
- workspace: torch.Tensor, b_q_type: ScalarType,
- size_m: int, size_n: int, size_k: int) -> torch.Tensor:
- return torch.ops._C.gptq_marlin_24_gemm(a, b_q_weight, b_meta, b_scales,
- workspace, b_q_type, size_m,
- size_n, size_k)
- # TODO: there has to be a better way to do this
- try:
- torch.ops._C.gptq_marlin_24_gemm # noqa B018
- @torch.library.register_fake("_C::gptq_marlin_24_gemm")
- def _gptq_marlin_24_gemm_fake(a: torch.Tensor, b_q_weight: torch.Tensor,
- b_meta: torch.Tensor, b_scales: torch.Tensor,
- workspace: torch.Tensor,
- b_q_type: ScalarType, size_m: int,
- size_n: int, size_k: int) -> torch.Tensor:
- return torch.empty((size_m, size_n), device=a.device, dtype=a.dtype)
- @torch.library.register_fake("_C::gptq_marlin_gemm")
- def _gptq_marlin_gemm_fake(a: torch.Tensor,
- b_q_weight: torch.Tensor,
- b_scales: torch.Tensor,
- b_zeros: torch.Tensor,
- g_idx: torch.Tensor,
- perm: torch.Tensor,
- workspace: torch.Tensor,
- b_q_type: ScalarType,
- size_m: int,
- size_n: int,
- size_k: int,
- is_k_full: bool,
- has_zp: bool = False,
- use_fp32_reduce: bool = False) -> torch.Tensor:
- return torch.empty((size_m, size_n), device=a.device, dtype=a.dtype)
- @torch.library.register_fake("_C::ggml_dequantize")
- def _ggml_dequantize_fake(W: torch.Tensor, quant_type: int, m: int,
- n: int) -> torch.Tensor:
- return torch.empty((m, n), dtype=torch.float16, device=W.device)
- @torch.library.register_fake("_C::ggml_mul_mat_vec_a8")
- def _ggml_mul_mat_vec_a8_fake(
- W: torch.Tensor,
- X: torch.Tensor,
- quant_type: int,
- row: int,
- ) -> torch.Tensor:
- return torch.empty((1, row), dtype=torch.float16, device=W.device)
- @torch.library.register_fake("_C::ggml_mul_mat_a8")
- def _ggml_mul_mat_a8_fake(
- W: torch.Tensor,
- X: torch.Tensor,
- quant_type: int,
- row: int,
- ) -> torch.Tensor:
- batch = X.size(0)
- return torch.empty((batch, row), dtype=torch.float16, device=W.device)
- @torch.library.register_fake("_C::marlin_qqq_gemm")
- def _marlin_qqq_gemm_fake(a: torch.Tensor, b_q_weight: torch.Tensor,
- s_tok: torch.Tensor, s_ch: torch.Tensor,
- s_group: torch.Tensor, workspace: torch.Tensor,
- size_m: int, size_n: int,
- size_k: int) -> torch.Tensor:
- return torch.empty((size_m, size_n),
- dtype=torch.float16,
- device=a.device)
- @torch.library.register_fake("_C::marlin_gemm")
- def _marlin_gemm_fake(a: torch.Tensor, b_q_weight: torch.Tensor,
- b_scales: torch.Tensor, workspace: torch.Tensor,
- size_m: int, size_n: int,
- size_k: int) -> torch.Tensor:
- return torch.empty((size_m, size_n),
- dtype=torch.float16,
- device=a.device)
- @torch.library.register_fake("_C::awq_dequantize")
- def _awq_dequantize_fake(qweight: torch.Tensor, scales: torch.Tensor,
- zeros: torch.Tensor, split_k_iters: int, thx: int,
- thy: int) -> torch.Tensor:
- in_c = qweight.size(0)
- qout_c = qweight.size(1)
- out_c = qout_c * 8
- return torch.empty((in_c, out_c),
- dtype=scales.dtype,
- device=scales.device)
- @torch.library.register_fake("_C::awq_gemm")
- def _awq_gemm_fake(input: torch.Tensor, qweight: torch.Tensor,
- qzeros: torch.Tensor, scales: torch.Tensor,
- split_k_iters: int) -> torch.Tensor:
- num_in_feats = input.size(0)
- return torch.empty((split_k_iters, num_in_feats, qweight.size(1) * 8),
- dtype=input.dtype,
- device=input.device).sum(0)
- @torch.library.register_fake("_C::aqlm_gemm")
- def _aqlm_gemm_fake(input: torch.Tensor, codes: torch.Tensor,
- codebooks: torch.Tensor, scales: torch.Tensor,
- codebook_partition_sizes: List[int],
- bias: Optional[torch.Tensor]) -> torch.Tensor:
- out_features = codes.size(0) * codebooks.size(2)
- flat_input = input.reshape((-1, input.size(-1)))
- flat_output = torch.empty((flat_input.size(0), out_features),
- dtype=input.dtype,
- device=input.device)
- output_sizes = list(input.shape)
- output_sizes.pop()
- output_sizes.append(-1)
- return flat_output.reshape(tuple(output_sizes))
- @torch.library.register_fake("_C::aqlm_dequant")
- def _aqlm_dequant_fake(
- codes: torch.Tensor, codebooks: torch.Tensor,
- codebook_partition_sizes: List[int]) -> torch.Tensor:
- in_features = codes.size(1) * 8
- out_features = codes.size(0)
- return torch.empty((out_features, in_features),
- dtype=codebooks.dtype,
- device=codebooks.device)
- @torch.library.register_fake("_C::fp8_marlin_gemm")
- def _fp8_marlin_gemm_fake(a: torch.Tensor, b_q_weight: torch.Tensor,
- b_scales: torch.Tensor, workspace: torch.Tensor,
- num_bits: int, size_m: int, size_n: int,
- size_k: int) -> torch.Tensor:
- return torch.empty((size_m, size_n), dtype=a.dtype, device=a.device)
- @torch.library.register_fake("_C::machete_gemm")
- def machete_gemm_fake(
- a: torch.Tensor,
- b_q: torch.Tensor,  # Should be the tensor returned by machete_prepack_B
- b_type: ScalarType,
- b_scales: Optional[torch.Tensor] = None,
- b_zeros: Optional[torch.Tensor] = None,
- b_group_size: Optional[int] = None,
- c: Optional[torch.Tensor] = None,
- alpha: Optional[float] = None,
- beta: Optional[float] = None,
- schedule: Optional[str] = None,
- ) -> torch.Tensor:
- m = a.size(0)
- n = b_q.size(1)
- return torch.empty((m, n), device=a.device, dtype=a.dtype)
- @torch.library.register_fake("_C::machete_prepack_B")
- def machete_prepack_B_fake(b_q_weight: torch.Tensor,
- b_type: ScalarType) -> torch.Tensor:
- return torch.empty_like(b_q_weight)
- @torch.library.register_fake("_C::causal_conv1d_fwd")
- def causal_conv1d_fwd_fake(x: torch.Tensor, weight: torch.Tensor,
- bias_: Optional[torch.Tensor],
- seq_idx_: Optional[torch.Tensor],
- initial_states_: Optional[torch.Tensor],
- final_states_out_: Optional[torch.Tensor],
- silu_activation: bool) -> torch.Tensor:
- return torch.empty_like(x)
- @torch.library.register_fake("_C::causal_conv1d_update")
- def causal_conv1d_update_fake(x: torch.Tensor, conv_state: torch.Tensor,
- weight: torch.Tensor,
- bias_: Optional[torch.Tensor],
- silu_activation: bool) -> torch.Tensor:
- return torch.empty_like(x)
- @torch.library.register_fake("_C::selective_scan_fwd")
- def selective_scan_fwd_fake(
- u: torch.Tensor, delta: torch.Tensor, A: torch.Tensor,
- B: torch.Tensor, C: torch.Tensor, D_: Optional[torch.Tensor],
- z_: Optional[torch.Tensor], delta_bias_: Optional[torch.Tensor],
- delta_softplus: bool, index_: Optional[torch.Tensor],
- x: Optional[torch.Tensor]) -> List[torch.Tensor]:
- a = torch.empty_like(u)
- if x is not None:
- b = x
- else:
- b = torch.empty((u.size(0), u.size(1), A.size(1)),
- dtype=u.dtype,
- device=u.device)
- if z_ is not None:
- c = torch.empty_like(z_)
- return [a, b, c]
- else:
- return [a, b]
- except Exception:
- pass
- # fp8 marlin
- def fp8_marlin_gemm(a: torch.Tensor, b_q_weight: torch.Tensor,
- b_scales: torch.Tensor, workspace: torch.Tensor,
- num_bits: int, size_m: int, size_n: int,
- size_k: int) -> torch.Tensor:
- return torch.ops._C.fp8_marlin_gemm(a, b_q_weight, b_scales, workspace,
- num_bits, size_m, size_n, size_k)
- # cutlass
- def cutlass_scaled_mm_supports_fp8(cuda_device_capability: int) -> bool:
- return torch.ops._C.cutlass_scaled_mm_supports_fp8(cuda_device_capability)
- def cutlass_scaled_mm(a: torch.Tensor,
- b: torch.Tensor,
- scale_a: torch.Tensor,
- scale_b: torch.Tensor,
- out_dtype: torch.dtype,
- bias: Optional[torch.Tensor] = None) -> torch.Tensor:
- assert (b.shape[0] % 16 == 0 and b.shape[1] % 16 == 0)
- assert (out_dtype is torch.bfloat16 or out_dtype is torch.float16)
- assert bias is None or (bias.shape[0] == b.shape[1]
- and bias.dtype == out_dtype)
- m = a.shape[0]
- n = b.shape[1]
- out = torch.empty((m, n), dtype=out_dtype, device=a.device)
- torch.ops._C.cutlass_scaled_mm(out, a, b, scale_a, scale_b, bias)
- return out
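- # Example (illustrative sketch; assumes `a_q` and `b_q` are already quantized
- # fp8 tensors of shape (m, k) and (k, n) with both dims of `b_q` multiples of
- # 16, and `scale_a`/`scale_b` are float32 scale tensors, e.g. produced by
- # scaled_fp8_quant below):
- #
- #     out = cutlass_scaled_mm(a_q, b_q, scale_a, scale_b,
- #                             out_dtype=torch.bfloat16)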
- def cutlass_scaled_mm_azp(a: torch.Tensor,
- b: torch.Tensor,
- scale_a: torch.Tensor,
- scale_b: torch.Tensor,
- out_dtype: torch.dtype,
- azp_adj: torch.Tensor,
- azp: Optional[torch.Tensor] = None,
- bias: Optional[torch.Tensor] = None) -> torch.Tensor:
- assert (b.shape[0] % 16 == 0 and b.shape[1] % 16 == 0)
- assert (out_dtype is torch.bfloat16 or out_dtype is torch.float16)
- assert bias is None or (bias.numel() == b.shape[1]
- and bias.dtype == out_dtype)
- m = a.shape[0]
- n = b.shape[1]
- out = torch.empty((m, n), dtype=out_dtype, device=a.device)
- torch.ops._C.cutlass_scaled_mm_azp(out, a, b, scale_a, scale_b, azp_adj,
- azp, bias)
- return out
- # aqlm
- def aqlm_gemm(input: torch.Tensor, codes: torch.Tensor,
- codebooks: torch.Tensor, scales: torch.Tensor,
- codebook_partition_sizes: List[int],
- bias: Optional[torch.Tensor]) -> torch.Tensor:
- return torch.ops._C.aqlm_gemm(input, codes, codebooks, scales,
- codebook_partition_sizes, bias)
- def aqlm_dequant(codes: torch.Tensor, codebooks: torch.Tensor,
- codebook_partition_sizes: List[int]) -> torch.Tensor:
- return torch.ops._C.aqlm_dequant(codes, codebooks,
- codebook_partition_sizes)
- # gptq_marlin
- def gptq_marlin_repack(b_q_weight: torch.Tensor, perm: torch.Tensor,
- size_k: int, size_n: int,
- num_bits: int) -> torch.Tensor:
- return torch.ops._C.gptq_marlin_repack(b_q_weight, perm, size_k, size_n,
- num_bits)
- def awq_marlin_repack(b_q_weight: torch.Tensor, size_k: int, size_n: int,
- num_bits: int) -> torch.Tensor:
- return torch.ops._C.awq_marlin_repack(b_q_weight, size_k, size_n, num_bits)
- def gptq_marlin_moe_repack(b_q_weight: torch.Tensor, perm: torch.Tensor,
- size_k: int, size_n: int,
- num_bits: int) -> torch.Tensor:
- num_experts = b_q_weight.shape[0]
- assert size_k % 16 == 0
- output = torch.empty((num_experts, size_k // 16, size_n * 2),
- device=b_q_weight.device,
- dtype=b_q_weight.dtype)
- for e in range(num_experts):
- output[e] = torch.ops._C.gptq_marlin_repack(b_q_weight[e], perm[e],
- size_k, size_n, num_bits)
- return output
- def gptq_marlin_gemm(a: torch.Tensor,
- b_q_weight: torch.Tensor,
- b_scales: torch.Tensor,
- b_zeros: torch.Tensor,
- g_idx: torch.Tensor,
- perm: torch.Tensor,
- workspace: torch.Tensor,
- b_q_type: ScalarType,
- size_m: int,
- size_n: int,
- size_k: int,
- is_k_full: bool,
- has_zp: bool = False,
- use_fp32_reduce: bool = False,
- is_zp_float: bool = False) -> torch.Tensor:
- return torch.ops._C.gptq_marlin_gemm(a, b_q_weight, b_scales, b_zeros,
- g_idx, perm, workspace, b_q_type,
- size_m, size_n, size_k, is_k_full,
- has_zp, use_fp32_reduce,
- is_zp_float)
- # machete
- def machete_supported_schedules(b_type: ScalarType) -> List[str]:
- return torch.ops._C.machete_supported_schedules(b_type)
- def machete_gemm(
- a: torch.Tensor,
- b_q: torch.Tensor, # Should be the tensor returned by machete_prepack_B
- b_type: ScalarType,
- b_scales: Optional[torch.Tensor] = None,
- b_zeros: Optional[torch.Tensor] = None,
- b_group_size: Optional[int] = None,
- c: Optional[torch.Tensor] = None,
- alpha: Optional[float] = None,
- beta: Optional[float] = None,
- schedule: Optional[str] = None,
- ) -> torch.Tensor:
- return torch.ops._C.machete_gemm(a, b_q, b_type, b_scales, b_zeros,
- b_group_size, c, alpha, beta, schedule)
- def machete_prepack_B(b_q_weight: torch.Tensor,
- b_type: ScalarType) -> torch.Tensor:
- return torch.ops._C.machete_prepack_B(b_q_weight, b_type)
- def permute_cols(a: torch.Tensor, perm: torch.Tensor) -> torch.Tensor:
- return torch.ops._C.permute_cols(a, perm)
- # fp8
- def scaled_fp8_quant(
- input: torch.Tensor,
- scale: Optional[torch.Tensor] = None,
- num_token_padding: Optional[int] = None,
- scale_ub: Optional[torch.Tensor] = None,
- use_per_token_if_dynamic: bool = False,
- ) -> Tuple[torch.Tensor, torch.Tensor]:
- """
- Quantize input tensor to FP8 and return quantized tensor and scale.
- This function supports both static and dynamic quantization: if a scale
- is provided, static scaling is used; otherwise the scale is determined
- dynamically. The output tensor can also be padded for downstream kernels
- that benefit from padding.
- Args:
- input: The input tensor to be quantized to FP8.
- scale: Optional scaling factor for the FP8 quantization.
- num_token_padding: If specified, pad the first dimension
- of the output to at least this value.
- scale_ub: Optional upper bound on the scale when it is computed
- dynamically per token.
- use_per_token_if_dynamic: Whether to do per-tensor or per-token
- scaling in the dynamic quantization case.
- Returns:
- Tuple[torch.Tensor, torch.Tensor]: The output tensor in FP8 and
- scaling factor.
- """
- # This code assumes batch_dim and num_tokens are flattened
- assert (input.ndim == 2)
- shape = input.shape
- # On ROCm, the output fp8 dtype is torch.float8_e4m3fnuz.
- out_dtype: torch.dtype = (torch.float8_e4m3fnuz
- if is_hip() else torch.float8_e4m3fn)
- if num_token_padding:
- shape = (max(num_token_padding, input.shape[0]), shape[1])
- output = torch.empty(shape, device=input.device, dtype=out_dtype)
- if scale is None:
- if use_per_token_if_dynamic:
- scale = torch.empty((shape[0], 1),
- device=input.device,
- dtype=torch.float32)
- torch.ops._C.dynamic_per_token_scaled_fp8_quant(
- output, input, scale, scale_ub)
- else:
- scale = torch.zeros(1, device=input.device, dtype=torch.float32)
- torch.ops._C.dynamic_scaled_fp8_quant(output, input, scale)
- else:
- # num_token_padding not implemented for this case
- assert (scale.numel() == 1 or num_token_padding is None)
- torch.ops._C.static_scaled_fp8_quant(output, input, scale)
- return output, scale
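- # Example (illustrative; assumes a 2D fp16/bf16 activation tensor `x` on GPU):
- #
- #     x_q, x_scale = scaled_fp8_quant(x)                      # dynamic scale
- #     x_q, x_scale = scaled_fp8_quant(x, scale=static_scale)  # static scale
- #     x_q, x_scales = scaled_fp8_quant(x, use_per_token_if_dynamic=True)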
- # int8
- def scaled_int8_quant(
- input: torch.Tensor,
- scale: Optional[torch.Tensor] = None,
- azp: Optional[torch.Tensor] = None,
- symmetric: bool = True
- ) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:
- """
- Quantize the input tensor to int8 and return the quantized tensor,
- the scale, and optionally the azp.
- Args:
- input: The input tensor to be quantized to int8.
- scale: Optional scaling factor for the int8 quantization.
- When not provided, we invoke dynamic-per-token quantization.
- azp: Optional zero-point for the int8 quantization.
- Must be provided for asymmetric quantization if `scale` is provided.
- symmetric: Whether to use symmetric quantization (scale only, azp ignored).
- Returns:
- Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]: Output int8
- tensor, scales, and optionally azp.
- """
- output = torch.empty_like(input, dtype=torch.int8)
- if scale is not None:
- # static-per-tensor quantization.
- assert symmetric == (azp is None), \
- "azp must only be provided for asymmetric quantization."
- torch.ops._C.static_scaled_int8_quant(output, input, scale, azp)
- return output, scale, None
- # dynamic-per-token quantization.
- input_scales = torch.empty((input.numel() // input.shape[-1], 1),
- device=input.device,
- dtype=torch.float32)
- input_azp = None if symmetric else torch.empty_like(input_scales,
- dtype=torch.int32)
- torch.ops._C.dynamic_scaled_int8_quant(output, input, input_scales,
- input_azp)
- return output, input_scales, input_azp
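- # Example (illustrative):
- #
- #     x_q, x_scales, _ = scaled_int8_quant(x)   # dynamic per-token, symmetric
- #     x_q, x_scale, _ = scaled_int8_quant(x, scale=s)        # static per-tensor
- #     x_q, x_scale, _ = scaled_int8_quant(x, scale=s, azp=zp, symmetric=False)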
- # quip#
- def quip_gemv(
- A: torch.Tensor,
- B: torch.Tensor,
- CB: torch.Tensor,
- ) -> torch.Tensor:
- return torch.ops._C.quip_gemv(A, B, CB)
- def quip_decompress(
- YIs: torch.Tensor,
- CB: torch.Tensor,
- Y: torch.Tensor,
- ) -> torch.Tensor:
- return torch.ops._C.quip_decompress(YIs, CB, Y)
- # qqq ops
- def marlin_qqq_gemm(a: torch.Tensor, b_q_weight: torch.Tensor,
- s_tok: torch.Tensor, s_ch: torch.Tensor,
- s_group: torch.Tensor, workspace: torch.Tensor,
- size_m: int, size_n: int, size_k: int) -> torch.Tensor:
- return torch.ops._C.marlin_qqq_gemm(a, b_q_weight, s_tok, s_ch, s_group,
- workspace, size_m, size_n, size_k)
- # gguf
- def ggml_dequantize(W: torch.Tensor, quant_type: int, m: int,
- n: int) -> torch.Tensor:
- return torch.ops._C.ggml_dequantize(W, quant_type, m, n)
- def ggml_mul_mat_vec_a8(
- W: torch.Tensor,
- X: torch.Tensor,
- quant_type: int,
- row: int,
- ) -> torch.Tensor:
- return torch.ops._C.ggml_mul_mat_vec_a8(W, X, quant_type, row)
- def ggml_mul_mat_a8(
- W: torch.Tensor,
- X: torch.Tensor,
- quant_type: int,
- row: int,
- ) -> torch.Tensor:
- return torch.ops._C.ggml_mul_mat_a8(W, X, quant_type, row)
- # fp6
- def fp_eXmY_linear_forward_cuda(
- EXPONENT: int,
- MANTISSA: int,
- _in_feats: torch.Tensor,
- _weights: torch.Tensor,
- _scales: torch.Tensor,
- splitK: int = 1,
- ) -> torch.Tensor:
- return torch.ops._C.fp_eXmY_linear_forward_cuda(EXPONENT, MANTISSA,
- _in_feats, _weights,
- _scales, splitK)
- # mamba
- def causal_conv1d_fwd(x: torch.Tensor, weight: torch.Tensor,
- bias_: Optional[torch.Tensor],
- seq_idx_: Optional[torch.Tensor],
- initial_states_: Optional[torch.Tensor],
- final_states_out_: Optional[torch.Tensor],
- silu_activation: bool) -> torch.Tensor:
- return torch.ops._C.causal_conv1d_fwd(x, weight, bias_, seq_idx_,
- initial_states_, final_states_out_,
- silu_activation)
- def causal_conv1d_update(
- x: torch.Tensor,
- conv_state: torch.Tensor,
- weight: torch.Tensor,
- bias_: Optional[torch.Tensor],
- silu_activation: bool,
- conv_state_indices: Optional[torch.Tensor],
- ) -> torch.Tensor:
- return torch.ops._C.causal_conv1d_update(x, conv_state, weight, bias_,
- silu_activation,
- conv_state_indices)
- def selective_scan_fwd(u: torch.Tensor, delta: torch.Tensor, A: torch.Tensor,
- B: torch.Tensor, C: torch.Tensor,
- D_: Optional[torch.Tensor], z_: Optional[torch.Tensor],
- delta_bias_: Optional[torch.Tensor],
- delta_softplus: bool, index_: Optional[torch.Tensor],
- x: Optional[torch.Tensor]) -> List[torch.Tensor]:
- return torch.ops._C.selective_scan_fwd(u, delta, A, B, C, D_, z_,
- delta_bias_, delta_softplus, index_,
- x)
- # moe
- def moe_align_block_size(topk_ids: torch.Tensor, num_experts: int,
- block_size: int, sorted_token_ids: torch.Tensor,
- experts_ids: torch.Tensor,
- num_tokens_post_pad: torch.Tensor) -> None:
- torch.ops._C.moe_align_block_size(topk_ids, num_experts, block_size,
- sorted_token_ids, experts_ids,
- num_tokens_post_pad)
- def topk_softmax(topk_weights: torch.Tensor, topk_ids: torch.Tensor,
- token_expert_indicies: torch.Tensor,
- gating_output: torch.Tensor) -> None:
- torch.ops._moe_C.topk_softmax(topk_weights, topk_ids,
- token_expert_indicies, gating_output)
- def reshape_and_cache(
- key: torch.Tensor,
- value: torch.Tensor,
- key_cache: torch.Tensor,
- value_cache: torch.Tensor,
- slot_mapping: torch.Tensor,
- kv_cache_dtype: str,
- k_scale: float,
- v_scale: float,
- ) -> None:
- torch.ops._C_cache_ops.reshape_and_cache(key, value, key_cache,
- value_cache, slot_mapping,
- kv_cache_dtype, k_scale, v_scale)
- def reshape_and_cache_flash(
- key: torch.Tensor,
- value: torch.Tensor,
- key_cache: torch.Tensor,
- value_cache: torch.Tensor,
- slot_mapping: torch.Tensor,
- kv_cache_dtype: str,
- k_scale: float,
- v_scale: float,
- ) -> None:
- torch.ops._C_cache_ops.reshape_and_cache_flash(key, value, key_cache,
- value_cache, slot_mapping,
- kv_cache_dtype, k_scale,
- v_scale)
- def copy_blocks(key_caches: List[torch.Tensor],
- value_caches: List[torch.Tensor],
- block_mapping: torch.Tensor) -> None:
- torch.ops._C_cache_ops.copy_blocks(key_caches, value_caches, block_mapping)
- def swap_blocks(src: torch.Tensor, dst: torch.Tensor,
- block_mapping: torch.Tensor) -> None:
- torch.ops._C_cache_ops.swap_blocks(src, dst, block_mapping)
- def convert_fp8(output: torch.Tensor,
- input: torch.Tensor,
- scale: float = 1.0,
- kv_dtype: str = "fp8") -> None:
- torch.ops._C_cache_ops.convert_fp8(output, input, scale, kv_dtype)
- def get_device_attribute(attribute: int, device: int) -> int:
- return torch.ops._C_cuda_utils.get_device_attribute(attribute, device)
- def get_max_shared_memory_per_block_device_attribute(device: int) -> int:
- # ruff: noqa: E501
- return torch.ops._C_cuda_utils.get_max_shared_memory_per_block_device_attribute(
- device)
- # custom ar
- def init_custom_ar(meta: torch.Tensor, rank_data: torch.Tensor,
- handles: List[str], offsets: List[int], rank: int,
- full_nvlink: bool) -> int:
- return torch.ops._C_custom_ar.init_custom_ar(meta, rank_data, handles,
- offsets, rank, full_nvlink)
- def all_reduce_reg(fa: int, inp: torch.Tensor, out: torch.Tensor) -> None:
- torch.ops._C_custom_ar.all_reduce_reg(fa, inp, out)
- def all_reduce_unreg(fa: int, inp: torch.Tensor, reg_buffer: torch.Tensor,
- out: torch.Tensor) -> None:
- torch.ops._C_custom_ar.all_reduce_unreg(fa, inp, reg_buffer, out)
- def dispose(fa: int) -> None:
- torch.ops._C_custom_ar.dispose(fa)
- def meta_size() -> int:
- return torch.ops._C_custom_ar.meta_size()
- def register_buffer(fa: int, t: torch.Tensor, handles: List[str],
- offsets: List[int]) -> None:
- return torch.ops._C_custom_ar.register_buffer(fa, t, handles, offsets)
- def get_graph_buffer_ipc_meta(fa: int) -> Tuple[List[str], List[int]]:
- return torch.ops._C_custom_ar.get_graph_buffer_ipc_meta(fa)
- def register_graph_buffers(fa: int, handles: List[str],
- offsets: List[List[int]]) -> None:
- torch.ops._C_custom_ar.register_graph_buffers(fa, handles, offsets)
- # Sampling Kernels
- def sampling_from_probs(probs: torch.Tensor,
- uniform_samples: torch.Tensor,
- deterministic: bool = True,
- check_nan: bool = False) -> torch.Tensor:
- if check_nan and torch.any(torch.isnan(probs)):
- raise ValueError("NaN detected in probs")
- return torch.ops._C.sampling_from_probs(probs, uniform_samples,
- deterministic)
- def _to_tensor_scalar_tuple(x):
- if isinstance(x, torch.Tensor):
- return (x, 0)
- else:
- return (None, x)
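- # The sampling wrappers below accept either a single Python scalar (applied
- # to every request) or a per-request tensor; _to_tensor_scalar_tuple splits
- # that into the (tensor, scalar) pair the kernels expect. Illustrative
- # examples:
- #
- #     _to_tensor_scalar_tuple(0.9)                      # -> (None, 0.9)
- #     _to_tensor_scalar_tuple(torch.full((bs,), 0.9))   # -> (<tensor>, 0)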
- def top_p_sampling_from_probs(
- probs: torch.Tensor,
- uniform_samples: torch.Tensor,
- top_p: Union[torch.Tensor, float],
- deterministic: bool = True,
- check_nan: bool = False) -> Tuple[torch.Tensor, torch.Tensor]:
- if check_nan and torch.any(torch.isnan(probs)):
- raise ValueError("NaN detected in probs")
- return torch.ops._C.top_p_sampling_from_probs(
- probs, uniform_samples, *_to_tensor_scalar_tuple(top_p), deterministic)
- def top_k_sampling_from_probs(
- probs: torch.Tensor,
- uniform_samples: torch.Tensor,
- top_k: Union[torch.Tensor, int],
- deterministic: bool = True,
- check_nan: bool = False) -> Tuple[torch.Tensor, torch.Tensor]:
- if check_nan and torch.any(torch.isnan(probs)):
- raise ValueError("NaN detected in probs")
- return torch.ops._C.top_k_sampling_from_probs(
- probs, uniform_samples, *_to_tensor_scalar_tuple(top_k), deterministic)
- def min_p_sampling_from_probs(
- probs: torch.Tensor,
- uniform_samples: torch.Tensor,
- min_p: Union[torch.Tensor, float],
- deterministic: bool = True,
- check_nan: bool = False) -> Tuple[torch.Tensor, torch.Tensor]:
- if check_nan and torch.any(torch.isnan(probs)):
- raise ValueError("NaN detected in probs")
- return torch.ops._C.min_p_sampling_from_probs(
- probs, uniform_samples, *_to_tensor_scalar_tuple(min_p), deterministic)
- def top_k_mask_logits(
- logits: torch.Tensor,
- top_k: Union[torch.Tensor, int],
- ) -> torch.Tensor:
- return torch.ops._C.top_k_mask_logits(logits,
- *_to_tensor_scalar_tuple(top_k))
- def top_p_renorm_prob(
- probs: torch.Tensor,
- top_p: Union[torch.Tensor, float],
- ) -> torch.Tensor:
- return torch.ops._C.top_p_renorm_prob(probs,
- *_to_tensor_scalar_tuple(top_p))
- def top_k_renorm_prob(
- probs: torch.Tensor,
- top_k: Union[torch.Tensor, int],
- ) -> torch.Tensor:
- return torch.ops._C.top_k_renorm_prob(probs,
- *_to_tensor_scalar_tuple(top_k))
- def top_k_top_p_sampling_from_logits(
- probs: torch.Tensor,
- uniform_samples: torch.Tensor,
- top_k: Union[torch.Tensor, int],
- top_p: Union[torch.Tensor, float],
- filter_apply_order: str = "top_k_first",
- deterministic: bool = True,
- check_nan: bool = False,
- ) -> Tuple[torch.Tensor, torch.Tensor]:
- if filter_apply_order == "top_k_first":
- masked_logits = top_k_mask_logits(probs, top_k)
- probs = torch.softmax(masked_logits, dim=-1)
- return top_p_sampling_from_probs(probs, uniform_samples, top_p,
- deterministic, check_nan)
- elif filter_apply_order == "joint":
- probs = torch.softmax(probs, dim=-1)
- if check_nan and torch.any(torch.isnan(probs)):
- raise ValueError("NaN detected in probs")
- return torch.ops._C.top_k_top_p_sampling_from_logits(
- probs, uniform_samples, *_to_tensor_scalar_tuple(top_k),
- *_to_tensor_scalar_tuple(top_p), deterministic)
- else:
- raise ValueError(f"Invalid filter_apply_order: {filter_apply_order}")
- def top_k_top_p_sampling_from_probs(
- probs: torch.Tensor,
- uniform_samples: torch.Tensor,
- top_k: Union[torch.Tensor, int],
- top_p: Union[torch.Tensor, float],
- filter_apply_order: str = "top_k_first",
- deterministic: bool = True,
- check_nan: bool = False,
- ) -> Tuple[torch.Tensor, torch.Tensor]:
- if filter_apply_order == "top_k_first":
- renorm_probs = top_k_renorm_prob(probs, top_k)
- return top_p_sampling_from_probs(renorm_probs, uniform_samples, top_p,
- deterministic, check_nan)
- elif filter_apply_order == "joint":
- if check_nan and torch.any(torch.isnan(probs)):
- raise ValueError("NaN detected in probs")
- return torch.ops._C.top_k_top_p_sampling_from_probs(
- probs, uniform_samples, *_to_tensor_scalar_tuple(top_k),
- *_to_tensor_scalar_tuple(top_p), deterministic)
- else:
- raise ValueError(f"Invalid filter_apply_order: {filter_apply_order}")
- # TODO: remove this later
- names_and_values = globals()
- names_and_values_to_update = {}
- # pre-declare the loop variables so that assigning them inside the loop
- # does not change the size of globals() while it is being iterated
- k, v, arg = None, None, None
- fn_type = type(lambda x: x)
- for k, v in names_and_values.items():
- # find functions that are defined in this file and have torch.Tensor
- # in their annotations. `arg == "torch.Tensor"` is used to handle
- # the case when users use `import __annotations__` to turn type
- # hints into strings.
- if isinstance(v, fn_type) \
- and v.__code__.co_filename == __file__ \
- and any(arg is torch.Tensor or arg == "torch.Tensor"
- for arg in v.__annotations__.values()):
- names_and_values_to_update[k] = hint_on_error(v)
- names_and_values.update(names_and_values_to_update)
- del names_and_values_to_update, names_and_values, v, k, fn_type
|