| File | Commit | Last commit message | Updated |
|------|--------|---------------------|---------|
| __init__.py | ec17b6c4d0 | fix: Phi3.5 Mini and MoE LoRA inference (#1070) | 5 days ago |
| arctic.py | 0dfa6b60ec | core: support logprobs with multi-step scheduling (#963) | 2 weeks ago |
| baichuan.py | 0dfa6b60ec | core: support logprobs with multi-step scheduling (#963) | 2 weeks ago |
| bart.py | a985143768 | core: add cuda graph support for encoder-decoder models (#1051) | 1 week ago |
| blip.py | f56d6b396a | vlm: fallback to SDPA for ViT models on CPU backend (#982) | 2 weeks ago |
| blip2.py | b4a1e2fd02 | vlm: add tensor parallel support for vision transformer models (#971) | 2 weeks ago |
| bloom.py | 0dfa6b60ec | core: support logprobs with multi-step scheduling (#963) | 2 weeks ago |
| chameleon.py | 0dfa6b60ec | core: support logprobs with multi-step scheduling (#963) | 2 weeks ago |
| chatglm.py | 0dfa6b60ec | core: support logprobs with multi-step scheduling (#963) | 2 weeks ago |
| clip.py | 4d14bd1fe5 | vlm: add multi-input support for LLaVA and InternVL models (#1002) | 2 weeks ago |
| commandr.py | 135dfd648b | fix: LoRA support for Cohere and Jamba models (#1004) | 2 weeks ago |
| dbrx.py | 0dfa6b60ec | core: support logprobs with multi-step scheduling (#963) | 2 weeks ago |
| decilm.py | 9022c6d869 | remove progress_bar imports | 2 months ago |
| deepseek.py | 0dfa6b60ec | core: support logprobs with multi-step scheduling (#963) | 2 weeks ago |
| deepseek_v2.py | 0dfa6b60ec | core: support logprobs with multi-step scheduling (#963) | 2 weeks ago |
| eagle.py | 0dfa6b60ec | core: support logprobs with multi-step scheduling (#963) | 2 weeks ago |
| exaone.py | 0dfa6b60ec | core: support logprobs with multi-step scheduling (#963) | 2 weeks ago |
| falcon.py | 0dfa6b60ec | core: support logprobs with multi-step scheduling (#963) | 2 weeks ago |
| fuyu.py | 0dfa6b60ec | core: support logprobs with multi-step scheduling (#963) | 2 weeks ago |
| gemma.py | 0dfa6b60ec | core: support logprobs with multi-step scheduling (#963) | 2 weeks ago |
| gemma2.py | b33cf04386 | quants: add bitsandbytes support for gemma2 model (#1026) | 1 week ago |
| gpt2.py | 0dfa6b60ec | core: support logprobs with multi-step scheduling (#963) | 2 weeks ago |
| gpt_bigcode.py | 0dfa6b60ec | core: support logprobs with multi-step scheduling (#963) | 2 weeks ago |
| gpt_j.py | 0dfa6b60ec | core: support logprobs with multi-step scheduling (#963) | 2 weeks ago |
| gpt_neox.py | 0dfa6b60ec | core: support logprobs with multi-step scheduling (#963) | 2 weeks ago |
| granite.py | 11f49b5341 | fix: granite logit scale in logit computation (#1054) | 1 week ago |
| idefics2_vision_model.py | f1d0b77c92 | [0.6.0] Release Candidate (#481) | 4 months ago |
| interfaces.py | 0b8b407b6d | feat: support profiling with multiple multi-modal inputs per prompt (#712) | 3 months ago |
| intern_vit.py | f56d6b396a | vlm: fallback to SDPA for ViT models on CPU backend (#982) | 2 weeks ago |
| internlm2.py | 7632f91429 | fix: InternLM2 model with Tensor Parallel (#980) | 2 weeks ago |
| internvl.py | 41ceb754a6 | vlm: fix internvl2 inference with various num_patches (#1030) | 1 week ago |
| jais.py | 0dfa6b60ec | core: support logprobs with multi-step scheduling (#963) | 2 weeks ago |
| jamba.py | 135dfd648b | fix: LoRA support for Cohere and Jamba models (#1004) | 2 weeks ago |
| llama.py | 0dfa6b60ec | core: support logprobs with multi-step scheduling (#963) | 2 weeks ago |
| llama_embedding.py | 9022c6d869 | remove progress_bar imports | 2 months ago |
| llava.py | 4d14bd1fe5 | vlm: add multi-input support for LLaVA and InternVL models (#1002) | 2 weeks ago |
| llava_next.py | 766ea79b89 | vlm: fix feature size calculation for llava-next models (#1079) | 2 days ago |
| llava_next_video.py | be59e30139 | vlm: add support for video modality + llava next video (#1014) | 1 week ago |
| mamba.py | 3bb0f07461 | chore: rename `task_handler` to `worker` (#985) | 2 weeks ago |
| mamba_cache.py | a113309876 | kernel: add meta functions for ops to prevent graph breaks (#1019) | 1 week ago |
| medusa.py | 0dfa6b60ec | core: support logprobs with multi-step scheduling (#963) | 2 weeks ago |
| minicpm.py | ce7b602f03 | model: add support for MiniCPM-3 (#1044) | 1 week ago |
| minicpm3.py | ce7b602f03 | model: add support for MiniCPM-3 (#1044) | 1 week ago |
| minicpmv.py | 548e864404 | models: add support for QwenVL (#995) | 2 weeks ago |
| mixtral.py | 0dfa6b60ec | core: support logprobs with multi-step scheduling (#963) | 2 weeks ago |
| mixtral_quant.py | 0dfa6b60ec | core: support logprobs with multi-step scheduling (#963) | 2 weeks ago |
| mlp_speculator.py | 0dfa6b60ec | core: support logprobs with multi-step scheduling (#963) | 2 weeks ago |
| molmo.py | acc0c727c8 | vlm: add support for molmo vision model (#1069) | 5 days ago |
| mpt.py | 0dfa6b60ec | core: support logprobs with multi-step scheduling (#963) | 2 weeks ago |
| na_vit.py | 9f3e7c86e2 | feat: add fused Marlin MoE kernel (#934) | 2 weeks ago |
| nemotron.py | 0dfa6b60ec | core: support logprobs with multi-step scheduling (#963) | 2 weeks ago |
| olmo.py | 0dfa6b60ec | core: support logprobs with multi-step scheduling (#963) | 2 weeks ago |
| olmoe.py | 0dfa6b60ec | core: support logprobs with multi-step scheduling (#963) | 2 weeks ago |
| opt.py | 0dfa6b60ec | core: support logprobs with multi-step scheduling (#963) | 2 weeks ago |
| orion.py | 0dfa6b60ec | core: support logprobs with multi-step scheduling (#963) | 2 weeks ago |
| paligemma.py | 46d577f019 | vlm: fix siglip layernorm and paligemma weight loading (#991) | 2 weeks ago |
| persimmon.py | 0dfa6b60ec | core: support logprobs with multi-step scheduling (#963) | 2 weeks ago |
| phi.py | 0dfa6b60ec | core: support logprobs with multi-step scheduling (#963) | 2 weeks ago |
| phi3.py | ec17b6c4d0 | fix: Phi3.5 Mini and MoE LoRA inference (#1070) | 5 days ago |
| phi3_small.py | 0dfa6b60ec | core: support logprobs with multi-step scheduling (#963) | 2 weeks ago |
| phi3v.py | 4d14bd1fe5 | vlm: add multi-input support for LLaVA and InternVL models (#1002) | 2 weeks ago |
| phimoe.py | ec17b6c4d0 | fix: Phi3.5 Mini and MoE LoRA inference (#1070) | 5 days ago |
| pixtral.py | 1721bea53a | vlm: add support for Pixtral model (#1022) | 1 week ago |
| qwen.py | 6212072245 | api: support LoRA lineage and base model metadata management (#1072) | 5 days ago |
| qwen2.py | 0dfa6b60ec | core: support logprobs with multi-step scheduling (#963) | 2 weeks ago |
| qwen2_moe.py | 5224389dae | chore: skip loading extra bias for qwen2 moe GPTQ (#1011) | 2 weeks ago |
| qwen2_vl.py | 6212072245 | api: support LoRA lineage and base model metadata management (#1072) | 5 days ago |
| siglip.py | 4d14bd1fe5 | vlm: add multi-input support for LLaVA and InternVL models (#1002) | 2 weeks ago |
| solar.py | 0dfa6b60ec | core: support logprobs with multi-step scheduling (#963) | 2 weeks ago |
| stablelm.py | 0dfa6b60ec | core: support logprobs with multi-step scheduling (#963) | 2 weeks ago |
| starcoder2.py | 0dfa6b60ec | core: support logprobs with multi-step scheduling (#963) | 2 weeks ago |
| ultravox.py | 8eb4a3cfd3 | vlm: support multiple audios per prompt for Ultravox (#990) | 2 weeks ago |
| utils.py | a8bdd488b9 | distributed: support pipeline parallelism for internvl and internlm2 (#965) | 2 weeks ago |
| xverse.py | 0dfa6b60ec | core: support logprobs with multi-step scheduling (#963) | 2 weeks ago |