| File | Commit | Message | Last updated |
|---|---|---|---|
| __init__.py | 5884e0b904 | add bitnetforcausallm support | 6 months ago |
| arctic.py | 656459fd84 | make fp8_e4m3 work on nvidia | 6 months ago |
| baichuan.py | 656459fd84 | make fp8_e4m3 work on nvidia | 6 months ago |
| bitnet.py | 9bbc75d2e3 | wip | 6 months ago |
| bloom.py | 656459fd84 | make fp8_e4m3 work on nvidia | 6 months ago |
| chatglm.py | 656459fd84 | make fp8_e4m3 work on nvidia | 6 months ago |
| commandr.py | 656459fd84 | make fp8_e4m3 work on nvidia | 6 months ago |
| dbrx.py | 656459fd84 | make fp8_e4m3 work on nvidia | 6 months ago |
| decilm.py | 50b7c13db0 | refactor: attention selector (#552) | 6 months ago |
| deepseek.py | 656459fd84 | make fp8_e4m3 work on nvidia | 6 months ago |
| falcon.py | 656459fd84 | make fp8_e4m3 work on nvidia | 6 months ago |
| gemma.py | 656459fd84 | make fp8_e4m3 work on nvidia | 6 months ago |
| gpt2.py | 656459fd84 | make fp8_e4m3 work on nvidia | 6 months ago |
| gpt_bigcode.py | 656459fd84 | make fp8_e4m3 work on nvidia | 6 months ago |
| gpt_j.py | 656459fd84 | make fp8_e4m3 work on nvidia | 6 months ago |
| gpt_neox.py | 656459fd84 | make fp8_e4m3 work on nvidia | 6 months ago |
| internlm2.py | 656459fd84 | make fp8_e4m3 work on nvidia | 6 months ago |
| jais.py | 656459fd84 | make fp8_e4m3 work on nvidia | 6 months ago |
| llama.py | ac79d115b3 | add guards for prefix caching, fp8, chunked, etc | 6 months ago |
| llama_embedding.py | 50b7c13db0 | refactor: attention selector (#552) | 6 months ago |
| llava.py | 24a2d9c2c8 | minor llava refactoring | 6 months ago |
| minicpm.py | 656459fd84 | make fp8_e4m3 work on nvidia | 6 months ago |
| mixtral.py | ac79d115b3 | add guards for prefix caching, fp8, chunked, etc | 6 months ago |
| mixtral_quant.py | ac79d115b3 | add guards for prefix caching, fp8, chunked, etc | 6 months ago |
| mpt.py | 656459fd84 | make fp8_e4m3 work on nvidia | 6 months ago |
| olmo.py | 656459fd84 | make fp8_e4m3 work on nvidia | 6 months ago |
| opt.py | 656459fd84 | make fp8_e4m3 work on nvidia | 6 months ago |
| orion.py | 656459fd84 | make fp8_e4m3 work on nvidia | 6 months ago |
| phi.py | 656459fd84 | make fp8_e4m3 work on nvidia | 6 months ago |
| phi3_small.py | 696f2cd59c | add phi3_small support with blocksparse attention | 6 months ago |
| qwen.py | 656459fd84 | make fp8_e4m3 work on nvidia | 6 months ago |
| qwen2.py | ac79d115b3 | add guards for prefix caching, fp8, chunked, etc | 6 months ago |
| qwen2_moe.py | 656459fd84 | make fp8_e4m3 work on nvidia | 6 months ago |
| stablelm.py | 656459fd84 | make fp8_e4m3 work on nvidia | 6 months ago |
| starcoder2.py | ac79d115b3 | add guards for prefix caching, fp8, chunked, etc | 6 months ago |
| vlm_base.py | f970f3f3fb | add base class for VLMs | 6 months ago |
| xverse.py | ac79d115b3 | add guards for prefix caching, fp8, chunked, etc | 6 months ago |