Model implementation files. Latest commit to this directory: `9bbc75d2e3` "wip" by AlpinDale (5 months ago).
| File | Commit | Message | Last changed |
|------|--------|---------|--------------|
| `__init__.py` | `5884e0b904` | add bitnetforcausallm support | 5 months ago |
| `arctic.py` | `656459fd84` | make fp8_e4m3 work on nvidia | 6 months ago |
| `baichuan.py` | `656459fd84` | make fp8_e4m3 work on nvidia | 6 months ago |
| `bitnet.py` | `9bbc75d2e3` | wip | 5 months ago |
| `bloom.py` | `656459fd84` | make fp8_e4m3 work on nvidia | 6 months ago |
| `chatglm.py` | `656459fd84` | make fp8_e4m3 work on nvidia | 6 months ago |
| `commandr.py` | `656459fd84` | make fp8_e4m3 work on nvidia | 6 months ago |
| `dbrx.py` | `656459fd84` | make fp8_e4m3 work on nvidia | 6 months ago |
| `decilm.py` | `50b7c13db0` | refactor: attention selector (#552) | 6 months ago |
| `deepseek.py` | `656459fd84` | make fp8_e4m3 work on nvidia | 6 months ago |
| `falcon.py` | `656459fd84` | make fp8_e4m3 work on nvidia | 6 months ago |
| `gemma.py` | `656459fd84` | make fp8_e4m3 work on nvidia | 6 months ago |
| `gpt2.py` | `656459fd84` | make fp8_e4m3 work on nvidia | 6 months ago |
| `gpt_bigcode.py` | `656459fd84` | make fp8_e4m3 work on nvidia | 6 months ago |
| `gpt_j.py` | `656459fd84` | make fp8_e4m3 work on nvidia | 6 months ago |
| `gpt_neox.py` | `656459fd84` | make fp8_e4m3 work on nvidia | 6 months ago |
| `internlm2.py` | `656459fd84` | make fp8_e4m3 work on nvidia | 6 months ago |
| `jais.py` | `656459fd84` | make fp8_e4m3 work on nvidia | 6 months ago |
| `llama.py` | `ac79d115b3` | add guards for prefix caching, fp8, chunked, etc | 5 months ago |
| `llama_embedding.py` | `50b7c13db0` | refactor: attention selector (#552) | 6 months ago |
| `llava.py` | `24a2d9c2c8` | minor llava refactoring | 6 months ago |
| `minicpm.py` | `656459fd84` | make fp8_e4m3 work on nvidia | 6 months ago |
| `mixtral.py` | `ac79d115b3` | add guards for prefix caching, fp8, chunked, etc | 5 months ago |
| `mixtral_quant.py` | `ac79d115b3` | add guards for prefix caching, fp8, chunked, etc | 5 months ago |
| `mpt.py` | `656459fd84` | make fp8_e4m3 work on nvidia | 6 months ago |
| `olmo.py` | `656459fd84` | make fp8_e4m3 work on nvidia | 6 months ago |
| `opt.py` | `656459fd84` | make fp8_e4m3 work on nvidia | 6 months ago |
| `orion.py` | `656459fd84` | make fp8_e4m3 work on nvidia | 6 months ago |
| `phi.py` | `656459fd84` | make fp8_e4m3 work on nvidia | 6 months ago |
| `phi3_small.py` | `696f2cd59c` | add phi3_small support with blocksparse attention | 5 months ago |
| `qwen.py` | `656459fd84` | make fp8_e4m3 work on nvidia | 6 months ago |
| `qwen2.py` | `ac79d115b3` | add guards for prefix caching, fp8, chunked, etc | 5 months ago |
| `qwen2_moe.py` | `656459fd84` | make fp8_e4m3 work on nvidia | 6 months ago |
| `stablelm.py` | `656459fd84` | make fp8_e4m3 work on nvidia | 6 months ago |
| `starcoder2.py` | `ac79d115b3` | add guards for prefix caching, fp8, chunked, etc | 5 months ago |
| `vlm_base.py` | `f970f3f3fb` | add base class for VLMs | 6 months ago |
| `xverse.py` | `ac79d115b3` | add guards for prefix caching, fp8, chunked, etc | 5 months ago |
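The commit messages (e.g. "add bitnetforcausallm support" on `__init__.py`) suggest that each file implements one model family and that `__init__.py` registers them centrally by architecture name. Below is a minimal sketch of that common registry pattern, assuming a mapping from architecture name to (module, class); the dictionary entries, the `resolve_model_class` helper, and the package path are illustrative assumptions, not the file's actual contents.

```python
# Hypothetical sketch of an architecture-name registry, as often used in a
# models/__init__.py of this kind. Names and package path are assumptions.
import importlib
from typing import Dict, Tuple

# Architecture name (as reported in a model's config) -> (module name, class name).
_MODELS: Dict[str, Tuple[str, str]] = {
    "BitnetForCausalLM": ("bitnet", "BitnetForCausalLM"),
    "LlamaForCausalLM": ("llama", "LlamaForCausalLM"),
    "MixtralForCausalLM": ("mixtral", "MixtralForCausalLM"),
    "Phi3SmallForCausalLM": ("phi3_small", "Phi3SmallForCausalLM"),
}


def resolve_model_class(architecture: str):
    """Lazily import the module for an architecture and return its model class."""
    module_name, class_name = _MODELS[architecture]
    # "models" as the package root is an assumption for this sketch.
    module = importlib.import_module(f"models.{module_name}")
    return getattr(module, class_name)
```

Lazy imports keep startup cheap: only the file for the requested architecture is loaded, which matters in a directory with this many per-model implementations.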