# Copyright (c) 2023, Tri Dao.
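"""Tests for the flash-attn implementation of Baichuan models.

These tests compare our fp16 forward pass and generation against the Hugging Face
implementation, using the HF fp32 model as the reference. The tensor-parallel tests
require apex and multiple GPUs and are launched via torchrun (see the comments above
those tests).
"""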

import os
import time

import pytest
import torch
from einops import rearrange
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer

from flash_attn.models.baichuan import (
    baichuan_config_to_gpt2_config,
    remap_state_dict_hf_baichuan,
)
from flash_attn.models.gpt import GPTLMHeadModel, shard_state_dict_tp
from flash_attn.utils.distributed import all_gather_raw
from flash_attn.utils.generation import update_graph_cache
from flash_attn.utils.pretrained import state_dict_from_pretrained


@pytest.mark.parametrize(
    "model_name",
    [
        "baichuan-inc/Baichuan-7B",
        "baichuan-inc/Baichuan-13B-Base",
        "baichuan-inc/Baichuan2-7B-Base",
        "baichuan-inc/Baichuan2-13B-Base",
    ],
)
def test_baichuan_state_dict(model_name):
    config = baichuan_config_to_gpt2_config(
        AutoConfig.from_pretrained(model_name, trust_remote_code=True)
    )
    pretrained_state_dict = remap_state_dict_hf_baichuan(
        state_dict_from_pretrained(model_name), config
    )
    model = GPTLMHeadModel(config, device="meta")  # Without device="meta", init is very slow
    state_dict = model.state_dict()
    assert len(state_dict.keys()) == len(pretrained_state_dict.keys())
    assert state_dict.keys() == pretrained_state_dict.keys()
    for k in state_dict.keys():
        assert state_dict[k].shape == pretrained_state_dict[k].shape


@pytest.mark.parametrize(
    "model_name",
    [
        "baichuan-inc/Baichuan-7B",
        "baichuan-inc/Baichuan-13B-Base",
        "baichuan-inc/Baichuan2-7B-Base",
        "baichuan-inc/Baichuan2-13B-Base",
    ],
)
def test_baichuan_optimized(model_name):
    """Check that our implementation of Baichuan (with all optimizations enabled) matches the
    HF implementation: with the HF fp32 forward pass as the reference, the error of our fp16
    forward pass should be about the same as the error of the HF fp16 forward pass.
    """
    dtype = torch.float16
    device = "cuda"
    config = baichuan_config_to_gpt2_config(
        AutoConfig.from_pretrained(model_name, trust_remote_code=True)
    )
    config.use_flash_attn = True
    config.fused_bias_fc = True
    config.fused_mlp = False  # We don't have fused GatedMLP yet
    config.fused_dropout_add_ln = True
    config.residual_in_fp32 = True

    pretrained_state_dict = remap_state_dict_hf_baichuan(
        state_dict_from_pretrained(model_name), config
    )
    model = GPTLMHeadModel(config, device=device, dtype=dtype)
    model.load_state_dict(pretrained_state_dict)
    model.eval()

    torch.manual_seed(0)
    batch_size = 2
    max_seqlen = 256
    input_ids = torch.randint(
        0, config.vocab_size, (batch_size, max_seqlen), dtype=torch.long, device=device
    )
    with torch.no_grad():
        out = model.transformer(input_ids)
        logits = model(input_ids).logits
    del model

    # Without device_map, the model is loaded on the CPU, which is very slow.
    # Need device_map="auto" since the 13B fp32 model doesn't fit in memory on an A100 40GB.
    model_ref = AutoModelForCausalLM.from_pretrained(
        model_name, device_map="auto", trust_remote_code=True
    )
    model_ref.eval()
    with torch.no_grad():
        out_ref = model_ref.model(input_ids).last_hidden_state.to(device=device)
        logits_ref = model_ref(input_ids).logits.to(device=device)
    del model_ref

    model_hf = AutoModelForCausalLM.from_pretrained(
        model_name,
        torch_dtype=dtype,
        device_map={"": device},
        trust_remote_code=True,
    )
    model_hf.eval()
    with torch.no_grad():
        out_hf = model_hf.model(input_ids).last_hidden_state
        logits_hf = model_hf(input_ids).logits
    del model_hf
- print(f"Output max diff: {(out - out_ref).abs().max().item()}")
- print(f"Output mean diff: {(out - out_ref).abs().mean().item()}")
- print(f"HF fp16 max diff: {(out_hf - out_ref).abs().max().item()}")
- print(f"HF fp16 mean diff: {(out_hf - out_ref).abs().mean().item()}")
    assert (out - out_ref).abs().max().item() < 3 * (out_hf - out_ref).abs().max().item()

    print(f"Logits max diff: {(logits - logits_ref).abs().max().item()}")
    print(f"Logits mean diff: {(logits - logits_ref).abs().mean().item()}")
    print(f"HF fp16 max diff: {(logits_hf - logits_ref).abs().max().item()}")
    print(f"HF fp16 mean diff: {(logits_hf - logits_ref).abs().mean().item()}")
    assert (logits - logits_ref).abs().max().item() < 3 * (
        logits_hf - logits_ref
    ).abs().max().item()


# Run with:
# torchrun --no_python --nproc_per_node=2 pytest -q -s tests/models/test_baichuan.py -k "test_baichuan_parallel_forward"
@pytest.mark.parametrize("world_size", [2])
@pytest.mark.parametrize(
    "model_name",
    [
        "baichuan-inc/Baichuan-7B",
        "baichuan-inc/Baichuan-13B-Base",
        "baichuan-inc/Baichuan2-7B-Base",
        "baichuan-inc/Baichuan2-13B-Base",
    ],
)
def test_baichuan_parallel_forward(model_name, world_size):
    """Check that our implementation of Baichuan (with all optimizations enabled and running
    with tensor parallelism) matches the HF implementation: with the HF fp32 forward pass as
    the reference, the error of our fp16 forward pass should be about the same as the error of
    the HF fp16 forward pass.
    """
    from apex.transformer import parallel_state

    dtype = torch.float16
    config = baichuan_config_to_gpt2_config(
        AutoConfig.from_pretrained(model_name, trust_remote_code=True)
    )
    config.use_flash_attn = True
    config.fused_bias_fc = True
    config.fused_mlp = False  # We don't have fused GatedMLP yet
    config.fused_dropout_add_ln = True
    config.residual_in_fp32 = True

    if not torch.distributed.is_initialized():
        torch.distributed.init_process_group(backend="nccl", init_method="env://")
    device = f"cuda:{torch.distributed.get_rank()}"
    assert world_size <= torch.distributed.get_world_size()
    parallel_state.initialize_model_parallel(tensor_model_parallel_size_=world_size)
    rank = parallel_state.get_tensor_model_parallel_rank()
    process_group = parallel_state.get_tensor_model_parallel_group()

    pretrained_state_dict = remap_state_dict_hf_baichuan(
        state_dict_from_pretrained(model_name), config
    )
    model = GPTLMHeadModel(config, process_group=process_group, device=device, dtype=dtype)
    model.load_state_dict(shard_state_dict_tp(pretrained_state_dict, config, world_size, rank))
    model.eval()

    torch.manual_seed(0)
    batch_size = 2
    max_seqlen = 256
    input_ids = torch.randint(
        0, config.vocab_size, (batch_size, max_seqlen), dtype=torch.long, device=device
    )
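
    # Under tensor parallelism with sequence parallelism (the default here), each rank gets
    # back a flattened (batch * seqlen / world_size, d) shard of the hidden states, and the
    # logits come back sharded along the vocab dimension; all-gather and reshape both so that
    # every rank holds the full (batch, seqlen, ...) tensors before comparing against HF.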
    with torch.no_grad():
        out = model.transformer(input_ids)
        out, _ = all_gather_raw(out, process_group=process_group)
        out = rearrange(out, "(b s) d -> b s d", b=batch_size)
        logits = model(input_ids).logits
        logits = rearrange(logits, "(b s) d -> b s d", b=batch_size)
        logits, _ = all_gather_raw(logits, process_group)
        logits = rearrange(logits, "(n b) ... d -> b ... (n d)", b=batch_size)
    del model
    parallel_state.destroy_model_parallel()

    if rank == 0:
        # Without device_map, the model is loaded on the CPU, which is very slow.
        model_ref = AutoModelForCausalLM.from_pretrained(
            model_name, device_map="auto", trust_remote_code=True
        )
        model_ref.eval()
        with torch.no_grad():
            out_ref = model_ref.model(input_ids).last_hidden_state.to(device=device)
            logits_ref = model_ref(input_ids).logits.to(device=device)
        del model_ref

        model_hf = AutoModelForCausalLM.from_pretrained(
            model_name, torch_dtype=dtype, device_map="auto", trust_remote_code=True
        )
        model_hf.eval()
        with torch.no_grad():
            out_hf = model_hf.model(input_ids).last_hidden_state.to(device=device)
            logits_hf = model_hf(input_ids).logits.to(device=device)
        del model_hf

        print(f"Output max diff: {(out - out_ref).abs().max().item()}")
        print(f"Output mean diff: {(out - out_ref).abs().mean().item()}")
        print(f"HF fp16 max diff: {(out_hf - out_ref).abs().max().item()}")
        print(f"HF fp16 mean diff: {(out_hf - out_ref).abs().mean().item()}")
        assert (out - out_ref).abs().max().item() < 2 * (out_hf - out_ref).abs().max().item()

        print(f"Logits max diff: {(logits - logits_ref).abs().max().item()}")
        print(f"Logits mean diff: {(logits - logits_ref).abs().mean().item()}")
        print(f"HF fp16 max diff: {(logits_hf - logits_ref).abs().max().item()}")
        print(f"HF fp16 mean diff: {(logits_hf - logits_ref).abs().mean().item()}")
        assert (logits - logits_ref).abs().max().item() < 2 * (
            logits_hf - logits_ref
        ).abs().max().item()


@pytest.mark.parametrize(
    "model_name", ["baichuan-inc/Baichuan-7B", "baichuan-inc/Baichuan-13B-Base"]
)
def test_baichuan_generation(model_name):
    dtype = torch.float16
    device = "cuda"
    config = baichuan_config_to_gpt2_config(
        AutoConfig.from_pretrained(model_name, trust_remote_code=True)
    )
    config.use_flash_attn = True
    config.fused_bias_fc = True
    config.fused_mlp = False  # We don't have fused GatedMLP yet
    config.fused_dropout_add_ln = True
    config.residual_in_fp32 = True

    tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
    eos_token_id = tokenizer.eos_token_id

    torch.manual_seed(0)
    batch_size = 1
    seqlen = 2048
    max_length = seqlen + 150
    input_ids = torch.randint(
        0, config.vocab_size, (batch_size, seqlen), dtype=torch.long, device=device
    )

    model_hf = AutoModelForCausalLM.from_pretrained(
        model_name, torch_dtype=dtype, device_map={"": device}, trust_remote_code=True
    )
    model_hf.eval()
    print("HF fp16")
    torch.cuda.synchronize()
    start = time.time()
    out_hf = model_hf.generate(
        input_ids=input_ids,
        max_length=max_length,
        return_dict_in_generate=True,
        output_scores=True,
    )
    torch.cuda.synchronize()
    print(f"Prompt processing + decoding time: {(time.time() - start) * 1000:.0f}ms")
    del model_hf

    # Need device_map="auto" since the 13B fp32 model doesn't fit in memory on an A100 40GB.
    model_ref = AutoModelForCausalLM.from_pretrained(
        model_name, device_map="auto", trust_remote_code=True
    )
    model_ref.eval()
    with torch.no_grad():
        logits_ref = model_ref(out_hf.sequences).logits[:, (seqlen - 1) : -1].to(device=device)
    del model_ref

    pretrained_state_dict = remap_state_dict_hf_baichuan(
        state_dict_from_pretrained(model_name), config
    )
    model = GPTLMHeadModel(config, device=device, dtype=dtype)
    model.load_state_dict(pretrained_state_dict)
    model.eval()
    model(input_ids)  # Warm up
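
    # teacher_outputs pins the generated tokens to the sequence HF produced above, so the
    # per-step scores are directly comparable across implementations.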
- print("Without CUDA graph")
- torch.cuda.synchronize()
- start = time.time()
- out = model.generate(
- input_ids=input_ids,
- max_length=max_length,
- eos_token_id=eos_token_id,
- return_dict_in_generate=True,
- output_scores=True,
- enable_timing=True,
- teacher_outputs=out_hf.sequences,
- )
- torch.cuda.synchronize()
- print(f"Prompt processing + decoding time: {(time.time() - start) * 1000:.0f}ms")

    # Capture the CUDA graph outside the timing loop
    batch_size, seqlen_og = input_ids.shape
    model._decoding_cache = update_graph_cache(model, None, batch_size, seqlen_og, max_length)
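    # update_graph_cache pre-captures the CUDA graphs for decoding; generate(..., cg=True)
    # below then replays the captured graphs instead of re-launching kernels at each step.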
- print("With CUDA graph")
- torch.cuda.synchronize()
- start = time.time()
- out_cg = model.generate(
- input_ids=input_ids,
- max_length=max_length,
- cg=True,
- return_dict_in_generate=True,
- output_scores=True,
- enable_timing=True,
- teacher_outputs=out_hf.sequences,
- )
- torch.cuda.synchronize()
- print(f"Prompt processing + decoding time: {(time.time() - start) * 1000:.0f}ms")
- with torch.no_grad():
- logits_parallel = model(out_hf.sequences).logits[:, (seqlen - 1) : -1]
- logits_hf = torch.stack(out_hf.scores, dim=1)
- logits = torch.stack(out.scores, dim=1)
- logits_cg = torch.stack(out_cg.scores, dim=1)
- del model
- hf_error = (logits_hf - logits_ref).abs().max().item()
- print(f"HF fp16 logits max diff: {hf_error}")
- print(f"Logits max diff: {(logits - logits_ref).abs().max().item() }")
- print(f"Logits CG max diff: {(logits_cg - logits_ref).abs().max().item() }")
- assert (logits_parallel - logits_ref).abs().max().item() < 2 * hf_error
- assert (logits - logits_ref).abs().max().item() < 2 * hf_error
- assert torch.equal(logits_cg, logits)


# Run with:
# torchrun --no_python --nproc_per_node=2 pytest -q -s tests/models/test_baichuan.py -k "baichuan_parallel_generation"
@pytest.mark.parametrize("world_size", [2])
@pytest.mark.parametrize("model_name", ["baichuan-inc/Baichuan-7B"])
def test_baichuan_parallel_generation(model_name, world_size):
    """Check that our implementation matches the HF implementation:
    with the HF fp32 scores as the reference, our fp16 scores should be about the same as
    the HF fp16 scores.
    """
    from apex.transformer import parallel_state

    dtype = torch.float16
    config = baichuan_config_to_gpt2_config(
        AutoConfig.from_pretrained(model_name, trust_remote_code=True)
    )
    config.use_flash_attn = True
    config.fused_bias_fc = True
    config.fused_mlp = False  # We don't have fused GatedMLP yet
    config.fused_dropout_add_ln = False
    config.residual_in_fp32 = True
    config.pad_vocab_size_multiple = 8 * world_size
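    # Pad the vocab so it splits evenly across the tensor-parallel ranks (a multiple of 8
    # per shard presumably also keeps the embedding and lm_head GEMMs tensor-core friendly).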
    config.sequence_parallel = False  # Need to set this to False for generation
    os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "0"

    if not torch.distributed.is_initialized():
        torch.distributed.init_process_group(backend="nccl", init_method="env://")
    device = f"cuda:{torch.distributed.get_rank()}"
    assert world_size <= torch.distributed.get_world_size()
    parallel_state.initialize_model_parallel(tensor_model_parallel_size_=world_size)
    rank = parallel_state.get_tensor_model_parallel_rank()
    process_group = parallel_state.get_tensor_model_parallel_group()

    torch.manual_seed(0)
    batch_size = 1
    seqlen = 100
    max_length = 150
    input_ids = torch.randint(
        0, config.vocab_size, (batch_size, seqlen), dtype=torch.long, device=device
    )
    # Need this; otherwise, when we capture the CUDA graph, the process for GPU 1 would run
    # on both GPU 0 and GPU 1 and things would hang.
    torch.cuda.set_device(device)

    pretrained_state_dict = remap_state_dict_hf_baichuan(
        state_dict_from_pretrained(model_name), config
    )
    model = GPTLMHeadModel(config, process_group=process_group, device=device, dtype=dtype)
    model.load_state_dict(shard_state_dict_tp(pretrained_state_dict, config, world_size, rank))
    model.eval()
- print("Without CUDA graph")
- out = model.generate(
- input_ids=input_ids,
- max_length=max_length,
- tensor_parallel=world_size,
- vocab_size=config.vocab_size,
- # teacher_outputs=out_hf.sequences,
- return_dict_in_generate=True,
- output_scores=True,
- enable_timing=True,
- )
- # Capture graph outside the timing loop
- batch_size, seqlen_og = input_ids.shape
- model._decoding_cache = update_graph_cache(model, None, batch_size, seqlen_og, max_length)
- print("With CUDA graph")
- out_cg = model.generate(
- input_ids=input_ids,
- max_length=max_length,
- tensor_parallel=world_size,
- vocab_size=config.vocab_size,
- cg=True,
- # teacher_outputs=out_hf.sequences,
- return_dict_in_generate=True,
- output_scores=True,
- enable_timing=True,
- )
    del model
    parallel_state.destroy_model_parallel()

    if rank == 0:
        # Without device_map, the model is loaded on the CPU, which is very slow.
        model_hf = AutoModelForCausalLM.from_pretrained(
            model_name, torch_dtype=dtype, device_map="auto", trust_remote_code=True
        )
        model_hf.eval()
        print("HF fp16")
        torch.cuda.synchronize()
        start = time.time()
        with torch.inference_mode():
            out_hf = model_hf.generate(
                input_ids=input_ids,
                max_length=max_length,
                return_dict_in_generate=True,
                output_scores=True,
            )
        torch.cuda.synchronize()
        print(f"Prompt processing + decoding time: {(time.time() - start) * 1000:.0f}ms")
        del model_hf

        model_ref = AutoModelForCausalLM.from_pretrained(
            model_name, device_map="auto", trust_remote_code=True
        )
        model_ref.eval()
        with torch.inference_mode():
            logits_ref = model_ref(out_hf.sequences).logits[:, (seqlen - 1) : -1]
        del model_ref

        logits_hf = torch.stack(out_hf.scores, dim=1)
        logits = torch.stack(out.scores, dim=1)
        logits_cg = torch.stack(out_cg.scores, dim=1)

        hf_error = (logits_hf - logits_ref).abs().max().item()
        print(f"HF fp16 logits max diff: {hf_error}")
        print(f"Logits max diff: {(logits - logits_ref).abs().max().item()}")
        print(f"Logits CG max diff: {(logits_cg - logits_ref).abs().max().item()}")
        assert (logits - logits_ref).abs().max().item() < 2 * hf_error
        assert torch.equal(logits_cg, logits)
|