# @package _global_
# Hydra experiment config: GPT-2 small on OpenWebText with FlashAttention
# and fused CUDA kernels enabled. Inherits everything else from the base
# OWT experiment config.
defaults:
  - /experiment/owt/base.yaml
  - override /model: gpt2
  - override /model/gpt2model: gpt2-small

model:
  config:
    # n_positions is already set to ${datamodule.max_length}
    residual_in_fp32: true
    use_flash_attn: true
    fused_bias_fc: true
    fused_mlp: true
    fused_dropout_add_ln: true
    # Pad the vocab so its size is a multiple of 8 (tensor-core friendly).
    pad_vocab_size_multiple: 8

datamodule:
  # batch_size: 64
  # Per-GPU batch size scaled by available GPU memory (GB):
  # <24GB -> 16, <40GB -> 32, otherwise 64.
  batch_size: ${eval:"16 if ${train.gpu_mem} < 24 else (32 if ${train.gpu_mem} < 40 else 64)"}