gpt2xl-flash.yaml

# @package _global_
defaults:
  - /experiment/owt/gpt2l-flash.yaml
  - override /model/gpt2model: gpt2-xlarge
# Can enable mlp_checkpoint_lvl to fit to A100 40GB
# model:
#   config:
#     # mlp_checkpoint_lvl: ${eval:"[1] * 18 + [2] * 18"}
#     mlp_checkpoint_lvl: 1
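# (Note: mlp_checkpoint_lvl controls activation checkpointing in the MLP blocks;
# higher levels recompute more activations in the backward pass, saving memory at
# the cost of extra compute. A per-layer list assigns a level to each block, as in
# the commented eval expression above. This reading is inferred from the benchmark
# notes at the bottom of this file, not stated here explicitly.)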
datamodule:
  batch_size: ${eval:"2 if ${train.gpu_mem} < 24 else (4 if ${train.gpu_mem} < 40 else (8 if ${train.gpu_mem} < 80 else 16))"}
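# i.e. batch size selected from train.gpu_mem: <24GB -> 2, 24-40GB -> 4,
# 40-80GB -> 8, >=80GB -> 16.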
# With adamw-zero optimizer, on A100 40GB:
# checkpoint_lvl=1, batch size = 4: mem 37GB, 4650ms / batch of 512 (285ms * 15 + 375ms * 1)
# checkpoint_lvl=1, batch size = 8: mem 46GB, 4330ms / batch of 512 (530ms * 7 + 620ms * 1)
# checkpoint_lvl=2, batch size = 8: mem 41GB, 4570ms / batch of 512 (560ms * 7 + 650ms * 1)
# With adamw-apex-distributed optimizer:
# checkpoint_lvl=1, batch size = 8: mem 41.5GB, 4500ms / batch of 512 (550ms * 7 + 650ms * 1)
# checkpoint_lvl=1 for 24 layers and checkpoint_lvl=2 for 24 layers,
# batch size = 8: mem 39GB, 4640ms / batch of 512 (565ms * 7 + 675ms * 1)
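# (Reading the timing breakdowns: each "a ms * n + b ms * 1" appears to be n
# gradient-accumulation micro-steps plus one final micro-step that also runs the
# optimizer step, e.g. 15 + 1 micro-batches of 4, or 7 + 1 micro-batches of 8,
# per global batch of 512, which would correspond to an 8-GPU run. This is an
# inference from the numbers, not stated in the original config.)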