gpt2m-flash.yaml

# @package _global_
defaults:
  - /experiment/owt/gpt2s-flash.yaml
  - override /model/gpt2model: gpt2-medium

# Can enable mlp_checkpoint_lvl to fit batch_size 32 on an A100 40GB
# model:
#   config:
#     mlp_checkpoint_lvl: 1

datamodule:
  # batch_size: 32
  batch_size: ${eval:"8 if ${train.gpu_mem} < 24 else (16 if ${train.gpu_mem} < 40 else (32 if ${train.gpu_mem} < 80 else 64))"}

train:
  optimizer:
    lr: 1.5e-4
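
# Note: the ${eval:...} interpolation above is not a built-in OmegaConf/Hydra
# resolver. It assumes the training entry point registers a custom "eval"
# resolver before the config is composed; a minimal sketch (Python, hypothetical
# placement in the launch script):
#
#   from omegaconf import OmegaConf
#   OmegaConf.register_new_resolver('eval', eval)
#
# Once registered, ${train.gpu_mem} is interpolated first and the resulting
# expression is evaluated as Python, so e.g. train.gpu_mem = 40 selects
# batch_size 32, while an 80GB GPU selects 64.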