# gpt3-2.7B-flash-hdim128.yaml
# Hydra experiment config: GPT-3 2.7B with FlashAttention, head dim 128.
# Overrides the gpt3xl-flash base experiment; `# @package _global_` places
# these keys at the config root rather than under this file's group.
# @package _global_
defaults:
  - /experiment/pile/gpt3xl-flash.yaml

model:
  config:
    # 2.7B sizing: hidden 2560, 32 layers; 2560 / 20 heads = head dim 128.
    n_embd: 2560
    n_head: 20  # Headdim 128 is faster than headdim 80
    n_layer: 32
    # OmegaConf eval resolver; ${.n_embd} is a relative interpolation
    # within model.config.
    initializer_range: ${eval:"(2 / (${.n_embd} * 5)) ** 0.5"}
    mlp_checkpoint_lvl: 0

datamodule:
  # Per-GPU batch size scaled by available memory (GB):
  # <40 GB -> 1, <80 GB -> 2, otherwise 4.
  batch_size: ${eval:"1 if ${train.gpu_mem} < 40 else (2 if ${train.gpu_mem} < 80 else 4)"}

train:
  optimizer:
    lr: 1.6e-4