# @package _global_
# GPT-3 2.7B experiment config (FlashAttention + rotary embeddings).
# Extends the gpt3xl (1.3B) Pile config below, overriding model size,
# per-GPU batch size, and learning rate.

defaults:
  - /experiment/pile/gpt3xl-flash-rotary.yaml

model:
  config:
    n_embd: 2560
    n_head: 32
    n_layer: 32
    # std = sqrt(2 / (5 * n_embd)); NOTE(review): looks like the
    # "small init" scheme (Nguyen & Salazar) — confirm against the repo docs.
    initializer_range: ${eval:"(2 / (${.n_embd} * 5)) ** 0.5"}
    # Disable MLP activation checkpointing (level 0).
    mlp_checkpoint_lvl: 0

datamodule:
  # Per-GPU batch size chosen from available GPU memory;
  # presumably ${train.gpu_mem} is in GB — verify against the train config.
  batch_size: ${eval:"4 if ${train.gpu_mem} < 24 else (8 if ${train.gpu_mem} < 40 else (16 if ${train.gpu_mem} < 80 else 32))"}

train:
  optimizer:
    lr: 1.6e-4