# @package _global_

# Hydra experiment config: scale the gpt3xl-hf base model up to the
# GPT-3 2.7B size (n_embd 2560, 32 layers).
defaults:
  - /experiment/pile/gpt3xl-hf.yaml

model:
  config:
    n_embd: 2560
    # NOTE(review): n_head: 128 with n_embd: 2560 gives head_dim = 20,
    # which seems to contradict the filename's "hdim128" (head_dim 128
    # would need n_head: 20). Value kept as found — confirm against the
    # base config before relying on it.
    n_head: 128
    n_layer: 32

# OOM on A100 80GB even with batch_size = 1
datamodule:
  batch_size: 1

train:
  optimizer:
    lr: 1.6e-4