all_params.yaml

  _target_: pytorch_lightning.Trainer
  # default values for all trainer parameters
  checkpoint_callback: True
  default_root_dir: null
  gradient_clip_val: 0.0
  process_position: 0
  num_nodes: 1
  num_processes: 1
  gpus: null
  auto_select_gpus: False
  tpu_cores: null
  log_gpu_memory: null
  overfit_batches: 0.0
  track_grad_norm: -1
  check_val_every_n_epoch: 1
  fast_dev_run: False
  accumulate_grad_batches: 1
  max_epochs: 1
  min_epochs: 1
  max_steps: null
  min_steps: null
  limit_train_batches: 1.0
  limit_val_batches: 1.0
  limit_test_batches: 1.0
  val_check_interval: 1.0
  flush_logs_every_n_steps: 100
  log_every_n_steps: 50
  accelerator: null
  sync_batchnorm: False
  precision: 32
  weights_summary: "top"
  weights_save_path: null
  num_sanity_val_steps: 2
  truncated_bptt_steps: null
  resume_from_checkpoint: null
  profiler: null
  benchmark: False
  deterministic: False
  reload_dataloaders_every_epoch: False
  auto_lr_find: False
  replace_sampler_ddp: True
  terminate_on_nan: False
  auto_scale_batch_size: False
  prepare_data_per_node: True
  plugins: null
  amp_backend: "native"
  amp_level: "O2"
  move_metrics_to_cpu: False
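
The `_target_` key follows Hydra's instantiation convention: `hydra.utils.instantiate` imports the named class and passes every remaining key as a constructor argument. A minimal sketch of how a config like this is typically consumed follows; the `config_path` and `config_name` values are assumptions for illustration, chosen to match the file name above.

  # sketch: loading all_params.yaml with Hydra and building the Trainer
  # (config_path/config_name are assumed to match this project's layout)
  import hydra
  from omegaconf import DictConfig

  @hydra.main(config_path=".", config_name="all_params")
  def main(cfg: DictConfig) -> None:
      # instantiate() reads `_target_` (pytorch_lightning.Trainer here)
      # and forwards all other keys (gpus, max_epochs, ...) as kwargs
      trainer = hydra.utils.instantiate(cfg)
      print(trainer.max_epochs)  # -> 1, the value from all_params.yaml

  if __name__ == "__main__":
      main()

Because every parameter lives in the YAML file, any of them can be overridden from the command line without touching code, e.g. `python train.py max_epochs=10 gpus=1`.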