# default.yaml
  1. # rich_progress_bar:
  2. # _target_: pytorch_lightning.callbacks.RichProgressBar
  3. rich_model_summary:
  4. _target_: pytorch_lightning.callbacks.RichModelSummary
  5. model_checkpoint:
  6. _target_: pytorch_lightning.callbacks.ModelCheckpoint
  7. monitor: "val/acc" # name of the logged metric which determines when model is improving
  8. mode: "max" # can be "max" or "min"
  9. save_top_k: 1 # save k best models (determined by above metric)
  10. save_last: True # additionally always save model from last epoch
  11. verbose: False
  12. dirpath: ${oc.env:CHECKPOINT_DIR,checkpoints}/${oc.select:name,''}
  13. filename: "epoch_{epoch:03d}"
  14. auto_insert_metric_name: False
  15. early_stopping:
  16. _target_: pytorch_lightning.callbacks.EarlyStopping
  17. monitor: "val/acc" # name of the logged metric which determines when model is improving
  18. mode: "max" # can be "max" or "min"
  19. patience: 100 # how many epochs of not improving until training stops
  20. min_delta: 0 # minimum change in the monitored metric needed to qualify as an improvement
  21. learning_rate_monitor:
  22. _target_: pytorch_lightning.callbacks.LearningRateMonitor
  23. logging_interval: step
  24. speed_monitor:
  25. _target_: src.callbacks.speed_monitor.SpeedMonitor
  26. intra_step_time: True
  27. inter_step_time: True
  28. epoch_time: True
  29. loss_scale_monitor:
  30. _target_: src.callbacks.loss_scale_monitor.LossScaleMonitor
  31. params_log:
  32. _target_: src.callbacks.params_log.ParamsLog
  33. total_params_log: True
  34. trainable_params_log: True
  35. non_trainable_params_log: True
  36. gpu_affinity:
  37. _target_: src.callbacks.gpu_affinity.GpuAffinity