```yaml
_target_: pytorch_lightning.Trainer

# default values for all trainer parameters
checkpoint_callback: True
default_root_dir: null
gradient_clip_val: 0.0
process_position: 0
num_nodes: 1
num_processes: 1
gpus: null
auto_select_gpus: False
tpu_cores: null
log_gpu_memory: null
overfit_batches: 0.0
track_grad_norm: -1
check_val_every_n_epoch: 1
fast_dev_run: False
accumulate_grad_batches: 1
max_epochs: 1
min_epochs: 1
max_steps: null
min_steps: null
limit_train_batches: 1.0
limit_val_batches: 1.0
limit_test_batches: 1.0
val_check_interval: 1.0
flush_logs_every_n_steps: 100
log_every_n_steps: 50
accelerator: null
sync_batchnorm: False
precision: 32
weights_summary: "top"
weights_save_path: null
num_sanity_val_steps: 2
truncated_bptt_steps: null
resume_from_checkpoint: null
profiler: null
benchmark: False
deterministic: False
reload_dataloaders_every_epoch: False
auto_lr_find: False
replace_sampler_ddp: True
terminate_on_nan: False
auto_scale_batch_size: False
prepare_data_per_node: True
plugins: null
amp_backend: "native"
amp_level: "O2"
move_metrics_to_cpu: False
```
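Because the config declares a `_target_` key, Hydra can build the object directly from it. Below is a minimal sketch (not taken from the source) of how such a config is typically instantiated; the file path `configs/trainer/default.yaml` and the surrounding training objects are assumptions for illustration only.

```python
# Sketch: instantiate a pytorch_lightning.Trainer from a Hydra `_target_` config.
# The config path below is hypothetical; adjust it to wherever the YAML lives.
from hydra.utils import instantiate
from omegaconf import OmegaConf

# Load the YAML shown above into a DictConfig.
trainer_cfg = OmegaConf.load("configs/trainer/default.yaml")  # hypothetical path

# Hydra reads `_target_` and calls pytorch_lightning.Trainer(**remaining_keys).
trainer = instantiate(trainer_cfg)

# The result is an ordinary Lightning Trainer, used as usual, e.g.:
# trainer.fit(model, datamodule=datamodule)
```

Keeping every default explicit in the YAML means any of these parameters can be overridden from the command line (for example `trainer.max_epochs=10`) without touching the training code.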