# generated from ashleve/lightning-hydra-template
# @package _global_
# to execute this experiment run:
# python run.py +experiment=fine_tune_cb55_AB1_train_20_run_divanet
defaults:
  - /mode: experiment.yaml
  - /plugins: null
  - /task: semantic_segmentation_RGB.yaml
  - /loss: crossentropyloss_balanced.yaml
  - /metric:
      - iou.yaml
  - /model/backbone: divanet.yaml
  - /model/header: identity.yaml
  - /optimizer: adam.yaml
  - /callbacks:
      - check_compatibility.yaml
      - model_checkpoint.yaml
      - watch_model_wandb.yaml
  - /logger:
      - wandb.yaml # set logger here or use command line (e.g. `python run.py logger=wandb`)
      - csv.yaml
  # _self_ last: values defined in this file override the defaults above
  - _self_
# we override default configurations with nulls to prevent them from loading at all
# instead we define all modules and their paths directly in this config,
# so everything is stored in one place for more readability
name: "fine_tune_cb55_AB1_train_20_divanet_loss_weights"
# stage toggles: run training and testing, skip prediction
train: true
test: true
predict: false
trainer:
  _target_: pytorch_lightning.Trainer
  accelerator: 'gpu'
  devices: -1 # use all GPUs visible to the process
  strategy: 'ddp_find_unused_parameters_false'
  min_epochs: 1
  max_epochs: 100
  precision: 16 # mixed-precision training
  check_val_every_n_epoch: 1
  accumulate_grad_batches: 5 # gradients accumulated over 5 batches before each optimizer step
task:
  # log a confusion matrix every 10 epochs, on both validation and test stages
  confusion_matrix_log_every_n_epoch: 10
  confusion_matrix_val: true
  confusion_matrix_test: true
datamodule:
  _target_: src.datamodules.RGB.datamodule.DataModuleRGB
  data_dir: /net/research-hisdoc/datasets/semantic_segmentation/datasets/CB55-splits/AB1
  num_workers: 4
  batch_size: 1
  shuffle: true
  drop_last: true
  # sub-folder layout inside data_dir
  data_folder_name: data
  gt_folder_name: gt
  train_folder_name: training-20 # 20-page training split
  val_folder_name: validation
optimizer:
  # NOTE: use dotted exponent form — bare `1e-3` is read as a *string* by
  # YAML 1.1 parsers (e.g. PyYAML), whose float regex requires a dot.
  lr: 1.0e-3
  betas: [0.9, 0.999]
  eps: 1.0e-5
callbacks:
  model_checkpoint:
    # keep the checkpoint with the best (highest) validation IoU
    monitor: "val/jaccard_index"
    mode: "max"
    # NOTE(review): assumes ${checkpoint_folder_name} resolves with a trailing
    # path separator, since it is concatenated directly — confirm
    filename: ${checkpoint_folder_name}fine-tune-cb55-train-20-divanet
  # watch_model:
  #   log_freq: 1000
model:
  backbone:
    # pre-trained backbone weights to fine-tune from (synthetic DPC-120 run)
    path_to_weights: /netscratch/experiments_lars_paul/lars/experiments/sem_seg_synthetic_DPC_120_divanet_loss_no_weights/2022-10-25/14-12-09/checkpoints/backbone_last.pth
logger:
  wandb:
    project: "ijdar"
    name: ${name} # reuse the experiment name defined above
    tags: ["divanet", "AB1", "training-20", "4-classes", "baseline", "balanced"]
    group: "baseline"