generated from ashleve/lightning-hydra-template
-
Notifications
You must be signed in to change notification settings - Fork 2
/
Copy pathcb55_polygon_3cl_train_20_run_unet64.yaml
87 lines (74 loc) · 2.15 KB
/
cb55_polygon_3cl_train_20_run_unet64.yaml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
# @package _global_

# to execute this experiment run:
# python run.py +experiment=exp_example_full

defaults:
  - /mode: experiment.yaml
  - /plugins: null
  - /task: semantic_segmentation_RGB.yaml
  - /loss: crossentropyloss.yaml
  - /metric:
      - iou.yaml
      - precision.yaml
      - recall.yaml
      - f1_score.yaml
  - /model/backbone: unet64.yaml
  - /model/header: unet_segmentation.yaml
  - /optimizer: adam.yaml
  - /callbacks:
      - check_compatibility.yaml
      - model_checkpoint.yaml
      - watch_model_wandb.yaml
  - /logger:
      - wandb.yaml # set logger here or use command line (e.g. `python run.py logger=wandb`)
      - csv.yaml
  # _self_ last: the values defined below override anything loaded by the groups above
  - _self_

# we override default configurations with nulls to prevent them from loading at all
# instead we define all modules and their paths directly in this config,
# so everything is stored in one place for more readability
# experiment name; also interpolated into the wandb run name below
name: "sem_seg_cb55_AB1_3cl_unet_loss_no_weights_100_ep_20_train"

# pipeline stages: fit the model, evaluate on the test split, skip prediction
# (canonical lowercase booleans instead of YAML 1.1 `True`/`False`)
train: true
test: true
predict: false
trainer:
  _target_: pytorch_lightning.Trainer
  accelerator: 'gpu'
  devices: -1 # -1 = use all available GPUs
  strategy: 'ddp_find_unused_parameters_false'
  min_epochs: 1
  max_epochs: 100
  precision: 16 # 16-bit mixed precision
  check_val_every_n_epoch: 1
  accumulate_grad_batches: 5 # accumulate gradients over 5 batches before stepping
task:
  # log a confusion matrix every 10 epochs, on validation only
  confusion_matrix_log_every_n_epoch: 10
  confusion_matrix_val: true
  confusion_matrix_test: false
datamodule:
  _target_: src.datamodules.RGB.datamodule.DataModuleRGB
  data_dir: /net/research-hisdoc/datasets/semantic_segmentation/datasets/polygon_gt/CB55/960_1344
  num_workers: 4
  batch_size: 1
  shuffle: true
  drop_last: true
  # subfolder names inside data_dir for images and ground truth
  data_folder_name: data
  gt_folder_name: gt
optimizer:
  # exponent floats written with a decimal point (1.0e-3) — plain `1e-3` is
  # parsed as a *string*, not a float, by strict YAML 1.1 loaders such as PyYAML
  lr: 1.0e-3
  betas: [0.9, 0.999]
  eps: 1.0e-5
callbacks:
  model_checkpoint:
    # keep the checkpoint with the best validation Jaccard index (IoU)
    monitor: "val/jaccard_index"
    mode: "max"
    filename: ${checkpoint_folder_name}CSG-polygon-3cl-unet
  # watch_model:
  #   log_freq: 1000
model:
  header:
    features: 64 # feature width of the segmentation header, matching the unet64 backbone
logger:
  wandb:
    project: icdar
    name: ${name}
    # tag updated from stale "50-epochs" to match trainer.max_epochs (100)
    # and the run name ("100_ep")
    tags: ["unet", "AB1", "3-classes", "baseline", "100-epochs", "no-weights"]
    group: 'baseline'