🔀 Merge pull request #83 from DIVA-DIA/dev_resnet
Created resnet FCN header
lvoegtlin authored Nov 17, 2021
2 parents fa0385b + 15eec22 commit 008f4d0
Showing 15 changed files with 274 additions and 61 deletions.
17 changes: 17 additions & 0 deletions README.md
@@ -94,3 +94,20 @@ python run.py trainer.max_epochs=20 datamodule.batch_size=64
11. Go to the root folder of the framework and activate the environment (source .autoenv OR conda activate unsupervised_learning)
12. Log into wandb. Execute `wandb login` and follow the instructions
13. Now you should be able to run the basic experiment from PyCharm


### Loading models
You can load the individual model parts (`backbone` or `header`) as well as the whole task.
To load the `backbone` or the `header`, add the field `path_to_weights` to your experiment config.
e.g.
```
model:
  header:
    path_to_weights: /my/path/to/the/pth/file
```
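Under the hood this presumably just loads the stored state dict into the corresponding module. A minimal sketch of that idea in plain PyTorch (the module and its sizes are hypothetical, not the framework's actual loading code):
```
import torch
from torch import nn

# Hypothetical header module; in the framework it is built from the Hydra config.
header = nn.Linear(in_features=512, out_features=8)

# Load the saved weights and copy them into the module.
state_dict = torch.load("/my/path/to/the/pth/file", map_location="cpu")
header.load_state_dict(state_dict)
```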
To load the whole task, provide the path to its checkpoint to the trainer via the field `resume_from_checkpoint`.
e.g.
```
trainer:
  resume_from_checkpoint: /path/to/.ckpt/file
```
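The same field should also be settable from the command line in the usual Hydra way, e.g. `python run.py trainer.resume_from_checkpoint=/path/to/.ckpt/file` (prefix the override with `+` if the key is not already defined in your config).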
@@ -0,0 +1,74 @@
# @package _global_

# to execute this experiment run:
# python run.py +experiment=exp_example_full

defaults:
  - /plugins: default.yaml
  - /task: semantic_segmentation_HisDB.yaml
  - /loss: crossentropyloss.yaml
  - /metric: hisdbiou.yaml
  - /model/backbone: resnet18.yaml
  - /model/header: resnet_segmentation.yaml
  - /optimizer: adam.yaml
  - /callbacks:
      - check_compatibility.yaml
      - model_checkpoint.yaml
      - watch_model_wandb.yaml
  - /logger:
      - wandb.yaml # set logger here or use command line (e.g. `python run.py logger=wandb`)
      - csv.yaml

# we override default configurations with nulls to prevent them from loading at all
# instead we define all modules and their paths directly in this config,
# so everything is stored in one place for more readability

seed: 42

train: True
test: False

trainer:
  _target_: pytorch_lightning.Trainer
  gpus: -1
  accelerator: 'ddp'
  min_epochs: 1
  max_epochs: 3
  weights_summary: full
  precision: 16

task:
  confusion_matrix_log_every_n_epoch: 1
  confusion_matrix_val: True
  confusion_matrix_test: True

datamodule:
  _target_: src.datamodules.DivaHisDB.datamodule_cropped.DivaHisDBDataModuleCropped

  data_dir: /netscratch/datasets/semantic_segmentation/datasets_cropped/CB55-10-segmentation
  crop_size: 256
  num_workers: 4
  batch_size: 16
  shuffle: True
  drop_last: True
  data_folder_name: data
  gt_folder_name: gt

model:
  backbone:
    path_to_weights: /netscratch/experiments_lars_paul/lars/2021-11-15/16-08-51/checkpoints/epoch=1/backbone.pth
  header:
    in_channels: 512

callbacks:
  model_checkpoint:
    filename: ${checkpoint_folder_name}dev-rotnet-pt-resnet18-cb55-10-segmentation
  watch_model:
    log_freq: 1

logger:
  wandb:
    name: 'dev-rotnet-pt-resnet18-cb55-10-segmentation'
    tags: [ "best_model", "USL" ]
    group: 'dev-runs'
    notes: "Testing"
11 changes: 4 additions & 7 deletions configs/experiment/dev_rotnet_resnet18_cb55_10.yaml
@@ -9,7 +9,7 @@ defaults:
   - /loss: crossentropyloss.yaml
   - /metric: accuracy.yaml
   - /model/backbone: resnet18.yaml
-  - /model/header: null
+  - /model/header: resnet_classification.yaml
   - /optimizer: adam.yaml
   - /callbacks:
       - check_compatibility.yaml
@@ -55,21 +55,18 @@ datamodule:

 model:
   header:
-    _target_: src.models.headers.fully_connected.SingleLinear
-
-    num_classes: ${datamodule:num_classes}
     # needs to be calculated from the output of the last layer of the backbone (do not forget to flatten!)
-    input_size: 2048
+    in_channels: 32768

 callbacks:
   model_checkpoint:
-    filename: ${checkpoint_folder_name}dev-rotnet-basic-cnn-cb55-10
+    filename: ${checkpoint_folder_name}dev-rotnet-resnet18-cb55-10
   watch_model:
     log_freq: 1

 logger:
   wandb:
-    name: 'dev-rotnet-basic-cnn-cb55-10'
+    name: 'dev-rotnet-resnet18-cb55-10'
     tags: [ "best_model", "USL" ]
     group: 'dev-runs'
     notes: "Testing"
72 changes: 72 additions & 0 deletions configs/experiment/dev_rotnet_resnet50_cb55_10.yaml
@@ -0,0 +1,72 @@
# @package _global_

# to execute this experiment run:
# python run.py +experiment=exp_example_full

defaults:
  - /plugins: default.yaml
  - /task: classification.yaml
  - /loss: crossentropyloss.yaml
  - /metric: accuracy.yaml
  - /model/backbone: resnet50.yaml
  - /model/header: resnet_classification.yaml
  - /optimizer: adam.yaml
  - /callbacks:
      - check_compatibility.yaml
      - model_checkpoint.yaml
      - watch_model_wandb.yaml
  - /logger:
      - wandb.yaml # set logger here or use command line (e.g. `python run.py logger=wandb`)
      - csv.yaml

# we override default configurations with nulls to prevent them from loading at all
# instead we define all modules and their paths directly in this config,
# so everything is stored in one place for more readability

seed: 42

train: True
test: False

trainer:
  _target_: pytorch_lightning.Trainer
  gpus: -1
  accelerator: 'ddp'
  min_epochs: 1
  max_epochs: 3
  weights_summary: full
  precision: 16

task:
  confusion_matrix_log_every_n_epoch: 1
  confusion_matrix_val: False
  confusion_matrix_test: False

datamodule:
  _target_: src.datamodules.RotNet.datamodule_cropped.RotNetDivaHisDBDataModuleCropped

  data_dir: /netscratch/datasets/semantic_segmentation/datasets_cropped/CB55-10-segmentation
  crop_size: 256
  num_workers: 4
  batch_size: 16
  shuffle: True
  drop_last: True
  data_folder_name: data

model:
  header:
    # needs to be calculated from the output of the last layer of the backbone (do not forget to flatten!)
    in_channels: 131072

callbacks:
  model_checkpoint:
    filename: ${checkpoint_folder_name}dev-rotnet-resnet50-cb55-10
  watch_model:
    log_freq: 1

logger:
  wandb:
    name: 'dev-rotnet-resnet50-cb55-10'
    tags: [ "best_model", "USL" ]
    group: 'dev-runs'
    notes: "Testing"
2 changes: 1 addition & 1 deletion configs/experiment/rotnet_resnet18_cb55_full.yaml
@@ -9,7 +9,7 @@ defaults:
   - /loss: crossentropyloss.yaml
   - /metric: accuracy.yaml
   - /model/backbone: resnet18.yaml
-  - /model/header: single_layer.yaml
+  - /model/header: resnet_classification.yaml
   - /optimizer: adam.yaml
   - /callbacks:
       - check_compatibility.yaml
2 changes: 1 addition & 1 deletion configs/experiment/rotnet_resnet18_cb55_train10_last.yaml
@@ -9,7 +9,7 @@ defaults:
   - /loss: crossentropyloss.yaml
   - /metric: accuracy.yaml
   - /model/backbone: resnet18.yaml
-  - /model/header: single_layer.yaml
+  - /model/header: resnet_classification.yaml
   - /optimizer: adam.yaml
   - /callbacks:
       - check_compatibility.yaml
2 changes: 1 addition & 1 deletion configs/experiment/rotnet_resnet18_cb55_train19_last.yaml
@@ -9,7 +9,7 @@ defaults:
   - /loss: crossentropyloss.yaml
   - /metric: accuracy.yaml
   - /model/backbone: resnet18.yaml
-  - /model/header: single_layer.yaml
+  - /model/header: resnet_classification.yaml
   - /optimizer: adam.yaml
   - /callbacks:
       - check_compatibility.yaml
@@ -1,5 +1,5 @@
-_target_: src.models.headers.fully_connected.SingleLinear
+_target_: src.models.headers.fully_connected.ResNetHeader

 num_classes: ${datamodule:num_classes}
 # needs to be calculated from the output of the last layer of the backbone (do not forget to flatten!)
-input_size: 109512
+in_channels: 109512
8 changes: 8 additions & 0 deletions configs/model/header/resnet_segmentation.yaml
@@ -0,0 +1,8 @@
_target_: src.models.headers.fully_convolution.ResNetFCNHead

# FCN header for resnets. The in_channels are fixed for the different resnet architectures:
# resnet18, 34 = 512
# resnet50, 101, 152 = 2048
in_channels: 512
num_classes: ${datamodule:num_classes}
output_dims: ${datamodule:dims}
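The `ResNetFCNHead` implementation itself is not part of this diff. As a rough illustration only (an assumption, not the repository's code), an FCN-style head with these config fields would map the backbone's `in_channels` to `num_classes` with convolutions and upsample the logits back to the crop resolution:
```
import torch
from torch import nn
import torch.nn.functional as F


class FCNHeadSketch(nn.Module):
    """Illustrative FCN-style segmentation head: conv layers plus upsampling."""

    def __init__(self, in_channels: int, num_classes: int, output_dims: int):
        super().__init__()
        self.output_dims = output_dims  # e.g. the crop size (256)
        self.conv = nn.Sequential(
            nn.Conv2d(in_channels, in_channels // 4, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(in_channels // 4),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels // 4, num_classes, kernel_size=1),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.conv(x)  # (N, num_classes, H/32, W/32) for a ResNet backbone
        # Upsample the per-class logits back to the full crop resolution.
        return F.interpolate(x, size=self.output_dims, mode="bilinear", align_corners=False)


# ResNet-18/34 backbones end with 512 channels, ResNet-50/101/152 with 2048.
head = FCNHeadSketch(in_channels=512, num_classes=8, output_dims=256)
logits = head(torch.zeros(1, 512, 8, 8))  # torch.Size([1, 8, 256, 256])
```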
25 changes: 12 additions & 13 deletions requirements.txt
@@ -1,24 +1,23 @@
 # --------- pytorch --------- #
 torch==1.8.1
-torchvision>=0.9.1
-pytorch-lightning>=1.4.4
-lightning-bolts>=0.3.2
-torchmetrics>=0.5.0
+torchvision==0.9.1
+pytorch-lightning==1.4.8
+lightning-bolts==0.4.0
+torchmetrics==0.5.1

 # --------- hydra --------- #
 hydra-core==1.1.0
 hydra-colorlog==1.1.0
 hydra-optuna-sweeper==1.1.0

 # --------- loggers --------- #
-wandb>=0.10.31
+wandb==0.12.6

 # --------- others --------- #
-rich
-python-dotenv
-pre-commit
-scikit-learn>=0.23.2
-pandas
-matplotlib
-seaborn
-pytest
+rich==10.1.0
+python-dotenv==0.17.0
+scikit-learn==0.24.1
+pandas==1.2.4
+matplotlib==3.4.1
+seaborn==0.11.1
+pytest==6.2.3