[Feature] Add SOD datasets #913
New file: dataset config for DUTS.

@@ -0,0 +1,56 @@
# dataset settings
dataset_type = 'DUTSDataset'
data_root = 'data/DUTS'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
img_scale = (352, 352)
crop_size = (320, 320)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations'),
    dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg'])
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=img_scale,
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img'])
        ])
]

data = dict(
    samples_per_gpu=4,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='images/training',
        ann_dir='annotations/training',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='images/validation',
        ann_dir='annotations/validation',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='images/validation',
        ann_dir='annotations/validation',
        pipeline=test_pipeline))
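In MMSegmentation's config system, a base dataset file like this is normally pulled into a model config through `_base_`. A minimal sketch of how that could look for this PR; the file paths and the 2-class override below are assumptions for illustration, not part of the diff:

```python
# Hypothetical top-level config; the _base_ paths are assumptions for illustration.
_base_ = [
    '../_base_/models/fcn_r50-d8.py',   # an existing model base config
    '../_base_/datasets/duts.py',       # assumed path of the new DUTS dataset config above
    '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_40k.py',
]
# SOD is a binary task (background vs. salient object), so the heads would
# presumably be narrowed to 2 classes; this override is an assumption.
model = dict(
    decode_head=dict(num_classes=2),
    auxiliary_head=dict(num_classes=2))
```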
Changes to the dataset preparation documentation, adding the SOD datasets to the expected directory layout:

@@ -108,6 +108,28 @@ mmsegmentation
│   │   └── leftImg8bit
│   │   │   └── test
│   │   │       └── night
│   ├── DUTS
│   │   ├── images
│   │   │   ├── training
│   │   │   ├── validation
│   │   ├── annotations
│   │   │   ├── training
│   │   │   ├── validation
│   ├── DUT-OMRON
│   │   ├── images
│   │   │   ├── validation
│   │   ├── annotations
│   │   │   ├── validation
│   ├── ECSSD
│   │   ├── images
│   │   │   ├── validation
│   │   ├── annotations
│   │   │   ├── validation
│   ├── HKU-IS
│   │   ├── images
│   │   │   ├── validation
│   │   ├── annotations
│   │   │   ├── validation
```

### Cityscapes
@@ -253,3 +275,49 @@ Since we only support test models on this dataset, you may only download [the va

### Nighttime Driving

Since we only support test models on this dataset, you may only download [the test set](http://data.vision.ee.ethz.ch/daid/NighttimeDriving/NighttimeDrivingTest.zip).

### DUTS

First, download [DUTS-TR.zip](http://saliencydetection.net/duts/download/DUTS-TR.zip) and [DUTS-TE.zip](http://saliencydetection.net/duts/download/DUTS-TE.zip).
To convert the DUTS dataset to MMSegmentation format, you should run the following command:

```shell
python tools/convert_datasets/duts.py /path/to/DUTS-TR.zip /path/to/DUTS-TE.zip
```
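The converter script `tools/convert_datasets/duts.py` itself is not shown in this excerpt. A minimal sketch of what such a converter typically does, assuming the official zip layout (`DUTS-TR-Image`/`DUTS-TR-Mask`, `DUTS-TE-Image`/`DUTS-TE-Mask`) and the `data/DUTS` layout shown above; the helper name and details are hypothetical, not the PR's code:

```python
# Hypothetical sketch of a DUTS -> MMSegmentation converter.
import argparse
import shutil
import zipfile
from pathlib import Path


def extract_split(zip_path, split, out_root):
    """Unpack one DUTS zip and copy images/masks into the expected layout."""
    tmp_dir = out_root / f'.tmp_{split}'
    with zipfile.ZipFile(zip_path) as zf:
        zf.extractall(tmp_dir)
    img_out = out_root / 'images' / split
    ann_out = out_root / 'annotations' / split
    img_out.mkdir(parents=True, exist_ok=True)
    ann_out.mkdir(parents=True, exist_ok=True)
    for img in tmp_dir.rglob('*-Image/*.jpg'):
        shutil.copy(img, img_out / img.name)
    for mask in tmp_dir.rglob('*-Mask/*.png'):
        # NOTE: the real converter may also need to remap mask values
        # (e.g. 0/255 -> 0/1); that step is omitted in this sketch.
        shutil.copy(mask, ann_out / mask.name)
    shutil.rmtree(tmp_dir)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Convert DUTS to MMSeg format')
    parser.add_argument('tr_zip', help='path to DUTS-TR.zip')
    parser.add_argument('te_zip', help='path to DUTS-TE.zip')
    args = parser.parse_args()
    out_root = Path('data/DUTS')
    # 'training' comes from DUTS-TR, 'validation' from DUTS-TE.
    extract_split(Path(args.tr_zip), 'training', out_root)
    extract_split(Path(args.te_zip), 'validation', out_root)
```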
### DUT-OMRON

In salient object detection (SOD), DUT-OMRON is used for evaluation.

First, download [DUT-OMRON-image.zip](http://saliencydetection.net/dut-omron/download/DUT-OMRON-image.zip) and [DUT-OMRON-gt-pixelwise.zip.zip](http://saliencydetection.net/dut-omron/download/DUT-OMRON-gt-pixelwise.zip.zip).

To convert the DUT-OMRON dataset to MMSegmentation format, you should run the following command:

```shell
python tools/convert_datasets/dut_omron.py /path/to/DUT-OMRON-image.zip /path/to/DUT-OMRON-gt-pixelwise.zip.zip
```

### ECSSD

In salient object detection (SOD), ECSSD is used for evaluation.

First, download [images.zip](https://www.cse.cuhk.edu.hk/leojia/projects/hsaliency/data/ECSSD/images.zip) and [ground_truth_mask.zip](https://www.cse.cuhk.edu.hk/leojia/projects/hsaliency/data/ECSSD/ground_truth_mask.zip).

To convert the ECSSD dataset to MMSegmentation format, you should run the following command:

```shell
python tools/convert_datasets/ecssd.py /path/to/images.zip /path/to/ground_truth_mask.zip
```

### HKU-IS

In salient object detection (SOD), HKU-IS is used for evaluation.

First, download [HKU-IS.rar](https://sites.google.com/site/ligb86/mdfsaliency/).

Review comment: I cannot access https://sites.google.com/site/ligb86/mdfsaliency/

To convert the HKU-IS dataset to MMSegmentation format, you should run the following command:

```shell
python tools/convert_datasets/hku_is.py /path/to/HKU-IS.rar
```

Review comment: Could modify the Chinese document accordingly.
Changes to the testing API (`single_gpu_test` and `multi_gpu_test`), adding a `return_logit` argument that is forwarded to the model and to `dataset.pre_eval`:

@@ -38,6 +38,7 @@ def single_gpu_test(model,
                     efficient_test=False,
                     opacity=0.5,
                     pre_eval=False,
+                    return_logit=False,
                     format_only=False,
                     format_args={}):
     """Test with single GPU by progressive mode.

Review comment (on `return_logit=False,`): docstring for this new argument.
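In response to the review comment, a docstring entry for the new argument might read as follows. This wording is only a suggestion: what `return_logit` returns is inferred from the argument name and from how it is forwarded to `model(...)` and `dataset.pre_eval(...)` in this diff.

```python
# Suggested wording for the missing ``Args`` entry (not part of the PR).
"""
    return_logit (bool): Whether to ask the model for raw seg logits
        (continuous score maps) instead of argmax label maps, so that
        SOD-style metrics can be computed in ``dataset.pre_eval``.
        Default: False.
"""
```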
@@ -88,7 +89,8 @@ def single_gpu_test(model,
     for batch_indices, data in zip(loader_indices, data_loader):
         with torch.no_grad():
-            result = model(return_loss=False, **data)
+            result = model(
+                return_loss=False, return_logit=return_logit, **data)

         if efficient_test:
             result = [np2tmp(_, tmpdir='.efficient_test') for _ in result]

@@ -99,7 +101,8 @@
         if pre_eval:
             # TODO: adapt samples_per_gpu > 1.
             # only samples_per_gpu=1 valid now
-            result = dataset.pre_eval(result, indices=batch_indices)
+            result = dataset.pre_eval(
+                result, return_logit, indices=batch_indices)

         results.extend(result)
@@ -142,6 +145,7 @@ def multi_gpu_test(model,
                    gpu_collect=False,
                    efficient_test=False,
                    pre_eval=False,
+                   return_logit=False,
                    format_only=False,
                    format_args={}):
     """Test model with multiple gpus by progressive mode.

@@ -204,7 +208,11 @@ def multi_gpu_test(model,
     for batch_indices, data in zip(loader_indices, data_loader):
         with torch.no_grad():
-            result = model(return_loss=False, rescale=True, **data)
+            result = model(
+                return_loss=False,
+                return_logit=return_logit,
+                rescale=True,
+                **data)

         if efficient_test:
             result = [np2tmp(_, tmpdir='.efficient_test') for _ in result]

@@ -215,7 +223,8 @@
         if pre_eval:
             # TODO: adapt samples_per_gpu > 1.
             # only samples_per_gpu=1 valid now
-            result = dataset.pre_eval(result, indices=batch_indices)
+            result = dataset.pre_eval(
+                result, return_logit, indices=batch_indices)

         results.extend(result)
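As a usage illustration, the new flag could be exercised directly through the Python API roughly as below. This is a sketch, not part of the PR: the config path is a placeholder, checkpoint loading is omitted, and the assumption that `return_logit=True` yields continuous score maps rather than argmax label maps is inferred only from the argument name and how it is threaded into `dataset.pre_eval`.

```python
# Sketch only: config path is a placeholder and checkpoint loading is omitted.
from mmcv import Config
from mmcv.parallel import MMDataParallel

from mmseg.apis import single_gpu_test
from mmseg.datasets import build_dataloader, build_dataset
from mmseg.models import build_segmentor

cfg = Config.fromfile('path/to/a_sod_config.py')  # placeholder path
dataset = build_dataset(cfg.data.test)
data_loader = build_dataloader(
    dataset, samples_per_gpu=1, workers_per_gpu=2, dist=False, shuffle=False)
model = MMDataParallel(build_segmentor(cfg.model).eval(), device_ids=[0])

# With return_logit=True, the per-image results handed to dataset.pre_eval are
# (presumably) raw score maps, which SOD metrics such as MAE/F-measure need.
results = single_gpu_test(model, data_loader, pre_eval=True, return_logit=True)
print(dataset.evaluate(results))
```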
Changes to the evaluation package's `__init__.py`, exporting the new SOD metric helpers:

@@ -2,10 +2,11 @@
 from .class_names import get_classes, get_palette
 from .eval_hooks import DistEvalHook, EvalHook
 from .metrics import (eval_metrics, intersect_and_union, mean_dice,
-                      mean_fscore, mean_iou, pre_eval_to_metrics)
+                      mean_fscore, mean_iou, pre_eval_to_metrics,
+                      pre_eval_to_sod_metrics, eval_sod_metrics, calc_sod_metrics)

 __all__ = [
     'EvalHook', 'DistEvalHook', 'mean_dice', 'mean_iou', 'mean_fscore',
     'eval_metrics', 'get_classes', 'get_palette', 'pre_eval_to_metrics',
-    'intersect_and_union'
+    'intersect_and_union', 'calc_sod_metrics', 'eval_sod_metrics', 'pre_eval_to_sod_metrics'
 ]

Review comment: Code style is different in many files.
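The implementations behind `calc_sod_metrics`, `eval_sod_metrics` and `pre_eval_to_sod_metrics` are not part of this excerpt. For orientation only, here is a self-contained sketch of the two metrics most commonly reported on these SOD benchmarks, MAE and max F-measure with beta^2 = 0.3; this is illustrative and not the PR's code:

```python
# Illustrative only: common SOD metrics computed from a predicted saliency
# probability map (values in [0, 1]) and a binary ground-truth mask.
import numpy as np


def sod_mae(pred, gt):
    """Mean absolute error between a [0, 1] saliency map and a binary mask."""
    return np.abs(pred.astype(np.float64) - gt.astype(np.float64)).mean()


def sod_max_fmeasure(pred, gt, beta2=0.3, num_thresholds=255):
    """Max F-measure over evenly spaced binarization thresholds (beta^2 = 0.3)."""
    gt = gt.astype(bool)
    best = 0.0
    for t in np.linspace(0.0, 1.0, num_thresholds):
        binary = pred >= t
        tp = np.logical_and(binary, gt).sum()
        precision = tp / max(binary.sum(), 1)
        recall = tp / max(gt.sum(), 1)
        if precision + recall > 0:
            f = (1 + beta2) * precision * recall / (beta2 * precision + recall)
            best = max(best, f)
    return best
```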
Changes to the evaluation hooks (`EvalHook` / `DistEvalHook`), threading `return_logit` through to `single_gpu_test` and `multi_gpu_test`:

@@ -30,9 +30,11 @@ def __init__(self,
                  by_epoch=False,
                  efficient_test=False,
                  pre_eval=False,
+                 return_logit=False,
                  **kwargs):
         super().__init__(*args, by_epoch=by_epoch, **kwargs)
         self.pre_eval = pre_eval
+        self.return_logit = return_logit
         if efficient_test:
             warnings.warn(
                 'DeprecationWarning: ``efficient_test`` for evaluation hook '

Review comment (on `return_logit=False,`): add docstr for this argument

@@ -47,7 +49,11 @@ def _do_evaluate(self, runner):
         from mmseg.apis import single_gpu_test
         results = single_gpu_test(
-            runner.model, self.dataloader, show=False, pre_eval=self.pre_eval)
+            runner.model,
+            self.dataloader,
+            show=False,
+            pre_eval=self.pre_eval,
+            return_logit=self.return_logit)
         runner.log_buffer.clear()
         runner.log_buffer.output['eval_iter_num'] = len(self.dataloader)
         key_score = self.evaluate(runner, results)

@@ -77,9 +83,11 @@ def __init__(self,
                  by_epoch=False,
                  efficient_test=False,
                  pre_eval=False,
+                 return_logit=False,
                  **kwargs):
         super().__init__(*args, by_epoch=by_epoch, **kwargs)
         self.pre_eval = pre_eval
+        self.return_logit = return_logit
         if efficient_test:
             warnings.warn(
                 'DeprecationWarning: ``efficient_test`` for evaluation hook '

@@ -115,7 +123,8 @@ def _do_evaluate(self, runner):
             self.dataloader,
             tmpdir=tmpdir,
             gpu_collect=self.gpu_collect,
-            pre_eval=self.pre_eval)
+            pre_eval=self.pre_eval,
+            return_logit=self.return_logit)

         runner.log_buffer.clear()
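With these hook changes, enabling the behaviour from a training config would presumably just mean adding the new key to the standard `evaluation` dict, since its kwargs are passed to the evaluation hook. A sketch under that assumption; the interval is a placeholder, and how the SOD dataset's `evaluate()` names its metrics is not shown in this excerpt:

```python
# Sketch of an evaluation config once return_logit is supported by the hooks.
evaluation = dict(interval=4000, pre_eval=True, return_logit=True)
```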
Review comment: Can we use concat dataset (#833) and do evaluation separately?