[Refactor] Support progressive test with fewer memory cost #709

Merged: 60 commits, Aug 20, 2021
Changes from 1 commit
Commits (60)
f3aaecc
Support progressive test with fewer memory cost.
Jul 16, 2021
7c944cf
Temp code
Jul 22, 2021
3a97df6
Using processor to refactor evaluation workflow.
Jul 22, 2021
e6be6b4
refactor eval hook.
Jul 22, 2021
fdb6b49
Fix multi gpu bug when index out of len(dataset).
Jul 22, 2021
8653c96
Fix process bar.
Jul 22, 2021
842bdef
Fix middle save argument.
Jul 22, 2021
d389665
Modify some variable name of dataset evaluate api.
Jul 22, 2021
9349970
Modify some variable name of eval hook.
Jul 22, 2021
3a3b3ec
Fix some priority bugs of eval hook.
Jul 22, 2021
e03ee57
Deprecated efficient_test.
Jul 24, 2021
c3c491d
Fix training progress blocked by eval hook.
Jul 24, 2021
0cb97f4
Deprecated old test api.
Jul 24, 2021
25943c8
Fix test api error.
Jul 26, 2021
be13436
Modify outer api.
Jul 27, 2021
6bb1c91
Build a sampler test api.
Jul 27, 2021
25a7475
TODO: Refactor format_results.
Jul 27, 2021
25cc8ba
Modify variable names.
Jul 27, 2021
af54432
Fix num_classes bug.
Jul 27, 2021
1b7c976
Fix sampler index bug.
Jul 27, 2021
049b307
Fix grammar bug.
Jul 27, 2021
dc35f6d
Support batch sampler.
Jul 27, 2021
880fbcb
More readable test api.
Jul 27, 2021
f9eda3e
Remove some command arg and fix eval hook bug.
Jul 27, 2021
7be603d
Support format-only arg.
Jul 28, 2021
4243ea2
Modify format_results of datasets.
Jul 28, 2021
ce44bff
Modify tool which use test apis.
Jul 28, 2021
767ee98
support cityscapes eval
xvjiarui Jul 29, 2021
f69dcd8
fixed cityscapes
xvjiarui Jul 29, 2021
119ad87
1. Add comments for batch_sampler;
Jul 29, 2021
aa3f622
Merge branch 'progressive_test' of github.com:sennnnn/mmsegmentation …
Jul 29, 2021
c185dcf
Add efficient_test doc string.
Jul 29, 2021
e72ad41
Modify test tool to compat old version.
Jul 29, 2021
0f24514
Modify eval hook to compat with old version.
Jul 29, 2021
cfd0fd7
Modify test api to compat old version api.
Jul 29, 2021
1f2e1b0
Sampler explanation.
Jul 29, 2021
4e775a7
update warning
xvjiarui Jul 29, 2021
6022904
Modify deploy_test.py
Jul 29, 2021
f9780a0
compatible with old output, add efficient test back
xvjiarui Jul 30, 2021
ae14d2b
clear logic of exclusive
xvjiarui Jul 30, 2021
3eff09c
Warning about efficient_test.
Jul 30, 2021
41c790f
Modify format_results save folder.
Jul 30, 2021
332fc5c
Fix bugs of format_results.
Jul 30, 2021
0f48cc9
Modify deploy_test.py.
Jul 30, 2021
8f80dd9
Update doc
Aug 10, 2021
8630f32
Fix deploy test bugs.
Aug 10, 2021
ce8e814
Merge Master.
Aug 10, 2021
b3a6264
Merge Master.
Aug 10, 2021
89e182c
Fix custom dataset unit tests.
Aug 10, 2021
1773f99
Fix dataset unit tests.
Aug 10, 2021
b844145
Fix eval hook unit tests.
Aug 10, 2021
3f11345
Fix some incompatibilities.
Aug 10, 2021
8662e69
Add pre_eval argument for eval hooks.
Aug 11, 2021
8dea091
Update eval hook doc string.
Aug 11, 2021
fd28ab3
Make pre_eval False by default.
Aug 12, 2021
ebea350
Add unit tests for dataset format_results.
Aug 16, 2021
d0a3528
Fix some comments and bc-breaking bug.
Aug 19, 2021
5ddf909
Merge Master.
Aug 19, 2021
f474721
Fix pre_eval set cfg field.
Aug 19, 2021
469ced8
Remove redundant codes.
Aug 19, 2021
6 changes: 4 additions & 2 deletions mmseg/apis/__init__.py
@@ -1,9 +1,11 @@
from .inference import inference_segmentor, init_segmentor, show_result_pyplot
from .test import multi_gpu_test, single_gpu_test
from .test import (multi_gpu_test, progressive_multi_gpu_test,
                   progressive_single_gpu_test, single_gpu_test)
from .train import get_root_logger, set_random_seed, train_segmentor

__all__ = [
    'get_root_logger', 'set_random_seed', 'train_segmentor', 'init_segmentor',
    'inference_segmentor', 'multi_gpu_test', 'single_gpu_test',
    'show_result_pyplot'
    'show_result_pyplot', 'progressive_single_gpu_test',
    'progressive_multi_gpu_test'
]
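For orientation, the block below sketches how the two newly exported functions are meant to be used end to end. It mirrors the usual tools/test.py-style setup; the config and checkpoint paths are placeholders, and the call signatures follow this commit rather than the finally merged API.

```python
import mmcv
from mmcv.parallel import MMDataParallel
from mmcv.runner import load_checkpoint

from mmseg.apis import progressive_single_gpu_test
from mmseg.datasets import build_dataloader, build_dataset
from mmseg.models import build_segmentor

# Placeholder paths; substitute a real config and checkpoint.
config_file = 'configs/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes.py'
checkpoint_file = 'work_dirs/pspnet/latest.pth'

cfg = mmcv.Config.fromfile(config_file)
dataset = build_dataset(cfg.data.test)
data_loader = build_dataloader(
    dataset,
    samples_per_gpu=1,
    workers_per_gpu=2,
    dist=False,
    shuffle=False)

model = build_segmentor(cfg.model, test_cfg=cfg.get('test_cfg'))
load_checkpoint(model, checkpoint_file, map_location='cpu')
model = MMDataParallel(model, device_ids=[0])

# The progressive test returns four accumulated pixel-count tensors instead
# of per-image prediction maps, so memory usage stays roughly constant.
results = progressive_single_gpu_test(model, data_loader)
eval_results = dataset.progressive_evaluate(results, metric='mIoU')
print(eval_results)
```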
152 changes: 150 additions & 2 deletions mmseg/apis/test.py
@@ -8,6 +8,8 @@
from mmcv.image import tensor2imgs
from mmcv.runner import get_dist_info

from mmseg.core.evaluation.metrics import intersect_and_union


def np2tmp(array, temp_file_name=None, tmpdir=None):
    """Save ndarray to local numpy file.
@@ -44,8 +46,8 @@ def single_gpu_test(model,
        show (bool): Whether show results during inference. Default: False.
        out_dir (str, optional): If specified, the results will be dumped into
            the directory to save output results.
        efficient_test (bool): Whether save the results as local numpy files to
            save CPU memory during evaluation. Default: False.
        efficient_test (bool, optional): Whether save the results as local
            numpy files to save CPU memory during evaluation. Default: False.
        opacity(float): Opacity of painted segmentation map.
            Default 0.5.
            Must be in (0, 1] range.
@@ -163,3 +165,149 @@ def multi_gpu_test(model,
    else:
        results = collect_results_cpu(results, len(dataset), tmpdir)
    return results


def progressive_single_gpu_test(model,
                                data_loader,
                                show=False,
                                out_dir=None,
                                opacity=0.5):
    model.eval()
    dataset = data_loader.dataset
    num_classes = len(dataset.CLASSES)
    prog_bar = mmcv.ProgressBar(len(dataset))

    total_area_intersect = torch.zeros((num_classes, ), dtype=torch.float64)
    total_area_union = torch.zeros((num_classes, ), dtype=torch.float64)
    total_area_pred_label = torch.zeros((num_classes, ), dtype=torch.float64)
    total_area_label = torch.zeros((num_classes, ), dtype=torch.float64)

    cur = 0
    for _, data in enumerate(data_loader):
        with torch.no_grad():
            result = model(return_loss=False, **data)

        if show or out_dir:
            img_tensor = data['img'][0]
            img_metas = data['img_metas'][0].data[0]
            imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg'])
            assert len(imgs) == len(img_metas)

            for img, img_meta in zip(imgs, img_metas):
                h, w, _ = img_meta['img_shape']
                img_show = img[:h, :w, :]

                ori_h, ori_w = img_meta['ori_shape'][:-1]
                img_show = mmcv.imresize(img_show, (ori_w, ori_h))

                if out_dir:
                    out_file = osp.join(out_dir, img_meta['ori_filename'])
                else:
                    out_file = None

                model.module.show_result(
                    img_show,
                    result,
                    palette=dataset.PALETTE,
                    show=show,
                    out_file=out_file,
                    opacity=opacity)

        for i in range(len(result)):
            gt_semantic_map = dataset.get_gt_seg_map(cur + i)

            area_intersect, area_union, area_pred_label, area_label = \
                intersect_and_union(
                    result[i], gt_semantic_map, num_classes,
                    dataset.ignore_index, dataset.label_map,
                    dataset.reduce_zero_label)

            total_area_intersect += area_intersect
            total_area_union += area_union
            total_area_pred_label += area_pred_label
            total_area_label += area_label

        print(total_area_intersect / total_area_union)

        prog_bar.update()

        cur += len(result)

    return total_area_intersect, total_area_union, total_area_pred_label, \
        total_area_label
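The memory saving comes from collapsing each prediction into four (num_classes, ) count vectors via intersect_and_union as soon as it is produced, instead of keeping every segmentation map around until evaluation. A toy sketch with random arrays (not part of the PR) shows the accumulation pattern in isolation:

```python
import numpy as np
import torch

from mmseg.core.evaluation.metrics import intersect_and_union

num_classes = 3
totals = [torch.zeros((num_classes, ), dtype=torch.float64) for _ in range(4)]

# Two fake 4x4 "images": random predictions compared against random labels.
rng = np.random.default_rng(0)
for _ in range(2):
    pred = rng.integers(0, num_classes, size=(4, 4))
    gt = rng.integers(0, num_classes, size=(4, 4))
    counts = intersect_and_union(
        pred, gt, num_classes, ignore_index=255,
        label_map=dict(), reduce_zero_label=False)
    # Only these four (num_classes, ) vectors are kept per image; the full
    # prediction map can be discarded immediately afterwards.
    for total, count in zip(totals, counts):
        total += count

total_area_intersect, total_area_union = totals[0], totals[1]
print('running IoU:', total_area_intersect / total_area_union)
```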


# TODO: Support distributed test api
def progressive_multi_gpu_test(model,
                               data_loader,
                               tmpdir=None,
                               gpu_collect=False):
    model.eval()
    dataset = data_loader.dataset
    num_classes = len(dataset.CLASSES)
    rank, world_size = get_dist_info()
    if rank == 0:
        prog_bar = mmcv.ProgressBar(len(dataset))

    total_area_intersect = torch.zeros((num_classes, ), dtype=torch.float64)
    total_area_union = torch.zeros((num_classes, ), dtype=torch.float64)
    total_area_pred_label = torch.zeros((num_classes, ), dtype=torch.float64)
    total_area_label = torch.zeros((num_classes, ), dtype=torch.float64)

    cur = 0
    for _, data in enumerate(data_loader):
        with torch.no_grad():
            result = model(return_loss=False, rescale=True, **data)

        for i in range(len(result)):
            gt_semantic_map = dataset.get_gt_seg_map(cur + i * world_size)

            area_intersect, area_union, area_pred_label, area_label = \
                intersect_and_union(
                    result[i], gt_semantic_map, num_classes,
                    dataset.ignore_index, dataset.label_map,
                    dataset.reduce_zero_label)

            total_area_intersect += area_intersect
            total_area_union += area_union
            total_area_pred_label += area_pred_label
            total_area_label += area_label

        if rank == 0:
            for _ in range(len(result) * world_size):
                prog_bar.update()

        cur += len(result) * world_size

    pixel_count_matrix = [
        total_area_intersect, total_area_union, total_area_pred_label,
        total_area_label
    ]
    # collect results from all ranks
    if gpu_collect:
        results = collect_count_results_gpu(pixel_count_matrix, 4 * world_size)
    else:
        results = collect_count_results_cpu(pixel_count_matrix, 4 * world_size,
                                            tmpdir)
    return results


def collect_count_results_gpu(result_part, size):
    """Collect pixel count matrix results under gpu mode.

    On gpu mode, this function will encode results to gpu tensors and use gpu
    communication for results collection.

    Args:
        result_part (list[Tensor]): The four pixel count tensors
            {area_intersect, area_union, area_pred_label, area_label},
            each of shape (num_classes, ).
        size (int): Size of the results, commonly equal to the length of
            the results.
    """
    pass


def collect_count_results_cpu(result_part, size, tmpdir=None):
    pass
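At this commit the two collection helpers are still stubs. Because each rank only holds four fixed-shape (num_classes, ) tensors, one straightforward way to aggregate them is a summing all_reduce; the sketch below is just that illustration (it assumes an initialized default process group and is not the implementation the PR eventually merged). A CPU fallback could instead mirror mmcv's collect_results_cpu by dumping each rank's tensors to a shared tmpdir and summing them on rank 0.

```python
import torch
import torch.distributed as dist


def collect_count_results_gpu_sketch(result_part, size):
    """Illustrative only: sum the four pixel-count tensors over all ranks.

    Since every rank holds tensors of identical shape (num_classes, ), a
    summing all_reduce replaces the usual gather-and-concatenate logic.
    The `size` argument is kept for signature compatibility but is unused.
    """
    collected = []
    for count in result_part:
        count = count.clone().cuda()  # NCCL requires CUDA tensors
        dist.all_reduce(count, op=dist.ReduceOp.SUM)
        collected.append(count.cpu())
    return collected
```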
5 changes: 3 additions & 2 deletions mmseg/core/evaluation/__init__.py
@@ -1,8 +1,9 @@
from .class_names import get_classes, get_palette
from .eval_hooks import DistEvalHook, EvalHook
from .metrics import eval_metrics, mean_dice, mean_fscore, mean_iou
from .metrics import (calculate_metrics, eval_metrics, mean_dice, mean_fscore,
                      mean_iou)

__all__ = [
    'EvalHook', 'DistEvalHook', 'mean_dice', 'mean_iou', 'mean_fscore',
    'eval_metrics', 'get_classes', 'get_palette'
    'eval_metrics', 'get_classes', 'get_palette', 'calculate_metrics'
]
66 changes: 66 additions & 0 deletions mmseg/core/evaluation/metrics.py
@@ -324,3 +324,69 @@ def eval_metrics(results,
        for metric, metric_value in ret_metrics.items()
    })
    return ret_metrics


def calculate_metrics(total_area_intersect,
                      total_area_union,
                      total_area_pred_label,
                      total_area_label,
                      metrics=['mIoU'],
                      nan_to_num=None,
                      beta=1):
    """Calculate evaluation metrics from accumulated pixel count matrices.

    Args:
        total_area_intersect (Tensor): Per-class intersection areas summed
            over all images, shape (num_classes, ).
        total_area_union (Tensor): Per-class union areas summed over all
            images, shape (num_classes, ).
        total_area_pred_label (Tensor): Per-class prediction areas summed
            over all images, shape (num_classes, ).
        total_area_label (Tensor): Per-class ground truth areas summed over
            all images, shape (num_classes, ).
        metrics (list[str] | str): Metrics to be evaluated, 'mIoU', 'mDice'
            and 'mFscore'.
        nan_to_num (int, optional): If specified, NaN values will be replaced
            by the numbers defined by the user. Default: None.
        beta (int): Determines the weight of recall in the combined F-score.
            Default: 1.

    Returns:
        dict[str, ndarray]: Evaluation results. 'aAcc' is the scalar overall
            accuracy; per-class metrics have shape (num_classes, ).
    """
    if isinstance(metrics, str):
        metrics = [metrics]
    allowed_metrics = ['mIoU', 'mDice', 'mFscore']
    if not set(metrics).issubset(set(allowed_metrics)):
        raise KeyError('metrics {} is not supported'.format(metrics))

    all_acc = total_area_intersect.sum() / total_area_label.sum()
    ret_metrics = OrderedDict({'aAcc': all_acc})
    for metric in metrics:
        if metric == 'mIoU':
            iou = total_area_intersect / total_area_union
            acc = total_area_intersect / total_area_label
            ret_metrics['IoU'] = iou
            ret_metrics['Acc'] = acc
        elif metric == 'mDice':
            dice = 2 * total_area_intersect / (
                total_area_pred_label + total_area_label)
            acc = total_area_intersect / total_area_label
            ret_metrics['Dice'] = dice
            ret_metrics['Acc'] = acc
        elif metric == 'mFscore':
            precision = total_area_intersect / total_area_pred_label
            recall = total_area_intersect / total_area_label
            f_value = torch.tensor(
                [f_score(x[0], x[1], beta) for x in zip(precision, recall)])
            ret_metrics['Fscore'] = f_value
            ret_metrics['Precision'] = precision
            ret_metrics['Recall'] = recall

    ret_metrics = {
        metric: value.numpy()
        for metric, value in ret_metrics.items()
    }
    if nan_to_num is not None:
        ret_metrics = OrderedDict({
            metric: np.nan_to_num(metric_value, nan=nan_to_num)
            for metric, metric_value in ret_metrics.items()
        })
    return ret_metrics
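A small worked example (toy numbers, two classes) makes the arithmetic of calculate_metrics concrete: IoU is intersect/union per class, Acc is intersect/label, and aAcc is the ratio of the summed intersections to the summed label areas.

```python
import torch

from mmseg.core.evaluation import calculate_metrics

# Toy accumulated counts for a hypothetical 2-class dataset.
total_area_intersect = torch.tensor([80., 30.], dtype=torch.float64)
total_area_union = torch.tensor([100., 60.], dtype=torch.float64)
total_area_pred_label = torch.tensor([90., 50.], dtype=torch.float64)
total_area_label = torch.tensor([90., 40.], dtype=torch.float64)

metrics = calculate_metrics(total_area_intersect, total_area_union,
                            total_area_pred_label, total_area_label,
                            metrics=['mIoU'])
# IoU  = intersect / union -> [0.800, 0.500]
# Acc  = intersect / label -> [0.889, 0.750]
# aAcc = intersect.sum() / label.sum() -> 110 / 130 ~= 0.846
print(metrics)
```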
84 changes: 84 additions & 0 deletions mmseg/datasets/custom.py
@@ -10,6 +10,7 @@
from torch.utils.data import Dataset

from mmseg.core import eval_metrics
from mmseg.core.evaluation.metrics import calculate_metrics
from mmseg.utils import get_root_logger
from .builder import DATASETS
from .pipelines import Compose
@@ -240,6 +241,13 @@ def get_gt_seg_maps(self, efficient_test=False):
            gt_seg_maps.append(gt_seg_map)
        return gt_seg_maps

    def get_gt_seg_map(self, idx):
        """Get one ground truth segmentation map by index for evaluation."""
        seg_map = osp.join(self.ann_dir, self.img_infos[idx]['ann']['seg_map'])
        gt_seg_map = mmcv.imread(seg_map, flag='unchanged', backend='pillow')

        return gt_seg_map
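The per-index accessor is what lets the progressive test keep at most one ground truth map in memory at a time; the contrast with the existing bulk loader is roughly the following (illustrative only, with `dataset` standing for any CustomDataset instance):

```python
# Old path: every ground truth map is materialized up front.
all_maps = dataset.get_gt_seg_maps()   # list with one array per image

# New path: a single map is loaded on demand and can be dropped right after
# intersect_and_union has reduced it to per-class pixel counts.
one_map = dataset.get_gt_seg_map(0)
```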

    def get_classes_and_palette(self, classes=None, palette=None):
        """Get class names of current dataset.

@@ -303,6 +311,82 @@ def get_palette_for_custom_classes(self, class_names, palette=None):

        return palette

    def progressive_evaluate(self,
                             results,
                             metric='mIoU',
                             logger=None,
                             **kwargs):
        if isinstance(metric, str):
            metric = [metric]
        allowed_metrics = ['mIoU', 'mDice', 'mFscore']
        if not set(metric).issubset(set(allowed_metrics)):
            raise KeyError('metric {} is not supported'.format(metric))

        eval_results = {}

        total_area_intersect, total_area_union, total_area_pred_label, \
            total_area_label = results

        ret_metrics = calculate_metrics(total_area_intersect,
                                        total_area_union,
                                        total_area_pred_label,
                                        total_area_label, metric)

        # dataset.CLASSES is already required by progressive_single_gpu_test
        # and progressive_multi_gpu_test, so it is guaranteed to be available
        # here as well.
        class_names = self.CLASSES

        # summary table
        ret_metrics_summary = OrderedDict({
            ret_metric: np.round(np.nanmean(ret_metric_value) * 100, 2)
            for ret_metric, ret_metric_value in ret_metrics.items()
        })

        # each class table
        ret_metrics.pop('aAcc', None)
        ret_metrics_class = OrderedDict({
            ret_metric: np.round(ret_metric_value * 100, 2)
            for ret_metric, ret_metric_value in ret_metrics.items()
        })
        ret_metrics_class.update({'Class': class_names})
        ret_metrics_class.move_to_end('Class', last=False)

        # for logger
        class_table_data = PrettyTable()
        for key, val in ret_metrics_class.items():
            class_table_data.add_column(key, val)

        summary_table_data = PrettyTable()
        for key, val in ret_metrics_summary.items():
            if key == 'aAcc':
                summary_table_data.add_column(key, [val])
            else:
                summary_table_data.add_column('m' + key, [val])

        print_log('per class results:', logger)
        print_log('\n' + class_table_data.get_string(), logger=logger)
        print_log('Summary:', logger)
        print_log('\n' + summary_table_data.get_string(), logger=logger)

        # each metric dict
        for key, value in ret_metrics_summary.items():
            if key == 'aAcc':
                eval_results[key] = value / 100.0
            else:
                eval_results['m' + key] = value / 100.0

        ret_metrics_class.pop('Class', None)
        for key, value in ret_metrics_class.items():
            eval_results.update({
                key + '.' + str(name): value[idx] / 100.0
                for idx, name in enumerate(class_names)
            })

        if mmcv.is_list_of(results, str):
            for file_name in results:
                os.remove(file_name)

        return eval_results
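For reference, the returned dict is flat: summary metrics are prefixed with 'm' (except 'aAcc'), and per-class entries are keyed as '<Metric>.<class name>'. With a hypothetical 2-class dataset and the toy counts from the metrics example above, it would look roughly like this (class names and values are illustrative):

```python
eval_results = {
    'aAcc': 0.846,
    'mIoU': 0.65,
    'mAcc': 0.82,
    'IoU.road': 0.80,
    'IoU.car': 0.50,
    'Acc.road': 0.889,
    'Acc.car': 0.75,
}
```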

    def evaluate(self,
                 results,
                 metric='mIoU',