diff --git a/mmdet3d/datasets/nuscenes_dataset.py b/mmdet3d/datasets/nuscenes_dataset.py
index 6811a2b4f7..a7d4b06375 100644
--- a/mmdet3d/datasets/nuscenes_dataset.py
+++ b/mmdet3d/datasets/nuscenes_dataset.py
@@ -98,6 +98,14 @@ class NuScenesDataset(Custom3DDataset):
         'vehicle.parked',
         'vehicle.stopped',
     ]
+    # https://github.com/nutonomy/nuscenes-devkit/blob/57889ff20678577025326cfc24e57424a829be0a/python-sdk/nuscenes/eval/detection/evaluate.py#L222 # noqa
+    ErrNameMapping = {
+        'trans_err': 'mATE',
+        'scale_err': 'mASE',
+        'orient_err': 'mAOE',
+        'vel_err': 'mAVE',
+        'attr_err': 'mAAE'
+    }
     CLASSES = ('car', 'truck', 'trailer', 'bus', 'construction_vehicle',
                'bicycle', 'motorcycle', 'pedestrian', 'traffic_cone',
                'barrier')
@@ -404,6 +412,10 @@ def _evaluate_single(self,
             for k, v in metrics['label_tp_errors'][name].items():
                 val = float('{:.4f}'.format(v))
                 detail['{}/{}_{}'.format(metric_prefix, name, k)] = val
+            for k, v in metrics['tp_errors'].items():
+                val = float('{:.4f}'.format(v))
+                detail['{}/{}'.format(metric_prefix,
+                                      self.ErrNameMapping[k])] = val
 
         detail['{}/NDS'.format(metric_prefix)] = metrics['nd_score']
         detail['{}/mAP'.format(metric_prefix)] = metrics['mean_ap']
diff --git a/mmdet3d/datasets/nuscenes_mono_dataset.py b/mmdet3d/datasets/nuscenes_mono_dataset.py
index 32431e2915..37d1aff037 100644
--- a/mmdet3d/datasets/nuscenes_mono_dataset.py
+++ b/mmdet3d/datasets/nuscenes_mono_dataset.py
@@ -58,6 +58,14 @@ class NuScenesMonoDataset(CocoDataset):
         'barrier': '',
         'traffic_cone': '',
     }
+    # https://github.com/nutonomy/nuscenes-devkit/blob/57889ff20678577025326cfc24e57424a829be0a/python-sdk/nuscenes/eval/detection/evaluate.py#L222 # noqa
+    ErrNameMapping = {
+        'trans_err': 'mATE',
+        'scale_err': 'mASE',
+        'orient_err': 'mAOE',
+        'vel_err': 'mAVE',
+        'attr_err': 'mAAE'
+    }
 
     def __init__(self,
                  data_root,
@@ -423,6 +431,10 @@ def _evaluate_single(self,
             for k, v in metrics['label_tp_errors'][name].items():
                 val = float('{:.4f}'.format(v))
                 detail['{}/{}_{}'.format(metric_prefix, name, k)] = val
+            for k, v in metrics['tp_errors'].items():
+                val = float('{:.4f}'.format(v))
+                detail['{}/{}'.format(metric_prefix,
+                                      self.ErrNameMapping[k])] = val
 
         detail['{}/NDS'.format(metric_prefix)] = metrics['nd_score']
         detail['{}/mAP'.format(metric_prefix)] = metrics['mean_ap']
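
What the change does, as a minimal standalone sketch (not part of the patch): the new loop flattens the devkit's dataset-wide true-positive errors (`metrics['tp_errors']`) into the `detail` dict under the standard nuScenes abbreviations (mATE, mASE, mAOE, mAVE, mAAE), alongside the per-class entries already logged. In the real code `metrics` is loaded from the metrics_summary.json written by the nuScenes devkit; the sample values and the `metric_prefix` below are made up for illustration.

    # Hypothetical sample of metrics_summary.json content; real values come
    # from the nuScenes devkit evaluation.
    ErrNameMapping = {
        'trans_err': 'mATE',
        'scale_err': 'mASE',
        'orient_err': 'mAOE',
        'vel_err': 'mAVE',
        'attr_err': 'mAAE',
    }
    metrics = {
        'tp_errors': {
            'trans_err': 0.37214,
            'scale_err': 0.26512,
            'orient_err': 0.41893,
            'vel_err': 0.32418,
            'attr_err': 0.18967,
        }
    }
    metric_prefix = 'pts_bbox_NuScenes'  # example prefix, not fixed

    detail = {}
    for k, v in metrics['tp_errors'].items():
        # round to 4 decimals, same as the per-class metrics above
        val = float('{:.4f}'.format(v))
        detail['{}/{}'.format(metric_prefix, ErrNameMapping[k])] = val

    print(detail)
    # {'pts_bbox_NuScenes/mATE': 0.3721, 'pts_bbox_NuScenes/mASE': 0.2651,
    #  'pts_bbox_NuScenes/mAOE': 0.4189, 'pts_bbox_NuScenes/mAVE': 0.3242,
    #  'pts_bbox_NuScenes/mAAE': 0.1897}

With this in place, the aggregate TP errors show up in evaluation logs with the same names the nuScenes leaderboard uses, instead of only the per-class breakdowns.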