From 1b382199991a00b8980121bea84c3266b85880e4 Mon Sep 17 00:00:00 2001 From: SkafteNicki Date: Wed, 12 Jul 2023 17:17:44 +0200 Subject: [PATCH 1/3] fix --- src/torchmetrics/detection/mean_ap.py | 40 ++++++++++++++++----------- 1 file changed, 24 insertions(+), 16 deletions(-) diff --git a/src/torchmetrics/detection/mean_ap.py b/src/torchmetrics/detection/mean_ap.py index ac6c0e07b38..5542372d436 100644 --- a/src/torchmetrics/detection/mean_ap.py +++ b/src/torchmetrics/detection/mean_ap.py @@ -145,7 +145,15 @@ class MeanAveragePrecision(Metric): Args: box_format: - Input format of given boxes. Supported formats are ``[`xyxy`, `xywh`, `cxcywh`]``. + Input format of given boxes. Supported formats are: + + - 'xyxy': boxes are represented via corners, x1, y1 being top left and x2, y2 being bottom right. + - 'xywh' : boxes are represented via corner, width and height, x1, y1 being top left, w, h being + width and height. This is the default format used by pycoco and all input formats will be converted + to this. + - 'cxcywh': boxes are represented via centre, width and height, cx, cy being center of box, w, h being + width and height. + iou_type: Type of input (either masks or bounding-boxes) used for computing IOU. Supported IOU types are ``["bbox", "segm"]``. If using ``"segm"``, masks should be provided in input. 
@@ -232,7 +240,7 @@ class MeanAveragePrecision(Metric): def __init__( self, - box_format: str = "xyxy", + box_format: Literal["xyxy", "xywh", "cxcywh"] = "xyxy", iou_type: Literal["bbox", "segm"] = "bbox", iou_thresholds: Optional[List[float]] = None, rec_thresholds: Optional[List[float]] = None, @@ -345,27 +353,27 @@ def compute(self) -> dict: coco_target.createIndex() coco_preds.createIndex() - coco_eval = COCOeval(coco_target, coco_preds, iouType=self.iou_type) - coco_eval.params.iouThrs = np.array(self.iou_thresholds, dtype=np.float64) - coco_eval.params.recThrs = np.array(self.rec_thresholds, dtype=np.float64) - coco_eval.params.maxDets = self.max_detection_thresholds + self.coco_eval = COCOeval(coco_target, coco_preds, iouType=self.iou_type) + self.coco_eval.params.iouThrs = np.array(self.iou_thresholds, dtype=np.float64) + self.coco_eval.params.recThrs = np.array(self.rec_thresholds, dtype=np.float64) + self.coco_eval.params.maxDets = self.max_detection_thresholds - coco_eval.evaluate() - coco_eval.accumulate() - coco_eval.summarize() - stats = coco_eval.stats + self.coco_eval.evaluate() + self.coco_eval.accumulate() + self.coco_eval.summarize() + stats = self.coco_eval.stats # if class mode is enabled, evaluate metrics per class if self.class_metrics: map_per_class_list = [] mar_100_per_class_list = [] for class_id in self._get_classes(): - coco_eval.params.catIds = [class_id] + self.coco_eval.params.catIds = [class_id] with contextlib.redirect_stdout(io.StringIO()): - coco_eval.evaluate() - coco_eval.accumulate() - coco_eval.summarize() - class_stats = coco_eval.stats + self.coco_eval.evaluate() + self.coco_eval.accumulate() + self.coco_eval.summarize() + class_stats = self.coco_eval.stats map_per_class_list.append(torch.tensor([class_stats[0]])) mar_100_per_class_list.append(torch.tensor([class_stats[8]])) @@ -545,7 +553,7 @@ def _get_safe_item_values(self, item: Dict[str, Any]) -> Union[Tensor, Tuple]: if self.iou_type == "bbox": boxes = 
_fix_empty_tensors(item["boxes"]) if boxes.numel() > 0: - boxes = box_convert(boxes, in_fmt=self.box_format, out_fmt="xyxy") + boxes = box_convert(boxes, in_fmt=self.box_format, out_fmt="xywh") return boxes if self.iou_type == "segm": masks = [] From 0ac8da41772349bd8a48561cdf43ab803e3407a8 Mon Sep 17 00:00:00 2001 From: SkafteNicki Date: Wed, 12 Jul 2023 17:18:50 +0200 Subject: [PATCH 2/3] add tests --- tests/unittests/detection/test_map.py | 29 ++++++++++++++++++++++++++- 1 file changed, 28 insertions(+), 1 deletion(-) diff --git a/tests/unittests/detection/test_map.py b/tests/unittests/detection/test_map.py index 8f432f8c0cf..f1c002428a0 100644 --- a/tests/unittests/detection/test_map.py +++ b/tests/unittests/detection/test_map.py @@ -140,6 +140,7 @@ def test_map(self, iou_type, iou_thresholds, rec_thresholds, ddp): "iou_thresholds": iou_thresholds, "rec_thresholds": rec_thresholds, "class_metrics": False, + "box_format": "xywh", }, check_batch=False, atol=1e-2, @@ -154,7 +155,7 @@ def test_map_classwise(self, iou_type, ddp): target=target, metric_class=MeanAveragePrecision, reference_metric=partial(_compare_again_coco_fn, iou_type=iou_type, class_metrics=True), - metric_args={"iou_type": iou_type, "class_metrics": True}, + metric_args={"box_format": "xywh", "iou_type": iou_type, "class_metrics": True}, check_batch=False, atol=1e-1, ) @@ -656,3 +657,29 @@ def test_device_changing(): metric = metric.cpu() val = metric.compute() assert isinstance(val, dict) + + +@pytest.mark.parametrize( ("box_format", "iou_val_expected", "map_val_expected"), [ ("xyxy", 0.25, 1), ("xywh", 0.143, 0.0), ("cxcywh", 0.143, 0.0), ], ) def test_for_box_format(box_format, iou_val_expected, map_val_expected): """Test that only the correct box format leads to a score of 1. See issue: https://github.com/Lightning-AI/torchmetrics/issues/1908. 
+ """ + predictions = [ + {"boxes": torch.tensor([[0.5, 0.5, 1, 1]]), "scores": torch.tensor([1.0]), "labels": torch.tensor([0])} + ] + + targets = [{"boxes": torch.tensor([[0, 0, 1, 1]]), "labels": torch.tensor([0])}] + + metric = MeanAveragePrecision(box_format=box_format, iou_thresholds=[0.2]) + metric.update(predictions, targets) + result = metric.compute() + assert result["map"].item() == map_val_expected + assert round(float(metric.coco_eval.ious[(0, 0)]), 3) == iou_val_expected From 8e2eaa06746321189c29a09a971bf75e13031978 Mon Sep 17 00:00:00 2001 From: SkafteNicki Date: Wed, 12 Jul 2023 17:41:31 +0200 Subject: [PATCH 3/3] changelog --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b364b9b2129..210373ae41c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -35,6 +35,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Fixed bug related to `MeanMetric` and broadcasting of weights when Nans are present ([#1898](https://github.com/Lightning-AI/torchmetrics/pull/1898)) +- Fixed bug related to expected input format of pycoco in `MeanAveragePrecision` ([#1913](https://github.com/Lightning-AI/torchmetrics/pull/1913)) + + ## [1.0.0] - 2022-07-04 ### Added