From e85f605046025ed66c313dcc1f4dfba085f1bed5 Mon Sep 17 00:00:00 2001
From: uddavgoshika
Date: Wed, 16 Jul 2025 16:04:15 +0530
Subject: [PATCH 1/2] Fix: compute correct IoU in mean_iou metric (#421)

---
 metrics/mean_iou/mean_iou.py | 310 ++++------------------------------
 1 file changed, 29 insertions(+), 281 deletions(-)

diff --git a/metrics/mean_iou/mean_iou.py b/metrics/mean_iou/mean_iou.py
index 4c19864d..8423704f 100644
--- a/metrics/mean_iou/mean_iou.py
+++ b/metrics/mean_iou/mean_iou.py
@@ -11,303 +11,51 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-"""Mean IoU (Intersection-over-Union) metric."""
-
-from typing import Dict, Optional
-
-import datasets
-import numpy as np
-
+from typing import List, Optional
 import evaluate
-
-
+import numpy as np
 _DESCRIPTION = """
-IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
-between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
-the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
+This metric computes the mean Intersection-Over-Union (mIoU) for image segmentation tasks.
 """
 
 _KWARGS_DESCRIPTION = """
 Args:
-    predictions (`List[ndarray]`):
-        List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
-    references (`List[ndarray]`):
-        List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
-    num_labels (`int`):
-        Number of classes (categories).
-    ignore_index (`int`):
-        Index that will be ignored during evaluation.
-    nan_to_num (`int`, *optional*):
-        If specified, NaN values will be replaced by the number defined by the user.
-    label_map (`dict`, *optional*):
-        If specified, dictionary mapping old label indices to new label indices.
-    reduce_labels (`bool`, *optional*, defaults to `False`):
-        Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
-        and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
+    predictions (List[int]): Predicted class indices.
+    references (List[int]): Ground truth class indices.
+    num_labels (int): Total number of classes.
 
 Returns:
-    `Dict[str, float | ndarray]` comprising various elements:
-    - *mean_iou* (`float`):
-        Mean Intersection-over-Union (IoU averaged over all categories).
-    - *mean_accuracy* (`float`):
-        Mean accuracy (averaged over all categories).
-    - *overall_accuracy* (`float`):
-        Overall accuracy on all images.
-    - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
-        Per category accuracy.
-    - *per_category_iou* (`ndarray` of shape `(num_labels,)`):
-        Per category IoU.
-
-Examples:
-
-    >>> import numpy as np
-
-    >>> mean_iou = evaluate.load("mean_iou")
-
-    >>> # suppose one has 3 different segmentation maps predicted
-    >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
-    >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
-
-    >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
-    >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
-
-    >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
-    >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
-
-    >>> predicted = [predicted_1, predicted_2, predicted_3]
-    >>> ground_truth = [actual_1, actual_2, actual_3]
-
-    >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
-    >>> print(results) # doctest: +NORMALIZE_WHITESPACE
-    {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0.   , 0.   , 0.375, 0.4  , 0.5  , 0.   , 0.5  , 1.   , 1.   , 1.   ]), 'per_category_accuracy': array([0.        , 0.        , 0.75      , 0.66666667, 1.        , 0.        , 0.5       , 1.        , 1.        , 1.        ])}
+    mean_iou (float): The mean IoU across all classes.
 """
 
-_CITATION = """\
-@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
-author = {{MMSegmentation Contributors}},
-license = {Apache-2.0},
-month = {7},
-title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
-url = {https://github.com/open-mmlab/mmsegmentation},
-year = {2020}
-}"""
-
-
-def intersect_and_union(
-    pred_label,
-    label,
-    num_labels,
-    ignore_index: bool,
-    label_map: Optional[Dict[int, int]] = None,
-    reduce_labels: bool = False,
-):
-    """Calculate intersection and Union.
-
-    Args:
-        pred_label (`ndarray`):
-            Prediction segmentation map of shape (height, width).
-        label (`ndarray`):
-            Ground truth segmentation map of shape (height, width).
-        num_labels (`int`):
-            Number of categories.
-        ignore_index (`int`):
-            Index that will be ignored during evaluation.
-        label_map (`dict`, *optional*):
-            Mapping old labels to new labels. The parameter will work only when label is str.
-        reduce_labels (`bool`, *optional*, defaults to `False`):
-            Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
-            and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
-
-    Returns:
-        area_intersect (`ndarray`):
-            The intersection of prediction and ground truth histogram on all classes.
-        area_union (`ndarray`):
-            The union of prediction and ground truth histogram on all classes.
-        area_pred_label (`ndarray`):
-            The prediction histogram on all classes.
-        area_label (`ndarray`):
-            The ground truth histogram on all classes.
-    """
-    if label_map is not None:
-        for old_id, new_id in label_map.items():
-            label[label == old_id] = new_id
-
-    # turn into Numpy arrays
-    pred_label = np.array(pred_label)
-    label = np.array(label)
-
-    if reduce_labels:
-        label[label == 0] = 255
-        label = label - 1
-        label[label == 254] = 255
-
-    mask = label != ignore_index
-    mask = np.not_equal(label, ignore_index)
-    pred_label = pred_label[mask]
-    label = np.array(label)[mask]
-
-    intersect = pred_label[pred_label == label]
-
-    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
-    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
-    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]
-
-    area_union = area_pred_label + area_label - area_intersect
-
-    return area_intersect, area_union, area_pred_label, area_label
-
-
-def total_intersect_and_union(
-    results,
-    gt_seg_maps,
-    num_labels,
-    ignore_index: bool,
-    label_map: Optional[Dict[int, int]] = None,
-    reduce_labels: bool = False,
-):
-    """Calculate Total Intersection and Union, by calculating `intersect_and_union` for each (predicted, ground truth) pair.
-
-    Args:
-        results (`ndarray`):
-            List of prediction segmentation maps, each of shape (height, width).
-        gt_seg_maps (`ndarray`):
-            List of ground truth segmentation maps, each of shape (height, width).
-        num_labels (`int`):
-            Number of categories.
-        ignore_index (`int`):
-            Index that will be ignored during evaluation.
-        label_map (`dict`, *optional*):
-            Mapping old labels to new labels. The parameter will work only when label is str.
-        reduce_labels (`bool`, *optional*, defaults to `False`):
-            Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
-            and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
-
-    Returns:
-        total_area_intersect (`ndarray`):
-            The intersection of prediction and ground truth histogram on all classes.
-        total_area_union (`ndarray`):
-            The union of prediction and ground truth histogram on all classes.
-        total_area_pred_label (`ndarray`):
-            The prediction histogram on all classes.
-        total_area_label (`ndarray`):
-            The ground truth histogram on all classes.
-    """
-    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
-    total_area_union = np.zeros((num_labels,), dtype=np.float64)
-    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
-    total_area_label = np.zeros((num_labels,), dtype=np.float64)
-    for result, gt_seg_map in zip(results, gt_seg_maps):
-        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
-            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
-        )
-        total_area_intersect += area_intersect
-        total_area_union += area_union
-        total_area_pred_label += area_pred_label
-        total_area_label += area_label
-    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
-
-
-def mean_iou(
-    results,
-    gt_seg_maps,
-    num_labels,
-    ignore_index: bool,
-    nan_to_num: Optional[int] = None,
-    label_map: Optional[Dict[int, int]] = None,
-    reduce_labels: bool = False,
-):
-    """Calculate Mean Intersection and Union (mIoU).
-
-    Args:
-        results (`ndarray`):
-            List of prediction segmentation maps, each of shape (height, width).
-        gt_seg_maps (`ndarray`):
-            List of ground truth segmentation maps, each of shape (height, width).
-        num_labels (`int`):
-            Number of categories.
-        ignore_index (`int`):
-            Index that will be ignored during evaluation.
-        nan_to_num (`int`, *optional*):
-            If specified, NaN values will be replaced by the number defined by the user.
-        label_map (`dict`, *optional*):
-            Mapping old labels to new labels. The parameter will work only when label is str.
-        reduce_labels (`bool`, *optional*, defaults to `False`):
-            Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
-            and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
-
-    Returns:
-        `Dict[str, float | ndarray]` comprising various elements:
-        - *mean_iou* (`float`):
-            Mean Intersection-over-Union (IoU averaged over all categories).
-        - *mean_accuracy* (`float`):
-            Mean accuracy (averaged over all categories).
-        - *overall_accuracy* (`float`):
-            Overall accuracy on all images.
-        - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
-            Per category accuracy.
-        - *per_category_iou* (`ndarray` of shape `(num_labels,)`):
-            Per category IoU.
-    """
-    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
-        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
-    )
-
-    # compute metrics
-    metrics = dict()
-
-    all_acc = total_area_intersect.sum() / total_area_label.sum()
-    iou = total_area_intersect / total_area_union
-    acc = total_area_intersect / total_area_label
-
-    metrics["mean_iou"] = np.nanmean(iou)
-    metrics["mean_accuracy"] = np.nanmean(acc)
-    metrics["overall_accuracy"] = all_acc
-    metrics["per_category_iou"] = iou
-    metrics["per_category_accuracy"] = acc
-
-    if nan_to_num is not None:
-        metrics = dict(
-            {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}
-        )
-
-    return metrics
-
-
-@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
 class MeanIoU(evaluate.Metric):
     def _info(self):
         return evaluate.MetricInfo(
             description=_DESCRIPTION,
-            citation=_CITATION,
+            citation="",
             inputs_description=_KWARGS_DESCRIPTION,
-            features=datasets.Features(
-                {
-                    "predictions": datasets.Image(),
-                    "references": datasets.Image(),
-                }
-            ),
-            reference_urls=[
-                "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
-            ],
+            features=evaluate.Features({
+                'predictions': evaluate.Sequence(evaluate.Value('int64')),
+                'references': evaluate.Sequence(evaluate.Value('int64')),
+            }),
+            reference_urls=[]
         )
 
-    def _compute(
-        self,
-        predictions,
-        references,
-        num_labels: int,
-        ignore_index: bool,
-        nan_to_num: Optional[int] = None,
-        label_map: Optional[Dict[int, int]] = None,
-        reduce_labels: bool = False,
-    ):
-        iou_result = mean_iou(
-            results=predictions,
-            gt_seg_maps=references,
-            num_labels=num_labels,
-            ignore_index=ignore_index,
-            nan_to_num=nan_to_num,
-            label_map=label_map,
-            reduce_labels=reduce_labels,
-        )
-        return iou_result
+    def _compute(self, predictions, references, num_labels):
+        predictions = np.array(predictions)
+        references = np.array(references)
+        iou_list = []
+
+        for label in range(num_labels):
+            tp = np.sum((predictions == label) & (references == label))
+            fp = np.sum((predictions == label) & (references != label))
+            fn = np.sum((predictions != label) & (references == label))
+
+            denom = tp + fp + fn + 1e-10  # Prevent division by zero
+            iou = tp / denom if denom != 0 else 0.0
+            iou_list.append(iou)
+
+        mean_iou = np.mean(iou_list)
+        return {"mean_iou": mean_iou}

From 71cee97a36f3c207bfd1b2a4560e37ffa33ecdcb Mon Sep 17 00:00:00 2001
From: uddavgoshika
Date: Wed, 16 Jul 2025 16:20:40 +0530
Subject: [PATCH 2/2] Fix: corrected mean IoU logic and updated PR (#421)

---
 metrics/mean_iou/mean_iou.py | 274 +++++++++++++++++++++++++++++++++--
 1 file changed, 261 insertions(+), 13 deletions(-)

diff --git a/metrics/mean_iou/mean_iou.py b/metrics/mean_iou/mean_iou.py
index 8423704f..0911ad76 100644
--- a/metrics/mean_iou/mean_iou.py
+++ b/metrics/mean_iou/mean_iou.py
@@ -11,36 +11,284 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+"""Mean IoU (Intersection-over-Union) metric."""
+
-from typing import List, Optional
-import evaluate
+from typing import Dict, Optional
+
+import datasets
 import numpy as np
+
+import evaluate
+
+
 _DESCRIPTION = """
-This metric computes the mean Intersection-Over-Union (mIoU) for image segmentation tasks.
+IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
+between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
+the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
 """
 
 _KWARGS_DESCRIPTION = """
 Args:
-    predictions (List[int]): Predicted class indices.
-    references (List[int]): Ground truth class indices.
-    num_labels (int): Total number of classes.
+    predictions (`List[ndarray]`):
+        List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
+    references (`List[ndarray]`):
+        List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
+    num_labels (`int`):
+        Number of classes (categories).
+    ignore_index (`int`):
+        Index that will be ignored during evaluation.
+    nan_to_num (`int`, *optional*):
+        If specified, NaN values will be replaced by the number defined by the user.
+    label_map (`dict`, *optional*):
+        If specified, dictionary mapping old label indices to new label indices.
+    reduce_labels (`bool`, *optional*, defaults to `False`):
+        Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
+        and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
 
 Returns:
-    mean_iou (float): The mean IoU across all classes.
+    `Dict[str, float | ndarray]` comprising various elements:
+    - *mean_iou* (`float`):
+        Mean Intersection-over-Union (IoU averaged over all categories).
+    - *mean_accuracy* (`float`):
+        Mean accuracy (averaged over all categories).
+    - *overall_accuracy* (`float`):
+        Overall accuracy on all images.
+    - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
+        Per category accuracy.
+    - *per_category_iou* (`ndarray` of shape `(num_labels,)`):
+        Per category IoU.
+
+Examples:
+
+    >>> import numpy as np
+
+    >>> mean_iou = evaluate.load("mean_iou")
+
+    >>> # suppose one has 3 different segmentation maps predicted
+    >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
+    >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
+
+    >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
+    >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
+
+    >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
+    >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
+
+    >>> predicted = [predicted_1, predicted_2, predicted_3]
+    >>> ground_truth = [actual_1, actual_2, actual_3]
+
+    >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
+    >>> print(results) # doctest: +NORMALIZE_WHITESPACE
+    {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0.   , 0.   , 0.375, 0.4  , 0.5  , 0.   , 0.5  , 1.   , 1.   , 1.   ]), 'per_category_accuracy': array([0.        , 0.        , 0.75      , 0.66666667, 1.        , 0.        , 0.5       , 1.        , 1.        , 1.        ])}
 """
 
+_CITATION = """\
+@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
+author = {{MMSegmentation Contributors}},
+license = {Apache-2.0},
+month = {7},
+title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
+url = {https://github.com/open-mmlab/mmsegmentation},
+year = {2020}
+}"""
+
+
+def intersect_and_union(
+    pred_label,
+    label,
+    num_labels,
+    ignore_index: int,
+    label_map: Optional[Dict[int, int]] = None,
+    reduce_labels: bool = False,
+):
+    """Calculate intersection and union.
+
+    Args:
+        pred_label (`ndarray`):
+            Prediction segmentation map of shape (height, width).
+        label (`ndarray`):
+            Ground truth segmentation map of shape (height, width).
+        num_labels (`int`):
+            Number of categories.
+        ignore_index (`int`):
+            Index that will be ignored during evaluation.
+        label_map (`dict`, *optional*):
+            Mapping old labels to new labels. The parameter will work only when label is str.
+        reduce_labels (`bool`, *optional*, defaults to `False`):
+            Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
+            and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
+
+    Returns:
+        area_intersect (`ndarray`):
+            The intersection of prediction and ground truth histogram on all classes.
+        area_union (`ndarray`):
+            The union of prediction and ground truth histogram on all classes.
+        area_pred_label (`ndarray`):
+            The prediction histogram on all classes.
+        area_label (`ndarray`):
+            The ground truth histogram on all classes.
+    """
+    if label_map is not None:
+        for old_id, new_id in label_map.items():
+            label[label == old_id] = new_id
+
+    # turn into Numpy arrays
+    pred_label = np.array(pred_label)
+    label = np.array(label)
+
+    if reduce_labels:
+        label[label == 0] = 255
+        label = label - 1
+        label[label == 254] = 255
+
+    mask = label != ignore_index
+    mask = np.not_equal(label, ignore_index)
+    pred_label = pred_label[mask]
+    label = np.array(label)[mask]
+
+    intersect = pred_label[pred_label == label]
+
+    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
+    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
+    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]
+
+    area_union = area_pred_label + area_label - area_intersect
+
+    return area_intersect, area_union, area_pred_label, area_label
+
+
+def total_intersect_and_union(
+    results,
+    gt_seg_maps,
+    num_labels,
+    ignore_index: int,
+    label_map: Optional[Dict[int, int]] = None,
+    reduce_labels: bool = False,
+):
+    """Calculate Total Intersection and Union, by calculating `intersect_and_union` for each (predicted, ground truth) pair.
+
+    Args:
+        results (`ndarray`):
+            List of prediction segmentation maps, each of shape (height, width).
+        gt_seg_maps (`ndarray`):
+            List of ground truth segmentation maps, each of shape (height, width).
+        num_labels (`int`):
+            Number of categories.
+        ignore_index (`int`):
+            Index that will be ignored during evaluation.
+        label_map (`dict`, *optional*):
+            Mapping old labels to new labels. The parameter will work only when label is str.
+        reduce_labels (`bool`, *optional*, defaults to `False`):
+            Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
+            and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
+
+    Returns:
+        total_area_intersect (`ndarray`):
+            The intersection of prediction and ground truth histogram on all classes.
+        total_area_union (`ndarray`):
+            The union of prediction and ground truth histogram on all classes.
+        total_area_pred_label (`ndarray`):
+            The prediction histogram on all classes.
+        total_area_label (`ndarray`):
+            The ground truth histogram on all classes.
+    """
+    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
+    total_area_union = np.zeros((num_labels,), dtype=np.float64)
+    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
+    total_area_label = np.zeros((num_labels,), dtype=np.float64)
+    for result, gt_seg_map in zip(results, gt_seg_maps):
+        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
+            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
+        )
+        total_area_intersect += area_intersect
+        total_area_union += area_union
+        total_area_pred_label += area_pred_label
+        total_area_label += area_label
+    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
+
+
+def mean_iou(
+    results,
+    gt_seg_maps,
+    num_labels,
+    ignore_index: int,
+    nan_to_num: Optional[int] = None,
+    label_map: Optional[Dict[int, int]] = None,
+    reduce_labels: bool = False,
+):
+    """Calculate Mean Intersection and Union (mIoU).
+
+    Args:
+        results (`ndarray`):
+            List of prediction segmentation maps, each of shape (height, width).
+        gt_seg_maps (`ndarray`):
+            List of ground truth segmentation maps, each of shape (height, width).
+        num_labels (`int`):
+            Number of categories.
+        ignore_index (`int`):
+            Index that will be ignored during evaluation.
+        nan_to_num (`int`, *optional*):
+            If specified, NaN values will be replaced by the number defined by the user.
+        label_map (`dict`, *optional*):
+            Mapping old labels to new labels. The parameter will work only when label is str.
+        reduce_labels (`bool`, *optional*, defaults to `False`):
+            Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
+            and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
+
+    Returns:
+        `Dict[str, float | ndarray]` comprising various elements:
+        - *mean_iou* (`float`):
+            Mean Intersection-over-Union (IoU averaged over all categories).
+        - *mean_accuracy* (`float`):
+            Mean accuracy (averaged over all categories).
+        - *overall_accuracy* (`float`):
+            Overall accuracy on all images.
+        - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
+            Per category accuracy.
+        - *per_category_iou* (`ndarray` of shape `(num_labels,)`):
+            Per category IoU.
+    """
+    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
+        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
+    )
+
+    # compute metrics
+    metrics = dict()
+
+    all_acc = total_area_intersect.sum() / total_area_label.sum()
+    iou = total_area_intersect / total_area_union
+    acc = total_area_intersect / total_area_label
+
+    metrics["mean_iou"] = np.nanmean(iou)
+    metrics["mean_accuracy"] = np.nanmean(acc)
+    metrics["overall_accuracy"] = all_acc
+    metrics["per_category_iou"] = iou
+    metrics["per_category_accuracy"] = acc
+
+    if nan_to_num is not None:
+        metrics = dict(
+            {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}
+        )
+
+    return metrics
+
+
+@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
 class MeanIoU(evaluate.Metric):
     def _info(self):
         return evaluate.MetricInfo(
             description=_DESCRIPTION,
-            citation="",
+            citation=_CITATION,
             inputs_description=_KWARGS_DESCRIPTION,
-            features=evaluate.Features({
-                'predictions': evaluate.Sequence(evaluate.Value('int64')),
-                'references': evaluate.Sequence(evaluate.Value('int64')),
-            }),
-            reference_urls=[]
+            features=datasets.Features(
+                {
+                    "predictions": datasets.Image(),
+                    "references": datasets.Image(),
+                }
+            ),
+            reference_urls=[
+                "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
+            ],
         )
 
-    def _compute(self, predictions, references, num_labels):
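
The behavioral gap between the two _compute() versions above comes down to how classes that occur in neither the prediction nor the reference are averaged. Below is a minimal sketch of that difference; the toy arrays and variable names are illustrative only and appear in neither commit.

# Sketch (illustrative, not from the patch): absent classes under the two schemes.
import numpy as np

pred = np.array([0, 0, 1, 1])
gt = np.array([0, 1, 1, 1])
num_labels = 3  # label 2 appears in neither array

# Histogram-based counts, as in the restored intersect_and_union():
intersect = pred[pred == gt]
area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
area_pred = np.histogram(pred, bins=num_labels, range=(0, num_labels - 1))[0]
area_gt = np.histogram(gt, bins=num_labels, range=(0, num_labels - 1))[0]
area_union = area_pred + area_gt - area_intersect

with np.errstate(divide="ignore", invalid="ignore"):
    iou = area_intersect / area_union  # label 2: 0/0 -> NaN
print(np.nanmean(iou))  # ~0.5833: absent classes drop out of the mean

# The reverted _compute() instead gave absent classes an IoU of exactly 0,
# since tp / (tp + fp + fn + 1e-10) is 0 when all three counts are zero.
# tp + fp + fn equals area_union per class, so it is equivalent to:
naive_iou = area_intersect / (area_union + 1e-10)
print(naive_iou.mean())  # ~0.3889 for the same inputs

The NaN-plus-np.nanmean route averages only classes that actually occur (0.5 and 2/3 here), while the epsilon route folds absent classes in as hard zeros and deflates the mean. That deflation, together with the first commit dropping ignore_index, nan_to_num, label_map, and reduce_labels handling entirely, is presumably what the second commit reverts.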