Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[Fix] Make accuracy take into account ignore_index #1259

Merged
merged 2 commits into from
Feb 14, 2022
Merged
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Next Next commit
make accuracy take into account ignore_index
HJoonKwon committed Feb 14, 2022
commit c730122fb01ab1a1e2e68488243310d1dbe05a8d
3 changes: 2 additions & 1 deletion mmseg/models/decode_heads/decode_head.py
Original file line number Diff line number Diff line change
@@ -261,5 +261,6 @@ def losses(self, seg_logit, seg_label):
weight=seg_weight,
ignore_index=self.ignore_index)

loss['acc_seg'] = accuracy(seg_logit, seg_label)
loss['acc_seg'] = accuracy(
seg_logit, seg_label, ignore_index=self.ignore_index)
return loss
3 changes: 2 additions & 1 deletion mmseg/models/decode_heads/point_head.py
Original file line number Diff line number Diff line change
@@ -264,7 +264,8 @@ def losses(self, point_logits, point_label):
loss['point' + loss_module.loss_name] = loss_module(
point_logits, point_label, ignore_index=self.ignore_index)

loss['acc_point'] = accuracy(point_logits, point_label)
loss['acc_point'] = accuracy(
point_logits, point_label, ignore_index=self.ignore_index)
return loss

def get_points_train(self, seg_logits, uncertainty_func, cfg):
13 changes: 9 additions & 4 deletions mmseg/models/losses/accuracy.py
Original file line number Diff line number Diff line change
@@ -2,12 +2,13 @@
import torch.nn as nn


def accuracy(pred, target, topk=1, thresh=None):
def accuracy(pred, target, topk=1, thresh=None, ignore_index=None):
"""Calculate accuracy according to the prediction and target.
Args:
pred (torch.Tensor): The model prediction, shape (N, num_class, ...)
target (torch.Tensor): The target of each prediction, shape (N, ...)
ignore_index (int | None): The label index to be ignored. Default: None
topk (int | tuple[int], optional): If the predictions in ``topk``
matches the target, the predictions will be regarded as
correct ones. Defaults to 1.
@@ -43,17 +44,19 @@ def accuracy(pred, target, topk=1, thresh=None):
if thresh is not None:
# Only prediction values larger than thresh are counted as correct
correct = correct & (pred_value > thresh).t()
correct = correct[:, target != ignore_index]
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / target.numel()))
res.append(
correct_k.mul_(100.0 / target[target != ignore_index].numel()))
return res[0] if return_single else res


class Accuracy(nn.Module):
"""Accuracy calculation module."""

def __init__(self, topk=(1, ), thresh=None):
def __init__(self, topk=(1, ), thresh=None, ignore_index=None):
"""Module to calculate the accuracy.
Args:
@@ -65,6 +68,7 @@ def __init__(self, topk=(1, ), thresh=None):
super().__init__()
self.topk = topk
self.thresh = thresh
self.ignore_index = ignore_index

def forward(self, pred, target):
"""Forward function to calculate accuracy.
@@ -76,4 +80,5 @@ def forward(self, pred, target):
Returns:
tuple[float]: The accuracies under different topk criterions.
"""
return accuracy(pred, target, self.topk, self.thresh)
return accuracy(pred, target, self.topk, self.thresh,
self.ignore_index)