From c9f9617d712d9f0d3bede1cf385b49758485a1b3 Mon Sep 17 00:00:00 2001
From: yancong
Date: Fri, 19 Aug 2022 12:21:13 +0800
Subject: [PATCH] dev(mmeval/core): remove mistake added files

---
 mmeval/classification/accuracy.py | 139 ------------------------------
 mmeval/core/base_metric.py        |  76 ----------------
 2 files changed, 215 deletions(-)
 delete mode 100644 mmeval/classification/accuracy.py
 delete mode 100644 mmeval/core/base_metric.py

diff --git a/mmeval/classification/accuracy.py b/mmeval/classification/accuracy.py
deleted file mode 100644
index 0a7abb2f..00000000
--- a/mmeval/classification/accuracy.py
+++ /dev/null
@@ -1,139 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-
-import numpy as np
-import torch
-from typing import List, Tuple
-
-from mmeval.core.base_metric import BaseMetric
-from mmeval.core.dispatcher import dispatch
-from mmeval.core import set_default_dist_backend
-
-set_default_dist_backend('torch_cpu')
-
-
-class Accuracy(BaseMetric):
-
-    def __init__(self, topk=(1, ), thrs=0., dataset_meta=None, dist_backend=None):
-        super().__init__(dataset_meta=dataset_meta, dist_backend=dist_backend)
-
-        if isinstance(topk, int):
-            self.topk = (topk, )
-        else:
-            self.topk = tuple(topk)
-
-        if isinstance(thrs, float) or thrs is None:
-            self.thrs = (thrs, )
-        else:
-            self.thrs = tuple(thrs)
-
-    def add(self, predictions, labels):
-        for pred, label in zip(predictions, labels):
-            self._results.append((pred, label))
-
-    @dispatch
-    def compute_metric(self, results: List[Tuple['torch.Tensor', 'torch.Tensor']]):
-        start_t = time.time()
-
-        labels = torch.stack([res[1] for res in results])
-        predictions = torch.stack([res[0] for res in results])
-
-        if predictions.ndim == 1:
-            # For pred label, ignore topk and acc
-            predictions = predictions.int()
-            correct = predictions.eq(labels).float().sum(0, keepdim=True)
-            acc = correct.mul_(100. / labels.size(0))
-            return acc
-
-        maxk = max(self.topk)
-
-        pred_score, pred_label = predictions.topk(maxk, dim=1)
-        pred_label = pred_label.t()
-        correct = pred_label.eq(labels.view(1, -1).expand_as(pred_label))
-        results = []
-        for k in self.topk:
-            results.append([])
-            for thr in self.thrs:
-                # Only prediction values larger than thr are counted
-                # as correct
-                _correct = correct
-                if thr is not None:
-                    _correct = _correct & (pred_score.t() > thr)
-                correct_k = _correct[:k].reshape(-1).float().sum(0, keepdim=True)
-                acc = correct_k.mul_(100. / labels.size(0))
-                results[-1].append(acc)
-        return results, time.time() - start_t
-
-    @dispatch
-    def compute_metric(self, results: List[Tuple[np.ndarray, np.int64]]):
-        start_t = time.time()
-
-        labels = np.stack([res[1] for res in results])
-        predictions = np.stack([res[0] for res in results])
-
-        if predictions.ndim == 1:
-            # For pred label, ignore topk and acc
-            predictions = predictions.astype(np.int32)
-            correct = (predictions == labels).astype(np.float32).sum(0, keepdims=True)
-            acc = np.multiply(correct, 100. / labels.size)
-            return acc
-
-        maxk = max(self.topk)
-
-        pred_score, pred_label = torch.from_numpy(predictions).topk(maxk, dim=1)
-        pred_score, pred_label = pred_score.numpy(), pred_label.numpy()
-        # pred_score, pred_label = self.topk_by_sort(predictions, maxk, 1)
-        pred_label = pred_label.T
-        correct = (pred_label == np.broadcast_to(labels.reshape(1, -1), pred_label.shape))
-        results = []
-        for k in self.topk:
-            results.append([])
-            for thr in self.thrs:
-                # Only prediction values larger than thr are counted
-                # as correct
-                _correct = correct
-                if thr is not None:
-                    _correct = _correct & (pred_score.T > thr)
-                correct_k = _correct[:k].reshape(-1).astype(np.float32).sum(0, keepdims=True)
-                acc = np.multiply(correct_k, 100. / labels.size)
-                results[-1].append(acc)
-        return results, time.time() - start_t
-
-    @dispatch
-    def compute_metric(self, results: List[Tuple['tensorflow.Tensor', 'tensorflow.Tensor']]):
-        ...
-
-    def topk_by_sort(self, input, k, axis=None, ascending=True):
-        if not ascending:
-            input *= -1
-        ind = np.argsort(input, axis=axis)
-        ind = np.take(ind, np.arange(k), axis=axis)
-        if not ascending:
-            input *= -1
-        val = np.take_along_axis(input, ind, axis=axis)
-        return val, ind
-
-
-if __name__=="__main__":
-    import time
-
-    accuracy = Accuracy(topk=3)
-    np_accuracy = Accuracy(topk=3)
-
-    batch_size = 128
-    class_num = 1000
-
-    for i in range(10000):
-        predictions = torch.rand(batch_size, class_num)
-        labels = torch.randint(low=0, high=1000, size=(batch_size, ))
-        accuracy.add(predictions, labels)
-        np_accuracy.add(predictions.numpy(), labels.numpy())
-
-    start_t = time.time()
-    acc, t = accuracy.compute()
-    plum_t = time.time() - t - start_t
-    print(f'Accuracy: {acc}, compute cost: {t} s, plum cost: {plum_t} s')
-
-    start_t = time.time()
-    np_acc, t = np_accuracy.compute()
-    plum_t = time.time() - t - start_t
-    print(f'NumpyAccuracy: {np_acc}, compute cost: {t} s, plum cost: {plum_t} s')
\ No newline at end of file
diff --git a/mmeval/core/base_metric.py b/mmeval/core/base_metric.py
deleted file mode 100644
index b7f91dc3..00000000
--- a/mmeval/core/base_metric.py
+++ /dev/null
@@ -1,76 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-
-from abc import ABCMeta, abstractmethod
-
-from mmeval.core.dist import get_dist_backend
-
-
-class BaseMetric(metaclass=ABCMeta):
-
-    def __init__(self,
-                 dataset_meta=None,
-                 dist_subset_merge_method='unzip',
-                 dist_backend=None,
-                 logger=None):
-        self.dataset_meta = dataset_meta
-        assert dist_subset_merge_method in ('cat', 'unzip')
-        self.dist_subset_merge_method = dist_subset_merge_method
-        self._logger = logger
-
-        self.dist_comm = get_dist_backend(dist_backend)
-        self._results = []
-
-    @property
-    def dataset_meta(self):
-        return self._dataset_meta
-
-    @dataset_meta.setter
-    def dataset_meta(self, dataset_meta):
-        self._dataset_meta = dataset_meta
-
-    @property
-    def name(self):
-        return self.__class__.__name__
-
-    def reset(self):
-        self._results.clear()
-
-    def __call__(self, *args, **kwargs):
-        cache_results = self._results
-        self._results = []
-        self.add(*args, **kwargs)
-        metric_result = self.compute()
-        self._results = cache_results
-        return metric_result
-
-    def compute(self, size=None):
-        if not self.dist_comm.is_initialized or self.dist_comm.world_size == 1:
-            return self.compute_metric(self._results)
-
-        global_results = self.dist_comm.all_gather_object(self._results)
-
-        if self.dist_subset_merge_method == 'cat':
-            collected_results = sum(global_results, [])
-        else:
-            collected_results = []
-            for partial_result in zip(*global_results):
-                collected_results.extend(list(partial_result))
-
-        if size is not None:
-            collected_results = collected_results[:size]
-
-        if self.dist_comm.rank == 0:
-            metric_result = self.compute_metrics(collected_results)
-        else:
-            metric_result = None
-
-        global_metric_result = self.dist_comm.brodcast_object(metric_result)
-        return global_metric_result
-
-    @abstractmethod
-    def add(self, *args, **kwargs):
-        ...
-
-    @abstractmethod
-    def compute_metric(self, results):
-        ...