metrics.py
#!/home/workboots/VirtualEnvs/aiml/bin/python3
# -*- encoding: utf-8 -*-
# Birth: 2022-06-01 13:37:43.576184507 +0530
# Modify: 2022-09-07 11:13:12.533751844 +0530

"""Metrics to be calculated for the model."""

from typing import MutableSequence

import numpy as np
from sklearn.metrics import precision_recall_fscore_support as prfs

__author__ = "Upal Bhattacharya"
__license__ = ""
__copyright__ = ""
__version__ = "1.0"
__email__ = "upal.bhattacharya@gmail.com"
def custom_f1(outputs_batch: MutableSequence,
              targets_batch: MutableSequence,
              target_names: list[str]) -> dict:
    """Calculate per-class precision, recall and F1, along with the
    macro F1, between the given predictions and targets.

    Parameters
    ----------
    outputs_batch : MutableSequence
        Predictions of a batch as a 0/1 indicator array.
    targets_batch : MutableSequence
        Targets of the batch as a 0/1 indicator array.
    target_names : list[str]
        Names of targets.

    Returns
    -------
    scores : dict
        Dictionary containing the metric values.
    """
    per_class_prec = []
    per_class_rec = []
    num_classes = targets_batch.shape[-1]

    for cls in range(num_classes):
        # True positives, predicted positives and actual positives for
        # the current class.
        tp = np.dot(targets_batch[:, cls], outputs_batch[:, cls])
        pp = np.sum(outputs_batch[:, cls])
        p = np.sum(targets_batch[:, cls])

        prec = tp / pp if pp != 0 else 0
        rec = tp / p if p != 0 else 0
        per_class_prec.append(prec)
        per_class_rec.append(rec)

    # F1 = 2 * precision * recall / (precision + recall), guarding
    # against a zero denominator.
    den = [per_class_prec[i] + per_class_rec[i]
           for i in range(len(per_class_rec))]
    num = [2 * (per_class_prec[i] * per_class_rec[i])
           for i in range(len(per_class_rec))]
    per_class_f1 = [num_val / den_val if den_val != 0 else 0
                    for num_val, den_val in zip(num, den)]
    macro_f1 = sum(per_class_f1) / len(per_class_f1)

    # Converting metrics to dictionaries keyed by target name for
    # easier reading
    per_class_prec = {
        k: per_class_prec[i] for i, k in enumerate(target_names)}
    per_class_rec = {
        k: per_class_rec[i] for i, k in enumerate(target_names)}
    per_class_f1 = {
        k: per_class_f1[i] for i, k in enumerate(target_names)}

    scores = {
        'precision': per_class_prec,
        'recall': per_class_rec,
        'f1': per_class_f1,
        'macro_f1': macro_f1,
    }
    return scores
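

# A minimal usage sketch (illustrative only; the dummy arrays and label
# names below are not part of the original pipeline). With binary
# indicator arrays such as
#
#     targets = np.array([[1, 0], [0, 1]])
#     outputs = np.array([[1, 0], [1, 1]])
#
# custom_f1(outputs, targets, ["label_a", "label_b"]) returns per-class
# precision, recall and F1 keyed by label name, plus the macro F1.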
def f1(outputs_batch: MutableSequence,
       targets_batch: MutableSequence,
       target_names: list[str]) -> dict:
    """Calculate per-class, macro and micro precision, recall and F1
    between the given predictions and targets using scikit-learn.

    Parameters
    ----------
    outputs_batch : MutableSequence
        Predictions of a batch as a 0/1 indicator array.
    targets_batch : MutableSequence
        Targets of the batch as a 0/1 indicator array.
    target_names : list[str]
        Names of targets.

    Returns
    -------
    scores : dict
        Dictionary containing the metric values.
    """
    class_metrics = prfs(targets_batch, outputs_batch, average=None)
    per_class_prec, per_class_rec, per_class_f1, per_class_sup = class_metrics

    macro_metrics = prfs(targets_batch, outputs_batch, average='macro')
    macro_prec, macro_rec, macro_f1, macro_sup = macro_metrics

    micro_metrics = prfs(targets_batch, outputs_batch, average='micro')
    micro_prec, micro_rec, micro_f1, micro_sup = micro_metrics

    # Converting metrics to dictionaries keyed by target name for
    # easier reading. Support is None when an average is requested,
    # hence the guards below.
    per_class_prec = {
        k: float(per_class_prec[i]) for i, k in enumerate(target_names)}
    per_class_rec = {
        k: float(per_class_rec[i]) for i, k in enumerate(target_names)}
    per_class_f1 = {
        k: float(per_class_f1[i]) for i, k in enumerate(target_names)}
    per_class_sup = {
        k: float(per_class_sup[i]) for i, k in enumerate(target_names)}

    scores = {
        'precision': per_class_prec,
        'recall': per_class_rec,
        'f1': per_class_f1,
        'sup': per_class_sup,
        'macro_prec': float(macro_prec) if macro_prec is not None else macro_prec,
        'macro_rec': float(macro_rec) if macro_rec is not None else macro_rec,
        'macro_f1': float(macro_f1) if macro_f1 is not None else macro_f1,
        'macro_sup': float(macro_sup) if macro_sup is not None else macro_sup,
        'micro_prec': float(micro_prec) if micro_prec is not None else micro_prec,
        'micro_rec': float(micro_rec) if micro_rec is not None else micro_rec,
        'micro_f1': float(micro_f1) if micro_f1 is not None else micro_f1,
        'micro_sup': float(micro_sup) if micro_sup is not None else micro_sup,
    }
    return scores


# Mapping from metric name to the function that computes it.
metrics = {
    'f1': f1,
}
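

if __name__ == "__main__":
    # Minimal self-check sketch, not part of the original training
    # pipeline: the indicator arrays and label names below are dummy
    # values chosen purely for illustration.
    dummy_targets = np.array([[1, 0, 0],
                              [0, 1, 1]])
    dummy_outputs = np.array([[1, 0, 1],
                              [0, 1, 0]])
    dummy_names = ["label_a", "label_b", "label_c"]

    # Hand-rolled metrics and the scikit-learn backed version should
    # agree on per-class precision, recall and F1.
    print(custom_f1(dummy_outputs, dummy_targets, dummy_names))
    print(metrics['f1'](dummy_outputs, dummy_targets, dummy_names))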