-
Notifications
You must be signed in to change notification settings - Fork 4
/
Copy pathutils.py
69 lines (52 loc) · 1.84 KB
/
utils.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
from typing import Tuple, List
import torch
import torch.nn.functional as F
import ipdb
from sklearn.metrics import f1_score
def pairwise_loss(u_batch, i_batch_p, i_batch_n, hid_d):
    """BPR-style pairwise ranking loss.

    Scores each user embedding against one positive and one negative item
    embedding via batched dot products, and penalizes cases where the
    positive item does not out-score the negative one.

    Args:
        u_batch: user embeddings, reshapeable to (B, hid_d).
        i_batch_p: positive item embeddings, reshapeable to (B, hid_d).
        i_batch_n: negative item embeddings, reshapeable to (B, hid_d).
        hid_d: embedding dimensionality.

    Returns:
        Scalar tensor: mean over the batch of -log sigmoid(s_p - s_n).
    """
    u_batch = u_batch.view(len(u_batch), 1, hid_d)
    i_batch_p = i_batch_p.view(len(i_batch_p), hid_d, 1)
    i_batch_n = i_batch_n.view(len(i_batch_n), hid_d, 1)
    # Batched dot products: (B,1,hid_d) x (B,hid_d,1) -> (B,1,1) scores.
    out_p = torch.bmm(u_batch, i_batch_p)
    out_n = -torch.bmm(u_batch, i_batch_n)
    # -log sigmoid(s_p - s_n), averaged over the batch. .mean() replaces
    # the original's equivalent sum()/len(); dead commented-out variant
    # of the same loss removed.
    return (-F.logsigmoid(out_p + out_n)).mean()
def accuracy(output, labels):
    """Fraction of rows whose argmax prediction equals the label.

    Args:
        output: (N, C) score matrix.
        labels: (N,) ground-truth class indices.

    Returns:
        0-dim double tensor in [0, 1].
    """
    predictions = output.max(1)[1].type_as(labels)
    n_hits = predictions.eq(labels).double().sum()
    return n_hits / len(labels)
def F1(output, labels):
    """Micro-averaged F1 of the argmax predictions.

    Fixes two defects in the original: the macro-F1 was computed and then
    discarded (a wasted sklearn pass), and the result was unconditionally
    moved with .cuda(), which crashes on CPU-only machines. The result is
    now placed on the same device as `labels`.

    Args:
        output: (N, C) score matrix.
        labels: (N,) ground-truth class indices.

    Returns:
        0-dim tensor holding the micro-averaged F1 score, on labels.device.
    """
    preds = output.max(1)[1].type_as(labels)
    y_true = labels.cpu().data.numpy()
    y_pred = preds.cpu().data.numpy()
    micro_f1 = f1_score(y_true, y_pred, average='micro')
    return torch.tensor(micro_f1).to(labels.device)
def accuracy_nn(preds, labels):
    """Fraction of already-computed predictions that match their labels.

    Args:
        preds: (N,) predicted class indices.
        labels: (N,) ground-truth class indices.

    Returns:
        0-dim double tensor in [0, 1].
    """
    n_hits = preds.eq(labels).double().sum()
    return n_hits / len(labels)
def train_accuracy_multilabel(output, labels, idx):
    """Count exact-match hits at the rows of `output` selected by `idx`.

    labels[i] is the expected class for row idx[i].

    Returns:
        (number of correct predictions, number of positions checked).
    """
    preds = output.max(1)[1]
    n_correct = sum(
        1 for i, position in enumerate(idx) if preds[position] == labels[i]
    )
    return n_correct, len(idx)
def accuracy_multilabel(output, labels, idx):
    """Count hits where the argmax prediction falls in the label set.

    labels[i] is a collection of acceptable classes for row idx[i];
    a position counts as correct if its predicted class appears in it.

    Returns:
        (number of hits, number of positions checked).
    """
    preds = output.max(1)[1]
    n_correct = sum(
        1 for i, position in enumerate(idx) if preds[position] in labels[i]
    )
    return n_correct, len(idx)