-
Notifications
You must be signed in to change notification settings - Fork 14
/
utils.py
154 lines (123 loc) · 4.79 KB
/
utils.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
# Copyright (c) 2020-present, Royal Bank of Canada.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
import scipy.sparse as sp
import torch
import torch.nn.functional as F
from sklearn.neighbors import kneighbors_graph
# Small epsilon added to degree denominators (see `normalize`) to avoid division by zero.
EOS = 1e-10
def apply_non_linearity(tensor, non_linearity, i):
    """Apply the named non-linearity to `tensor`.

    Supported names: 'elu' (a shifted/scaled ELU, `F.elu(tensor * i - i) + 1`,
    which maps outputs into a positive range), 'relu', and 'none' (identity).
    Raises NameError for any other name.
    """
    if non_linearity == 'none':
        return tensor
    if non_linearity == 'relu':
        return F.relu(tensor)
    if non_linearity == 'elu':
        # scale/shift by i before ELU, then lift by 1 so outputs stay positive
        return F.elu(tensor * i - i) + 1
    raise NameError('We dont support the non-linearity yet')
def get_random_mask(features, r, nr):
    """Sample a Bernoulli mask over `features` for masked-feature training.

    Non-zero entries are masked with probability 1/r; zero entries with a
    probability scaled so that roughly `nr` zeros are masked per masked
    non-zero (pzeros = nones / nzeros / r * nr).

    Returns a {0, 1} float tensor with the same shape as `features`,
    on the same device as `features`.
    """
    nones = torch.sum(features > 0.0).float()
    nzeros = features.shape[0] * features.shape[1] - nones
    pzeros = nones / nzeros / r * nr
    # Bugfix: allocate on the input's device instead of hard-coding .cuda(),
    # so the function also works on CPU-only machines.
    probs = torch.zeros(features.shape, device=features.device)
    probs[features == 0.0] = pzeros
    probs[features > 0.0] = 1 / r
    mask = torch.bernoulli(probs)
    return mask
def get_random_mask_ogb(features, r):
    """Draw a Bernoulli mask where every entry is selected with probability 1/r."""
    keep_prob = torch.full(features.shape, 1 / r)
    return torch.bernoulli(keep_prob)
def accuracy(preds, labels):
    """Fraction of rows in `preds` whose argmax matches the corresponding label."""
    predicted = preds.argmax(dim=1)
    correct = (predicted == labels).sum().float()
    return correct / labels.shape[0]
def nearest_neighbors(X, k, metric):
    """Dense kNN adjacency matrix of X (self-loops added via the identity)."""
    knn = kneighbors_graph(X, k, metric=metric)
    dense = np.array(knn.todense(), dtype=np.float32)
    # add self-loops; float32 identity keeps the dtype unchanged
    dense += np.eye(dense.shape[0], dtype=np.float32)
    return dense
def nearest_neighbors_sparse(X, k, metric):
    """Return (source, destination) index arrays of X's kNN graph plus self-loops.

    Edge weights from the kNN graph are intentionally discarded; only the
    connectivity (row/column indices) is returned, with one self-loop per node
    appended at the end of both arrays.
    """
    adj = kneighbors_graph(X, k, metric=metric)
    loop = np.arange(X.shape[0])
    # sp.find returns (rows, cols, values); the values are unused here
    s_, d_, _ = sp.find(adj)
    s = np.concatenate((s_, loop))
    d = np.concatenate((d_, loop))
    return s, d
def nearest_neighbors_pre_exp(X, k, metric, i):
    """Dense kNN adjacency with self-loops, affinely rescaled as `adj * i - i`.

    The rescaling prepares the graph for a subsequent exp-style activation.
    """
    graph = kneighbors_graph(X, k, metric=metric)
    graph = np.array(graph.todense(), dtype=np.float32)
    graph += np.eye(graph.shape[0])
    return graph * i - i
def nearest_neighbors_pre_elu(X, k, metric, i):
    """Dense kNN adjacency with self-loops, rescaled (`* i - i`) for a later ELU."""
    sparse_knn = kneighbors_graph(X, k, metric=metric)
    adj = np.array(sparse_knn.todense(), dtype=np.float32)
    adj += np.eye(adj.shape[0])
    adj = adj * i - i
    return adj
def normalize(adj, mode, sparse=False):
    """Degree-normalize an adjacency matrix.

    mode == "sym": symmetric normalization D^{-1/2} A D^{-1/2}.
    mode == "row": row normalization D^{-1} A.
    `sparse` selects the torch sparse-COO code path; otherwise `adj` is dense.
    Any other mode exits the program (preserved legacy behavior).
    """
    if not sparse:
        if mode == "sym":
            # EOS guards against division by zero for isolated (zero-degree) nodes
            inv_sqrt_degree = 1. / (torch.sqrt(adj.sum(dim=1, keepdim=False)) + EOS)
            return inv_sqrt_degree[:, None] * adj * inv_sqrt_degree[None, :]
        elif mode == "row":
            inv_degree = 1. / (adj.sum(dim=1, keepdim=False) + EOS)
            return inv_degree[:, None] * adj
        else:
            exit("wrong norm mode")
    else:
        adj = adj.coalesce()
        if mode == "sym":
            # Bugfix: add EOS here too, consistent with the other three branches,
            # so zero degrees cannot produce inf values.
            inv_sqrt_degree = 1. / (torch.sqrt(torch.sparse.sum(adj, dim=1).values()) + EOS)
            D_value = inv_sqrt_degree[adj.indices()[0]] * inv_sqrt_degree[adj.indices()[1]]
        elif mode == "row":
            inv_degree = 1. / (torch.sparse.sum(adj, dim=1).values() + EOS)
            D_value = inv_degree[adj.indices()[0]]
        else:
            exit("wrong norm mode")
        new_values = adj.values() * D_value
        return torch.sparse.FloatTensor(adj.indices(), new_values, adj.size())
def symmetrize(adj):  # only for non-sparse
    """Return the symmetric part (A + A^T) / 2 of a dense adjacency matrix."""
    return 0.5 * (adj + adj.T)
def cal_similarity_graph(node_embeddings):
    """Pairwise inner-product similarity graph: E @ E^T for embedding matrix E."""
    return node_embeddings @ node_embeddings.t()
def top_k(raw_graph, K):
    """Keep the K largest entries in each row of `raw_graph`; zero out the rest.

    Returns a tensor of the same shape and device as `raw_graph`. The mask is
    detached from autograd (`requires_grad = False`) so gradients only flow
    through the surviving entries of `raw_graph`.
    """
    values, indices = raw_graph.topk(k=int(K), dim=-1)
    assert torch.max(indices) < raw_graph.shape[1]
    # Bugfix: build the mask on the input's device instead of hard-coding
    # .cuda(), so the function also works on CPU-only machines.
    mask = torch.zeros(raw_graph.shape, device=raw_graph.device)
    mask[torch.arange(raw_graph.shape[0]).view(-1, 1), indices] = 1.
    mask.requires_grad = False
    sparse_graph = raw_graph * mask
    return sparse_graph
def knn_fast(X, k, b):
    """Batched kNN graph from cosine similarities, in COO form.

    Rows of X are L2-normalized, then processed in batches of size `b`:
    each node keeps its k+1 most similar nodes (the self-similarity is
    among them). The edge values are symmetrically normalized by
    norm[row]^-0.5 * norm[col]^-0.5, where norm sums each node's outgoing
    and incoming top-(k+1) similarities.

    Returns (rows, cols, values): long index tensors and float edge weights,
    all on the same device as X.
    """
    X = F.normalize(X, dim=1, p=2)
    # Bugfix: allocate on the input's device instead of hard-coding .cuda(),
    # so the function also works on CPU-only machines.
    device = X.device
    n = X.shape[0]
    index = 0
    values = torch.zeros(n * (k + 1), device=device)
    rows = torch.zeros(n * (k + 1), device=device)
    cols = torch.zeros(n * (k + 1), device=device)
    norm_row = torch.zeros(n, device=device)
    norm_col = torch.zeros(n, device=device)
    while index < n:
        # clamp the final batch to the number of remaining rows
        end = min(index + b, n)
        sub_tensor = X[index:end]
        similarities = torch.mm(sub_tensor, X.t())
        vals, inds = similarities.topk(k=k + 1, dim=-1)
        values[index * (k + 1):end * (k + 1)] = vals.view(-1)
        cols[index * (k + 1):end * (k + 1)] = inds.view(-1)
        rows[index * (k + 1):end * (k + 1)] = torch.arange(index, end).view(-1, 1).repeat(1, k + 1).view(-1)
        norm_row[index:end] = torch.sum(vals, dim=1)
        # accumulate incoming similarity mass per destination node
        norm_col.index_add_(-1, inds.view(-1), vals.view(-1))
        index += b
    norm = norm_row + norm_col
    rows = rows.long()
    cols = cols.long()
    values *= (torch.pow(norm[rows], -0.5) * torch.pow(norm[cols], -0.5))
    return rows, cols, values