"""class for fixed point analysis"""
import torch
import torch.nn.functional as F
from torch.autograd import Variable


class FixedPoint(object):
    """Find fixed points of a trained RNN and linearize the dynamics around them."""

    def __init__(self, model, device, gamma=0.01, speed_tor=1e-06, max_epochs=200000,
                 lr_decay_epoch=10000):
        self.model = model                    # trained RNN exposing w_in, w_hh, activation and n_hid
        self.device = device
        self.gamma = gamma                    # step size of the gradient descent on the speed
        self.speed_tor = speed_tor            # tolerance: stop once the speed falls below this value
        self.max_epochs = max_epochs          # hard cap on the number of descent steps
        self.lr_decay_epoch = lr_decay_epoch  # halve gamma every lr_decay_epoch steps
        self.model.eval()

    def calc_speed(self, hidden_activated, const_signal):
        """Speed of the hidden state: the norm of its one-step change under a constant input."""
        input_signal = const_signal.permute(1, 0, 2)
        pre_activates = self.model.w_in(input_signal[0]) + self.model.w_hh(hidden_activated)
        if self.model.activation == 'relu':
            activated = F.relu(pre_activates)
        else:
            activated = torch.tanh(pre_activates)
        speed = torch.norm(activated - hidden_activated)
        return speed

    def find_fixed_point(self, init_hidden, const_signal, view=False):
        """Minimize the speed by gradient descent on the hidden state, starting from init_hidden."""
        new_hidden = init_hidden.clone()
        gamma = self.gamma
        result_ok = True
        i = 0
        while True:
            # Detach from the previous step's graph and track gradients w.r.t. the hidden state.
            hidden_activated = new_hidden.detach().to(self.device).requires_grad_(True)
            speed = self.calc_speed(hidden_activated, const_signal)
            if view and i % 1000 == 0:
                print(f'epoch: {i}, speed={speed.item()}')
            if speed.item() < self.speed_tor:
                print(f'epoch: {i}, speed={speed.item()}')
                break
            speed.backward()
            if i % self.lr_decay_epoch == 0 and i > 0:
                gamma *= 0.5
            if i == self.max_epochs:
                print(f'forcibly finished. speed={speed.item()}')
                result_ok = False
                break
            i += 1
            # Plain gradient descent step on the hidden state.
            with torch.no_grad():
                new_hidden = hidden_activated - gamma * hidden_activated.grad
        fixed_point = new_hidden[0, 0]
        return fixed_point, result_ok

    def calc_jacobian(self, fixed_point, const_signal_tensor):
        """Jacobian of the one-step update, evaluated at the fixed point."""
        fixed_point = torch.unsqueeze(fixed_point, dim=1)
        fixed_point = fixed_point.detach().to(self.device).requires_grad_(True)
        input_signal = const_signal_tensor.permute(1, 0, 2)
        # Use the recurrent weight matrix directly; detach it so no gradient flows into the model.
        w_hh = self.model.w_hh.weight.detach().to(self.device)
        pre_activates = torch.unsqueeze(self.model.w_in(input_signal[0])[0], dim=1) + \
            w_hh @ fixed_point + torch.unsqueeze(self.model.w_hh.bias, dim=1)
        if self.model.activation == 'relu':
            activated = F.relu(pre_activates)
        else:
            activated = torch.tanh(pre_activates)
        # Assemble the Jacobian one row at a time via vector-Jacobian products with unit vectors;
        # the transpose at the end gives the conventional d(activated)/d(fixed_point) layout.
        jacobian = torch.zeros(self.model.n_hid, self.model.n_hid)
        for i in range(self.model.n_hid):
            output = torch.zeros(self.model.n_hid, 1).to(self.device)
            output[i] = 1.
            jacobian[:, i:i + 1] = torch.autograd.grad(
                activated, fixed_point, grad_outputs=output, retain_graph=True)[0].cpu()
        jacobian = jacobian.numpy().T
        return jacobian
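

# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the original module). It assumes
# a recurrent model exposing `w_in`, `w_hh`, `activation` and `n_hid`, which is
# all the FixedPoint methods above rely on; `ToyRNN` is a hypothetical stand-in
# for the model class this repository actually trains, and the (1, 1, n_hid)
# hidden-state / (batch, time, n_in) signal shapes follow the indexing used above.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import numpy as np
    import torch.nn as nn

    class ToyRNN(nn.Module):
        """Minimal stand-in exposing only the attributes FixedPoint needs."""

        def __init__(self, n_in=2, n_hid=16, activation='tanh'):
            super().__init__()
            self.n_hid = n_hid
            self.activation = activation
            self.w_in = nn.Linear(n_in, n_hid)
            self.w_hh = nn.Linear(n_hid, n_hid)

    device = torch.device('cpu')
    model = ToyRNN().to(device)
    analyzer = FixedPoint(model, device, max_epochs=20000)

    # Constant input held fixed during the search: (batch, time, n_in).
    const_signal = torch.zeros(1, 1, 2).to(device)
    init_hidden = 0.1 * torch.randn(1, 1, model.n_hid)

    # With an untrained toy model the search may simply hit max_epochs;
    # on a trained model it typically converges to speed < speed_tor.
    fixed_point, ok = analyzer.find_fixed_point(init_hidden, const_signal, view=True)
    print('converged:', ok)

    # Linearize around the fixed point and check stability via the eigenvalues.
    jacobian = analyzer.calc_jacobian(fixed_point, const_signal)
    eigenvalues = np.linalg.eigvals(jacobian)
    print('largest |eigenvalue|:', np.abs(eigenvalues).max())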