-
Notifications
You must be signed in to change notification settings - Fork 1
/
model.py
71 lines (63 loc) · 2.48 KB
/
model.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
import io
import torch
import torch.nn as nn
import numpy as np
from copy import deepcopy
from collections import OrderedDict
class LTRModel:
    """Learning-to-rank model: a small MLP that scores (query, doc-pair) inputs.

    Input is the concatenation of three 768-dim vectors (query, superior doc,
    inferior doc); output is a sigmoid probability that the "superior" document
    should rank above the "inferior" one.
    """

    def __init__(self) -> None:
        # 3 * 768 = concatenated (query, sup_doc, inf_doc) embedding vectors.
        self.model = nn.Sequential(
            nn.Linear(3 * 768, 256),
            nn.ReLU(),
            nn.Linear(256, 256),
            nn.ReLU(),
            nn.Linear(256, 256),
            nn.ReLU(),
            nn.Linear(256, 1),
            nn.Sigmoid(),
        )
        # Snapshot of the weights as they were before the most recent train().
        self.last_state = deepcopy(self.model.state_dict())
        self._criterion = nn.BCELoss()
        self._optimizer = torch.optim.SGD(self.model.parameters(), lr=0.01)

    def serialize_model(self) -> io.BytesIO:
        """Serialize the model weights into an in-memory buffer.

        Returns:
            A BytesIO positioned at offset 0, ready to be read or passed
            straight to ``torch.load``.
        """
        buffer = io.BytesIO()
        torch.save(self.model.state_dict(), buffer)
        # BUGFIX: rewind the buffer — without this, readers start at EOF and
        # see an empty stream (torch.load on the returned buffer would fail).
        buffer.seek(0)
        return buffer

    def make_input(self, query_vector, sup_doc_vector, inf_doc_vector):
        """
        Make (query, document-pair) input for model.

        Returns a flat float32 numpy array of shape (3 * 768,).
        """
        return np.array(
            [query_vector, sup_doc_vector, inf_doc_vector], dtype=np.float32
        ).flatten()

    def _train_step(self, data, target) -> float:
        """Run one SGD step on a single example; return the scalar loss."""
        # Accept numpy arrays (e.g. make_input() output) as well as tensors.
        data = torch.as_tensor(data, dtype=torch.float32)
        output = self.model(data)
        loss = self._criterion(output, target)
        self._optimizer.zero_grad()
        loss.backward()
        self._optimizer.step()
        return loss.item()

    def train(self, pos_train_data, neg_train_data, num_epochs):
        """Train for ``num_epochs`` epochs.

        Args:
            pos_train_data: iterable of inputs labelled 1.0 (correct ordering).
            neg_train_data: iterable of inputs labelled 0.0 (wrong ordering).
            num_epochs: number of full passes over both iterables.
        """
        self.last_state = deepcopy(self.model.state_dict())
        pos_target = torch.tensor([1.0])
        neg_target = torch.tensor([0.0])
        for epoch in range(num_epochs):
            losses = [self._train_step(d, pos_target) for d in pos_train_data]
            losses += [self._train_step(d, neg_target) for d in neg_train_data]
            # Guard against empty training data (avoids ZeroDivisionError).
            if (epoch + 1) == num_epochs and losses:
                print(f'Epoch [{epoch + 1}/{num_epochs}], Loss: {(sum(losses) / len(losses)):.4f}')

    def apply_updates(self, update_model: OrderedDict):
        """Average local parameters with the matching entries in ``update_model``.

        Parameters absent from ``update_model`` are left unchanged.
        """
        print('applying updates...')
        update_state = update_model
        for name, param in self.model.named_parameters():
            if name in update_state:
                # Simple federated-style merge: midpoint of local and remote.
                param.data = (param.data + update_state[name]) / 2.0