mlp_dropout.py
import torch.nn as nn
import torch.nn.functional as F
class MLPRegression(nn.Module):
    """Two-layer MLP for scalar regression."""

    def __init__(self, input_size, hidden_size):
        super(MLPRegression, self).__init__()
        self.h1_weights = nn.Linear(input_size, hidden_size)
        self.h2_weights = nn.Linear(hidden_size, 1)

    def forward(self, x, y=None):
        h1 = F.relu(self.h1_weights(x))
        pred = self.h2_weights(h1)

        if y is not None:
            # y should match pred's shape, i.e. (batch, 1), to avoid broadcasting.
            mse = F.mse_loss(pred, y)
            mae = F.l1_loss(pred, y)
            return pred, mae, mse
        return pred
class MLPClassifier(nn.Module):
    """Two-layer MLP classifier with optional dropout on the hidden layer."""

    def __init__(self, input_size, hidden_size, num_class, with_dropout=False):
        super(MLPClassifier, self).__init__()
        self.h1_weights = nn.Linear(input_size, hidden_size)
        self.h2_weights = nn.Linear(hidden_size, num_class)
        self.with_dropout = with_dropout

    def forward(self, x, y=None):
        h1 = F.relu(self.h1_weights(x))
        if self.with_dropout:
            # Dropout is active only in training mode; self.training is
            # toggled by model.train() / model.eval().
            h1 = F.dropout(h1, training=self.training)

        logits = F.log_softmax(self.h2_weights(h1), dim=1)

        if y is not None:
            # nll_loss expects log-probabilities, which log_softmax provides.
            loss = F.nll_loss(logits, y)
            pred = logits.max(1, keepdim=True)[1]
            acc = pred.eq(y.view_as(pred)).sum().item() / float(y.size(0))
            return logits, loss, acc
        return logits
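

# A minimal usage sketch, not part of the original file: the batch size,
# feature width, and class count below are illustrative assumptions chosen
# only to show the two forward-pass signatures (with and without labels).
if __name__ == '__main__':
    import torch

    x = torch.randn(8, 16)             # assumed batch of 8 with 16 features
    y_cls = torch.randint(0, 3, (8,))  # integer class labels in [0, 3)
    y_reg = torch.randn(8, 1)          # regression targets, shape (batch, 1)

    clf = MLPClassifier(input_size=16, hidden_size=32, num_class=3, with_dropout=True)
    clf.train()  # enables dropout; clf.eval() would disable it
    logits, loss, acc = clf(x, y_cls)
    print('classifier loss %.4f, acc %.4f' % (loss.item(), acc))

    reg = MLPRegression(input_size=16, hidden_size=32)
    pred, mae, mse = reg(x, y_reg)
    print('regression mae %.4f, mse %.4f' % (mae.item(), mse.item()))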