-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathmodel.py
55 lines (47 loc) · 2 KB
/
model.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
import torch
import torch.nn as nn
import torch.nn.functional as F
class CAE(nn.Module):
    """Convolutional autoencoder that compresses an RGB image to a fixed-size code.

    The encoder downsamples once (stride-2 conv), flattens, and projects to a
    ``code_bits``-dimensional code squashed to (0, 1) by a sigmoid. The decoder
    mirrors this: a linear layer back to the half-resolution feature map, then
    transposed convolutions (one stride-2 upsample) to reconstruct the image,
    also sigmoid-squashed to (0, 1).

    Args:
        code_bits: Dimensionality of the latent code.
        width:  Input image width in pixels; must be even so the stride-2
                down/upsample round-trips exactly.
        height: Input image height in pixels; must be even (same reason).

    Input/Output: tensors of shape ``(batch, 3, height, width)`` with values
    expected in [0, 1] (reconstruction is sigmoid-bounded to that range).
    """

    def __init__(self, code_bits=1024, width=32, height=32):
        super(CAE, self).__init__()
        self.w = width
        self.h = height
        self.imgsize = width * height
        self.code_bits = code_bits
        # Encoder: one stride-2 layer halves H and W, so the flattened
        # feature map has 3 * (H*W) / 4 elements going into the bottleneck.
        self.conv1 = nn.Conv2d(3, 16, 3, padding=1, stride=1)
        self.conv2 = nn.Conv2d(16, 16, 3, padding=1, stride=2)
        self.conv3 = nn.Conv2d(16, 16, 3, padding=1, stride=1)
        self.conv4 = nn.Conv2d(16, 16, 3, padding=1, stride=1)
        self.conv5 = nn.Conv2d(16, 16, 3, padding=1, stride=1)
        self.conv6 = nn.Conv2d(16, 3, 3, padding=1, stride=1)
        self.linear_in = nn.Linear(3 * self.imgsize // 4, code_bits)
        # Decoder: linear back to the half-resolution map, then deconvs.
        # deconv5 (kernel=4, stride=2, padding=1) exactly doubles H and W:
        # out = (n-1)*2 - 2*1 + 4 = 2n.
        self.linear_out = nn.Linear(code_bits, 3 * self.imgsize // 4)
        self.deconv1 = nn.ConvTranspose2d(3, 16, 3, padding=1, stride=1)
        self.deconv2 = nn.ConvTranspose2d(16, 16, 3, padding=1, stride=1)
        self.deconv3 = nn.ConvTranspose2d(16, 16, 3, padding=1, stride=1)
        self.deconv4 = nn.ConvTranspose2d(16, 16, 3, padding=1, stride=1)
        self.deconv5 = nn.ConvTranspose2d(16, 16, 4, padding=1, stride=2)
        self.deconv6 = nn.ConvTranspose2d(16, 3, 3, padding=1, stride=1)

    def encode(self, x):
        """Map a batch of images (B, 3, H, W) to latent codes (B, code_bits) in (0, 1)."""
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))
        x = F.relu(self.conv3(x))
        x = F.relu(self.conv4(x))
        x = F.relu(self.conv5(x))
        x = F.relu(self.conv6(x))
        x = x.view(-1, 3 * self.imgsize // 4)
        # torch.sigmoid: F.sigmoid was deprecated in PyTorch 0.4.1 and later removed.
        x = torch.sigmoid(self.linear_in(x))
        return x

    def decode(self, x):
        """Map latent codes (B, code_bits) back to images (B, 3, H, W) in (0, 1)."""
        x = F.relu(self.linear_out(x)).view(-1, 3, self.w // 2, self.h // 2)
        x = F.relu(self.deconv1(x))
        x = F.relu(self.deconv2(x))
        x = F.relu(self.deconv3(x))
        x = F.relu(self.deconv4(x))
        x = F.relu(self.deconv5(x))
        x = torch.sigmoid(self.deconv6(x))
        return x

    def forward(self, x):
        """Encode then decode: returns the reconstruction of the input batch."""
        return self.decode(self.encode(x))