forked from zhaoyanglijoey/White-Balance-By-Deep-learning
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathtransformer_net.py
93 lines (78 loc) · 3.27 KB
/
transformer_net.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
import torch
import torch.nn as nn
class TransformerNet(nn.Module):
    """Encoder / residual-bottleneck / decoder image transformation network.

    The first convolution takes 3 + 5 = 8 input channels (an RGB image plus
    5 extra conditioning channels — presumably auxiliary features; confirm
    against the caller).  The output is a 3-channel map squashed into (0, 1)
    by a final sigmoid.
    """

    def __init__(self):
        super(TransformerNet, self).__init__()
        # --- encoder: a wide 9x9 stem, then two stride-2 downsampling convs ---
        self.conv1 = ConvLayer(in_channels=3 + 5, out_channels=32, kernel_size=9, stride=1)
        self.ins1 = nn.InstanceNorm2d(32, affine=True)
        self.conv2 = ConvLayer(32, 64, 3, 2)
        self.ins2 = nn.InstanceNorm2d(64, affine=True)
        self.conv3 = ConvLayer(64, 128, 3, 2)
        self.ins3 = nn.InstanceNorm2d(128, affine=True)
        # --- bottleneck: five residual blocks at 128 channels ---
        self.res1 = ResidualBlock(128)
        self.res2 = ResidualBlock(128)
        self.res3 = ResidualBlock(128)
        self.res4 = ResidualBlock(128)
        self.res5 = ResidualBlock(128)
        # --- decoder: mirrors the encoder with nearest-neighbor upsampling ---
        self.deconv1 = UpsampleConvLayer(128, 64, kernel_size=3, stride=1, upsample=2)
        self.ins4 = nn.InstanceNorm2d(64, affine=True)
        self.deconv2 = UpsampleConvLayer(64, 32, kernel_size=3, stride=1, upsample=2)
        self.ins5 = nn.InstanceNorm2d(32, affine=True)
        self.deconv3 = ConvLayer(32, 3, kernel_size=9, stride=1)
        # Activations.  Note: tanh and softplus are constructed here but are
        # never used in forward(); they are parameter-free, so they do not
        # affect the state dict.
        self.relu = nn.ReLU()
        self.tanh = nn.Tanh()
        self.sigmoid = nn.Sigmoid()
        self.softplus = nn.Softplus()

    def forward(self, x):
        """Run the full encoder -> residual stack -> decoder pipeline on x."""
        feat = self.relu(self.ins1(self.conv1(x)))
        feat = self.relu(self.ins2(self.conv2(feat)))
        feat = self.relu(self.ins3(self.conv3(feat)))
        # Apply the five residual blocks in order.
        for block in (self.res1, self.res2, self.res3, self.res4, self.res5):
            feat = block(feat)
        feat = self.relu(self.ins4(self.deconv1(feat)))
        feat = self.relu(self.ins5(self.deconv2(feat)))
        # Final 9x9 conv + sigmoid maps features to a 3-channel image in (0, 1).
        return self.sigmoid(self.deconv3(feat))
class ConvLayer(nn.Module):
    """Conv2d preceded by reflection padding.

    Padding by kernel_size // 2 means a stride-1 convolution with an odd
    kernel preserves the spatial dimensions; reflection padding avoids the
    dark border artifacts that zero padding produces.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride):
        super(ConvLayer, self).__init__()
        # Half-kernel reflection pad, then an unpadded convolution.
        self.reflection_pad = nn.ReflectionPad2d(kernel_size // 2)
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride)

    def forward(self, x):
        return self.conv(self.reflection_pad(x))
class ResidualBlock(nn.Module):
    """Residual block: two 3x3 reflection-padded convs with instance norm.

    The identity input is added back after the second normalization; there is
    no activation applied after the addition.
    """

    def __init__(self, channels):
        super(ResidualBlock, self).__init__()
        self.conv1 = ConvLayer(channels, channels, 3, 1)
        self.ins1 = nn.InstanceNorm2d(channels, affine=True)
        self.conv2 = ConvLayer(channels, channels, 3, 1)
        self.ins2 = nn.InstanceNorm2d(channels, affine=True)
        self.relu = nn.ReLU()

    def forward(self, x):
        # conv -> norm -> relu, then conv -> norm, then add the skip path.
        branch = self.relu(self.ins1(self.conv1(x)))
        branch = self.ins2(self.conv2(branch))
        return branch + x
class UpsampleConvLayer(nn.Module):
    """Optional nearest-neighbor upsample followed by a reflection-padded conv.

    When ``upsample`` is a truthy scale factor, the input is enlarged by that
    factor before the convolution; otherwise the input passes straight to the
    padded convolution.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, upsample=None):
        super(UpsampleConvLayer, self).__init__()
        self.upsample = upsample
        if upsample:
            # Only build the upsampling module when a scale factor is given.
            self.upsample_layer = nn.Upsample(mode='nearest', scale_factor=upsample)
        self.reflection_pad = nn.ReflectionPad2d(kernel_size // 2)
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride)

    def forward(self, x):
        out = self.upsample_layer(x) if self.upsample else x
        return self.conv(self.reflection_pad(out))