net.py
# Copyright 2018-2020 Stanislav Pidhorskyi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import torch
from torch import nn
from torch.nn import functional as F


class Generator(nn.Module):
    """DCGAN-style generator: maps a latent code of shape (N, z_size, 1, 1)
    to an image of shape (N, channels, 32, 32) with values in [0, 1]."""

    def __init__(self, z_size, d=128, channels=1):
        super(Generator, self).__init__()
        self.deconv1_1 = nn.ConvTranspose2d(z_size, d * 2, 4, 1, 0)
        self.deconv1_1_bn = nn.BatchNorm2d(d * 2)
        self.deconv2 = nn.ConvTranspose2d(d * 2, d * 2, 4, 2, 1)
        self.deconv2_bn = nn.BatchNorm2d(d * 2)
        self.deconv3 = nn.ConvTranspose2d(d * 2, d, 4, 2, 1)
        self.deconv3_bn = nn.BatchNorm2d(d)
        self.deconv4 = nn.ConvTranspose2d(d, channels, 4, 2, 1)

    def weight_init(self, mean, std):
        for m in self._modules:
            normal_init(self._modules[m], mean, std)

    def forward(self, x):
        x = F.relu(self.deconv1_1_bn(self.deconv1_1(x)))  # (N, 2d, 4, 4)
        x = F.relu(self.deconv2_bn(self.deconv2(x)))      # (N, 2d, 8, 8)
        x = F.relu(self.deconv3_bn(self.deconv3(x)))      # (N, d, 16, 16)
        x = torch.tanh(self.deconv4(x)) * 0.5 + 0.5       # (N, channels, 32, 32), rescaled to [0, 1]
        return x
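
# Example (illustrative values): the latent input is expected as a 4-D tensor
# of shape (N, z_size, 1, 1); the transposed convolutions upsample
# 1x1 -> 4x4 -> 8x8 -> 16x16 -> 32x32.
#
#     g = Generator(z_size=32, d=128, channels=1)
#     z = torch.randn(16, 32, 1, 1)
#     fake = g(z)  # (16, 1, 32, 32), values in [0, 1]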


class Discriminator(nn.Module):
    """DCGAN-style image discriminator: scores a (N, channels, 32, 32) image
    with a single sigmoid output of shape (N, 1, 1, 1)."""

    def __init__(self, d=128, channels=1):
        super(Discriminator, self).__init__()
        self.conv1_1 = nn.Conv2d(channels, d // 2, 4, 2, 1)
        self.conv2 = nn.Conv2d(d // 2, d * 2, 4, 2, 1)
        self.conv2_bn = nn.BatchNorm2d(d * 2)
        self.conv3 = nn.Conv2d(d * 2, d * 4, 4, 2, 1)
        self.conv3_bn = nn.BatchNorm2d(d * 4)
        self.conv4 = nn.Conv2d(d * 4, 1, 4, 1, 0)

    def weight_init(self, mean, std):
        for m in self._modules:
            normal_init(self._modules[m], mean, std)

    def forward(self, input):
        x = F.leaky_relu(self.conv1_1(input), 0.2)            # (N, d/2, 16, 16)
        x = F.leaky_relu(self.conv2_bn(self.conv2(x)), 0.2)   # (N, 2d, 8, 8)
        x = F.leaky_relu(self.conv3_bn(self.conv3(x)), 0.2)   # (N, 4d, 4, 4)
        x = torch.sigmoid(self.conv4(x))                      # (N, 1, 1, 1)
        return x


class Encoder(nn.Module):
    """Convolutional encoder: maps a (N, channels, 32, 32) image to a latent
    code of shape (N, z_size, 1, 1); mirrors the Generator architecture."""

    def __init__(self, z_size, d=128, channels=1):
        super(Encoder, self).__init__()
        self.conv1_1 = nn.Conv2d(channels, d, 4, 2, 1)
        self.conv2 = nn.Conv2d(d, d * 2, 4, 2, 1)
        self.conv2_bn = nn.BatchNorm2d(d * 2)
        self.conv3 = nn.Conv2d(d * 2, d * 4, 4, 2, 1)
        self.con3_bn = None  # placeholder removed below
        self.conv3_bn = nn.BatchNorm2d(d * 4)
        self.conv4 = nn.Conv2d(d * 4, z_size, 4, 1, 0)

    def weight_init(self, mean, std):
        for m in self._modules:
            normal_init(self._modules[m], mean, std)

    def forward(self, input):
        x = F.leaky_relu(self.conv1_1(input), 0.2)            # (N, d, 16, 16)
        x = F.leaky_relu(self.conv2_bn(self.conv2(x)), 0.2)   # (N, 2d, 8, 8)
        x = F.leaky_relu(self.conv3_bn(self.conv3(x)), 0.2)   # (N, 4d, 4, 4)
        x = self.conv4(x)                                     # (N, z_size, 1, 1), no activation
        return x
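
# Example (illustrative values): the Encoder mirrors the Generator, so an
# encode/decode round trip preserves the image shape.
#
#     e = Encoder(z_size=32, d=128, channels=1)
#     g = Generator(z_size=32, d=128, channels=1)
#     x = torch.rand(16, 1, 32, 32)
#     x_rec = g(e(x))  # (16, 1, 32, 32)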


class ZDiscriminator(nn.Module):
    """MLP discriminator on latent codes: scores each z vector of shape
    (N, z_size) independently and returns sigmoid outputs of shape (N, 1).
    batchSize is accepted but not used by this per-sample variant."""

    def __init__(self, z_size, batchSize, d=128):
        super(ZDiscriminator, self).__init__()
        self.linear1 = nn.Linear(z_size, d)
        self.linear2 = nn.Linear(d, d)
        self.linear3 = nn.Linear(d, 1)

    def weight_init(self, mean, std):
        for m in self._modules:
            normal_init(self._modules[m], mean, std)

    def forward(self, x):
        x = F.leaky_relu(self.linear1(x), 0.2)
        x = F.leaky_relu(self.linear2(x), 0.2)
        x = torch.sigmoid(self.linear3(x))
        return x


class ZDiscriminator_mergebatch(nn.Module):
    """MLP discriminator that scores a whole batch of latent codes jointly:
    the batch is flattened into a single vector after the first linear layer,
    so the input must contain exactly batchSize samples and the output has
    shape (1, 1)."""

    def __init__(self, z_size, batchSize, d=128):
        super(ZDiscriminator_mergebatch, self).__init__()
        self.linear1 = nn.Linear(z_size, d)
        self.linear2 = nn.Linear(d * batchSize, d)
        self.linear3 = nn.Linear(d, 1)

    def weight_init(self, mean, std):
        for m in self._modules:
            normal_init(self._modules[m], mean, std)

    def forward(self, x):
        # After the first layer all samples are concatenated into one vector.
        x = F.leaky_relu(self.linear1(x), 0.2).view(1, -1)  # (1, d * batchSize)
        x = F.leaky_relu(self.linear2(x), 0.2)
        x = torch.sigmoid(self.linear3(x))
        return x


def normal_init(m, mean, std):
    # Draw conv/deconv/linear weights from N(mean, std) and zero the biases.
    if isinstance(m, (nn.ConvTranspose2d, nn.Conv2d, nn.Linear)):
        m.weight.data.normal_(mean, std)
        m.bias.data.zero_()
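

if __name__ == "__main__":
    # Minimal smoke test with illustrative hyperparameters; z_size=32, a batch
    # of 16 and N(0, 0.02) initialisation are assumptions for this sketch, not
    # values prescribed by the module itself.
    z_size, batch_size, channels = 32, 16, 1

    G = Generator(z_size, channels=channels)
    E = Encoder(z_size, channels=channels)
    D = Discriminator(channels=channels)
    ZD = ZDiscriminator(z_size, batch_size)
    ZD_merge = ZDiscriminator_mergebatch(z_size, batch_size)
    for net in (G, E, D, ZD, ZD_merge):
        net.weight_init(mean=0.0, std=0.02)

    z = torch.randn(batch_size, z_size, 1, 1)
    x = G(z)                                  # (16, 1, 32, 32)
    d_out = D(x)                              # (16, 1, 1, 1)
    z_rec = E(x).view(batch_size, z_size)     # (16, 32), flattened for the z-discriminators
    zd_out = ZD(z_rec)                        # (16, 1): one score per sample
    zd_merge_out = ZD_merge(z_rec)            # (1, 1): one score for the whole batch
    print(x.shape, d_out.shape, zd_out.shape, zd_merge_out.shape)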