# layers.py
import torch
import torch.nn as nn
from utils import *  # repo-local helpers


class View(nn.Module):
    """Reshape the input to (batch, o), where o is the flattened feature size."""

    def __init__(self, o):
        super(View, self).__init__()
        self.o = o

    def forward(self, x):
        return x.view(-1, self.o)


class Flatten(nn.Module):
    """Flatten every dimension after the batch dimension."""

    def forward(self, x):
        return x.view(x.size(0), -1)


class Identity(nn.Module):
    """Return the input unchanged; handy as a drop-in placeholder layer."""

    def forward(self, x):
        return x
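

# Usage sketch for the three helpers above (illustrative, not from the
# original file): for a batch x = torch.randn(4, 3, 8, 8),
#
#     View(3 * 8 * 8)(x)  # -> (4, 192); target feature size fixed up front
#     Flatten()(x)        # -> (4, 192); feature size inferred from the input
#     Identity()(x)       # -> x itself, untouched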


class FC(nn.Sequential):
    """Linear -> (BatchNorm1d) -> activation block.

    When batch_norm is True the linear bias is dropped: the affine
    BatchNorm1d that follows already supplies a per-feature shift.
    """

    def __init__(self, in_features, out_features, bias=True,
                 activation_fn=nn.ReLU, batch_norm=True):
        if batch_norm:
            super(FC, self).__init__(
                nn.Linear(in_features, out_features, bias=False),
                nn.BatchNorm1d(out_features, affine=True),
                activation_fn(inplace=True),
            )
        else:
            super(FC, self).__init__(
                nn.Linear(in_features, out_features, bias=bias),
                activation_fn(inplace=True),
            )
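

# Usage sketch (illustrative): FC(192, 64) maps (N, 192) -> (N, 64) through
# Linear(bias=False) -> BatchNorm1d -> ReLU, while FC(192, 64, batch_norm=False)
# keeps the linear bias and skips the norm. Note that BatchNorm1d requires
# N > 1 in training mode.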


class Conv(nn.Sequential):
    """Conv2d/ConvTranspose2d -> (BatchNorm2d) -> activation block. The
    default padding (kernel_size - 1) // 2 preserves spatial size at
    stride 1; as in FC, the conv bias is dropped when BatchNorm follows."""

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1,
                 padding=None, output_padding=0, activation_fn=nn.ReLU,
                 batch_norm=True, transpose=False):
        if padding is None:
            padding = (kernel_size - 1) // 2
        model = []
        if not transpose:
            model += [nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size,
                                stride=stride, padding=padding,
                                bias=not batch_norm)]
        else:
            model += [nn.ConvTranspose2d(in_channels, out_channels, kernel_size,
                                         stride=stride, padding=padding,
                                         output_padding=output_padding,
                                         bias=not batch_norm)]
        if batch_norm:
            model += [nn.BatchNorm2d(out_channels, affine=True)]
        model += [activation_fn()]
        super(Conv, self).__init__(*model)
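

if __name__ == "__main__":
    # Minimal smoke test (an illustrative addition, not part of the original
    # repo). Shapes assume a 4-sample batch of 3x8x8 inputs.
    x = torch.randn(4, 3, 8, 8)

    # A stride-2 Conv halves the spatial size; the transposed variant with
    # output_padding=1 restores it exactly.
    down = Conv(3, 16, stride=2)
    up = Conv(16, 3, stride=2, output_padding=1, transpose=True)
    assert down(x).shape == (4, 16, 4, 4)
    assert up(down(x)).shape == x.shape

    # Flatten the conv features, then apply an FC head.
    head = nn.Sequential(Flatten(), FC(16 * 4 * 4, 64))
    assert head(down(x)).shape == (4, 64)
    print("smoke test passed")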