import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F


def print_network(net):
    """Print a network's structure and its total parameter count."""
    num_params = 0
    for param in net.parameters():
        num_params += param.numel()
    print(net)
    print('Total number of parameters: %d' % num_params)
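

def _example_print_network():
    """Minimal usage sketch for print_network; not called anywhere in this
    file. The 3->64 conv stack below is an arbitrary illustration, not part
    of RN.
    """
    net = nn.Sequential(nn.Conv2d(3, 64, 3, 1, 1), nn.ReLU(inplace=True))
    print_network(net)  # prints the module tree, then the parameter count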


def initialize_weights(net_l, scale=1):
    """Kaiming-initialize Conv2d/Linear layers, then scale their weights.

    A scale < 1 damps the initial weights, which helps stabilize training
    of residual blocks (see ResidualBlock_noBN below).
    """
    if not isinstance(net_l, list):
        net_l = [net_l]
    for net in net_l:
        for m in net.modules():
            if isinstance(m, nn.Conv2d):
                init.kaiming_normal_(m.weight, a=0, mode='fan_in')
                m.weight.data *= scale  # for residual block
                if m.bias is not None:
                    init.normal_(m.bias, 0.0001)
            elif isinstance(m, nn.Linear):
                init.kaiming_normal_(m.weight, a=0, mode='fan_in')
                m.weight.data *= scale
                if m.bias is not None:
                    init.normal_(m.bias, 0.0001)
            elif isinstance(m, nn.BatchNorm2d):
                try:
                    init.constant_(m.weight, 1)
                    init.normal_(m.bias, 0.0001)
                except Exception:  # BN with affine=False has no parameters
                    print('This layer has no BN parameters:', m)
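

def _example_initialize_weights():
    """Usage sketch for initialize_weights; not called in this file. The 0.1
    scale mirrors how ResidualBlock_noBN below calls it; the layer sizes are
    arbitrary illustrations.
    """
    convs = [nn.Conv2d(64, 64, 3, 1, 1), nn.Conv2d(64, 64, 3, 1, 1)]
    initialize_weights(convs, scale=0.1)  # Kaiming init, then damp 10x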


def make_layer(block, n_layers):
    """Stack n_layers modules built by the `block` factory into a Sequential."""
    layers = []
    for _ in range(n_layers):
        layers.append(block())
    return nn.Sequential(*layers)
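

def _example_make_layer():
    """Usage sketch for make_layer; not called in this file. functools.partial
    supplies constructor arguments to the block factory; the depth and width
    are arbitrary illustrations, and ResidualBlock_noBN is defined later in
    this file (resolved at call time).
    """
    import functools
    block = functools.partial(ResidualBlock_noBN, nf=64)
    trunk = make_layer(block, 5)  # nn.Sequential of 5 independent blocks
    return trunk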


class SELayer(nn.Module):
    """Squeeze-and-Excitation layer that returns raw (B, C, 1) channel
    logits; unlike a standard SE block, the caller applies the sigmoid and
    reshaping (see the commented-out residual block below).
    """

    def __init__(self, channel, reduction=16):
        super(SELayer, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        # self.spatialpool = SpatialPool(channel)
        self.fc = nn.Sequential(
            nn.Linear(channel, channel // reduction, bias=True),
            nn.ReLU(inplace=True),
            nn.Linear(channel // reduction, channel, bias=True),
        )

    def forward(self, x):
        b, c, _, _ = x.size()
        y = self.avg_pool(x).view(b, c)  # squeeze: global average pooling
        y = self.fc(y).view(b, c, 1)     # excite: bottleneck MLP
        return y
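

def _example_selayer():
    """Shape sketch for SELayer; not called in this file. The output is raw
    (B, C, 1) excitation logits, so a caller multiplies them into a feature
    map only after sigmoid and a reshape to (B, C, 1, 1), as in the
    commented-out block below. Tensor sizes here are arbitrary.
    """
    se = SELayer(channel=64, reduction=16)
    x = torch.randn(2, 64, 32, 32)
    y = se(x)  # shape (2, 64, 1): unnormalized per-channel weights
    return y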


# Earlier SE-gated residual block, kept commented out for reference:
# class ResidualBlock_noBN(nn.Module):
#     def __init__(self, nf=64, stride=1, downsample=None, reduction=4):
#         super(ResidualBlock_noBN, self).__init__()
#         self.conv1 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
#         self.relu = nn.ReLU(inplace=True)
#         self.conv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
#         self.se = SELayer(nf, reduction)
#         self.sigmoid = nn.Sigmoid()
#
#     def forward(self, x):
#         residual = x
#         out = self.relu(self.conv1(x))
#         out = self.conv2(out)
#         y = self.se(out)
#         y = self.sigmoid(y.view(y.size(0), -1))
#         y = y.view(y.size(0), y.size(1), 1, 1)
#         out = torch.mul(out, y)
#         out += residual
#         return out


class ResidualBlock_noBN(nn.Module):
    """Residual block w/o BN
    ---Conv-ReLU-Conv-+-
     |________________|
    """

    def __init__(self, nf=64):
        super(ResidualBlock_noBN, self).__init__()
        self.conv1 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
        self.conv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
        # initialization: damp initial weights 10x (see initialize_weights)
        initialize_weights([self.conv1, self.conv2], 0.1)

    def forward(self, x):
        identity = x
        out = F.relu(self.conv1(x), inplace=True)
        out = self.conv2(out)
        return identity + out
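

def _example_residual_block():
    """Usage sketch for ResidualBlock_noBN; not called in this file. The
    input size is an arbitrary illustration; output shape equals input
    shape since both 3x3 convs use stride 1 and padding 1.
    """
    block = ResidualBlock_noBN(nf=64)
    x = torch.randn(1, 64, 32, 32)
    out = block(x)  # (1, 64, 32, 32): identity + conv branch
    return out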


def flow_warp(x, flow, interp_mode='bilinear', padding_mode='zeros'):
    """Warp an image or feature map with optical flow.

    Args:
        x (Tensor): size (N, C, H, W)
        flow (Tensor): size (N, H, W, 2), displacements in pixel units
        interp_mode (str): 'nearest' or 'bilinear'
        padding_mode (str): 'zeros', 'border' or 'reflection'

    Returns:
        Tensor: warped image or feature map
    """
    assert x.size()[-2:] == flow.size()[1:3]
    B, C, H, W = x.size()
    # mesh grid of pixel coordinates (ij indexing: rows, then columns)
    grid_y, grid_x = torch.meshgrid(torch.arange(0, H), torch.arange(0, W))
    grid = torch.stack((grid_x, grid_y), 2).float()  # (H, W, 2): W(x), H(y)
    grid.requires_grad = False
    grid = grid.type_as(x)
    vgrid = grid + flow
    # scale grid to [-1, 1]
    vgrid_x = 2.0 * vgrid[:, :, :, 0] / max(W - 1, 1) - 1.0
    vgrid_y = 2.0 * vgrid[:, :, :, 1] / max(H - 1, 1) - 1.0
    vgrid_scaled = torch.stack((vgrid_x, vgrid_y), dim=3)
    # align_corners=True is required here: the (W - 1)/(H - 1) normalization
    # above assumes corner-aligned sampling (the pre-PyTorch-1.3 default)
    output = F.grid_sample(x, vgrid_scaled, mode=interp_mode,
                           padding_mode=padding_mode, align_corners=True)
    return output
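

def _example_flow_warp():
    """Sanity-check sketch for flow_warp; not called in this file. An
    all-zero flow samples each pixel at its own location, so the output
    should match the input up to floating-point error. Sizes are arbitrary.
    """
    x = torch.randn(1, 3, 16, 16)
    flow = torch.zeros(1, 16, 16, 2)  # (N, H, W, 2), zero displacement
    out = flow_warp(x, flow)
    assert torch.allclose(out, x, atol=1e-5)
    return out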