add act_cfg #2239

Merged 5 commits on Mar 12, 2020
2 changes: 1 addition & 1 deletion configs/carafe/faster_rcnn_r50_fpn_carafe_1x.py
@@ -18,7 +18,7 @@
start_level=0,
end_level=-1,
norm_cfg=None,
-activation=None,
+act_cfg=None,
order=('conv', 'norm', 'act'),
upsample_cfg=dict(
type='carafe',
2 changes: 1 addition & 1 deletion configs/carafe/mask_rcnn_r50_fpn_carafe_1x.py
@@ -18,7 +18,7 @@
start_level=0,
end_level=-1,
norm_cfg=None,
-activation=None,
+act_cfg=None,
order=('conv', 'norm', 'act'),
upsample_cfg=dict(
type='carafe',
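For context, the neck section of these CARAFE configs would now read roughly as below. Everything beyond the keys visible in the hunks above (including the 'FPN_CARAFE' type name and the channel numbers) is an assumption for illustration, and the remaining CARAFE-specific upsample keys are omitted.

neck=dict(
    type='FPN_CARAFE',                    # assumed registered name of this neck
    in_channels=[256, 512, 1024, 2048],   # hypothetical backbone channels
    out_channels=256,
    num_outs=5,
    start_level=0,
    end_level=-1,
    norm_cfg=None,
    act_cfg=None,                         # renamed from `activation`
    order=('conv', 'norm', 'act'),
    upsample_cfg=dict(type='carafe'))     # other CARAFE keys elided here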
4 changes: 2 additions & 2 deletions mmdet/models/bbox_heads/double_bbox_head.py
@@ -41,7 +41,7 @@ def __init__(self,
out_channels,
kernel_size=1,
bias=False,
-activation=None,
+act_cfg=None,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg)
Reviewer comment (Member): We may follow the argument order: conv_cfg, norm_cfg, act_cfg.
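For illustration, a ConvModule call following that order might look like the sketch below; the channel sizes are hypothetical and the import path assumes ConvModule stays in mmdet.models.utils.

from mmdet.models.utils import ConvModule  # assumed import path

branch_conv = ConvModule(
    in_channels=1024,           # hypothetical
    out_channels=256,           # hypothetical
    kernel_size=1,
    bias=False,
    conv_cfg=None,              # conv_cfg first ...
    norm_cfg=dict(type='BN'),   # ... then norm_cfg ...
    act_cfg=None)               # ... then act_cfg, per the suggested order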


@@ -52,7 +52,7 @@ def __init__(self,
kernel_size=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
-activation=None)
+act_cfg=None)

self.relu = nn.ReLU(inplace=True)

9 changes: 4 additions & 5 deletions mmdet/models/necks/fpn.py
@@ -59,14 +59,13 @@ def __init__(self,
no_norm_on_lateral=False,
conv_cfg=None,
norm_cfg=None,
-activation=None):
+act_cfg=None):
super(FPN, self).__init__()
assert isinstance(in_channels, list)
self.in_channels = in_channels
self.out_channels = out_channels
self.num_ins = len(in_channels)
self.num_outs = num_outs
-self.activation = activation
self.relu_before_extra_convs = relu_before_extra_convs
self.no_norm_on_lateral = no_norm_on_lateral
self.fp16_enabled = False
@@ -94,7 +93,7 @@ def __init__(self,
1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg if not self.no_norm_on_lateral else None,
-activation=self.activation,
+act_cfg=act_cfg,
inplace=False)
fpn_conv = ConvModule(
out_channels,
@@ -103,7 +102,7 @@
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
-activation=self.activation,
+act_cfg=act_cfg,
inplace=False)

self.lateral_convs.append(l_conv)
@@ -125,7 +124,7 @@
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
-activation=self.activation,
+act_cfg=act_cfg,
inplace=False)
self.fpn_convs.append(extra_fpn_conv)

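A minimal sketch of building the FPN neck with the renamed argument; the import path and channel numbers are assumptions, not part of this diff.

from mmdet.models.necks import FPN  # assumed import path

neck = FPN(
    in_channels=[256, 512, 1024, 2048],  # e.g. ResNet-50 stage channels (hypothetical)
    out_channels=256,
    num_outs=5,
    norm_cfg=None,
    act_cfg=None)  # was `activation=None`; pass e.g. dict(type='relu') to add an activation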
11 changes: 5 additions & 6 deletions mmdet/models/necks/fpn_carafe.py
@@ -38,7 +38,7 @@ def __init__(self,
start_level=0,
end_level=-1,
norm_cfg=None,
-activation=None,
+act_cfg=None,
order=('conv', 'norm', 'act'),
upsample_cfg=dict(
type='carafe',
@@ -52,7 +52,6 @@
self.out_channels = out_channels
self.num_ins = len(in_channels)
self.num_outs = num_outs
-self.activation = activation
self.norm_cfg = norm_cfg
self.with_bias = norm_cfg is None
self.upsample_cfg = upsample_cfg.copy()
@@ -93,7 +92,7 @@ def __init__(self,
1,
norm_cfg=norm_cfg,
bias=self.with_bias,
-activation=activation,
+act_cfg=act_cfg,
inplace=False,
order=self.order)
fpn_conv = ConvModule(
@@ -103,7 +102,7 @@
padding=1,
norm_cfg=self.norm_cfg,
bias=self.with_bias,
-activation=activation,
+act_cfg=act_cfg,
inplace=False,
order=self.order)
if i != self.backbone_end_level - 1:
@@ -153,7 +152,7 @@ def __init__(self,
padding=1,
norm_cfg=norm_cfg,
bias=self.with_bias,
-activation=self.activation,
+act_cfg=act_cfg,
inplace=False,
order=self.order)
if self.upsample == 'deconv':
@@ -192,7 +191,7 @@ def __init__(self,
padding=1,
norm_cfg=self.norm_cfg,
bias=self.with_bias,
-activation=activation,
+act_cfg=act_cfg,
inplace=False,
order=self.order)
self.upsample_modules.append(upsample_module)
4 changes: 2 additions & 2 deletions mmdet/models/necks/hrfpn.py
@@ -51,7 +51,7 @@ def __init__(self,
out_channels,
kernel_size=1,
conv_cfg=self.conv_cfg,
-activation=None)
+act_cfg=None)

self.fpn_convs = nn.ModuleList()
for i in range(self.num_outs):
@@ -63,7 +63,7 @@ def __init__(self,
padding=1,
stride=stride,
conv_cfg=self.conv_cfg,
-activation=None))
+act_cfg=None))

if pooling_type == 'MAX':
self.pooling = F.max_pool2d
8 changes: 2 additions & 6 deletions mmdet/models/necks/nas_fpn.py
@@ -110,19 +110,15 @@ def __init__(self,
out_channels,
1,
norm_cfg=norm_cfg,
-activation=None)
+act_cfg=None)
self.lateral_convs.append(l_conv)

# add extra downsample layers (stride-2 pooling or conv)
extra_levels = num_outs - self.backbone_end_level + self.start_level
self.extra_downsamples = nn.ModuleList()
for i in range(extra_levels):
extra_conv = ConvModule(
-out_channels,
-out_channels,
-1,
-norm_cfg=norm_cfg,
-activation=None)
+out_channels, out_channels, 1, norm_cfg=norm_cfg, act_cfg=None)
self.extra_downsamples.append(
nn.Sequential(extra_conv, nn.MaxPool2d(2, 2)))

17 changes: 4 additions & 13 deletions mmdet/models/plugins/non_local.py
@@ -39,27 +39,18 @@ def __init__(self,
# g, theta, phi are actually `nn.Conv2d`. Here we use ConvModule for
# potential usage.
self.g = ConvModule(
-self.in_channels,
-self.inter_channels,
-kernel_size=1,
-activation=None)
+self.in_channels, self.inter_channels, kernel_size=1, act_cfg=None)
self.theta = ConvModule(
-self.in_channels,
-self.inter_channels,
-kernel_size=1,
-activation=None)
+self.in_channels, self.inter_channels, kernel_size=1, act_cfg=None)
self.phi = ConvModule(
-self.in_channels,
-self.inter_channels,
-kernel_size=1,
-activation=None)
+self.in_channels, self.inter_channels, kernel_size=1, act_cfg=None)
self.conv_out = ConvModule(
self.inter_channels,
self.in_channels,
kernel_size=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
-activation=None)
+act_cfg=None)

self.init_weights()

38 changes: 38 additions & 0 deletions mmdet/models/utils/activation.py
@@ -0,0 +1,38 @@
import torch.nn as nn

activation_cfg = {
# format: layer_type: (abbreviation, module)
Reviewer comment (Member): This comment is incorrect.

'relu': nn.ReLU,
'leaky_relu': nn.LeakyReLU,
'p_relu': nn.PReLU,
'r_relu': nn.RReLU,
'relu6': nn.ReLU6,
'selu': nn.SELU,
'celu': nn.CELU
}


def build_activation_layer(cfg):
""" Build activation layer

Args:
cfg (dict): cfg should contain:
type (str): Identify activation layer type.
layer args: args needed to instantiate an activation layer.

Returns:
layer (nn.Module): Created activation layer
"""
assert isinstance(cfg, dict) and 'type' in cfg
cfg_ = cfg.copy()

layer_type = cfg_.pop('type')
if layer_type not in activation_cfg:
raise KeyError('Unrecognized activation type {}'.format(layer_type))
else:
activation = activation_cfg[layer_type]
if activation is None:
raise NotImplementedError

layer = activation(**cfg_)
return layer
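As a quick sanity check of the new helper, usage would look like the sketch below; the import path assumes the file lands at mmdet/models/utils/activation.py as in this diff.

import torch
from mmdet.models.utils.activation import build_activation_layer

# `type` selects the layer class from activation_cfg; all remaining keys are
# forwarded to the layer constructor.
act = build_activation_layer(dict(type='leaky_relu', negative_slope=0.1, inplace=True))
assert isinstance(act, torch.nn.LeakyReLU)
out = act(torch.randn(2, 8, 4, 4))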
24 changes: 13 additions & 11 deletions mmdet/models/utils/conv_module.py
@@ -4,6 +4,7 @@
from mmcv.cnn import constant_init, kaiming_init

from mmdet.ops import DeformConvPack, ModulatedDeformConvPack
+from .activation import build_activation_layer
from .conv_ws import ConvWS2d
from .norm import build_norm_layer

@@ -60,7 +61,7 @@ class ConvModule(nn.Module):
False.
conv_cfg (dict): Config dict for convolution layer.
norm_cfg (dict): Config dict for normalization layer.
-activation (str or None): Activation type, "ReLU" by default.
+act_cfg (str or None): Activation type, "ReLU" by default.
Reviewer comment (Member): It is no longer a str.

inplace (bool): Whether to use inplace mode for activation.
order (tuple[str]): The order of conv/norm/activation layers. It is a
sequence of "conv", "norm" and "act". Examples are
@@ -78,22 +79,23 @@ def __init__(self,
bias='auto',
conv_cfg=None,
norm_cfg=None,
-activation='relu',
+act_cfg=dict(type='relu'),
inplace=True,
order=('conv', 'norm', 'act')):
super(ConvModule, self).__init__()
assert conv_cfg is None or isinstance(conv_cfg, dict)
assert norm_cfg is None or isinstance(norm_cfg, dict)
+assert act_cfg is None or isinstance(act_cfg, dict)
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
-self.activation = activation
+self.act_cfg = act_cfg
self.inplace = inplace
self.order = order
assert isinstance(self.order, tuple) and len(self.order) == 3
assert set(order) == set(['conv', 'norm', 'act'])

self.with_norm = norm_cfg is not None
-self.with_activation = activation is not None
+self.with_activation = act_cfg is not None
# if the conv layer is before a norm layer, bias is unnecessary.
if bias == 'auto':
bias = False if self.with_norm else True
@@ -136,12 +138,9 @@ def __init__(self,

# build activation layer
if self.with_activation:
-# TODO: introduce `act_cfg` and supports more activation layers
-if self.activation not in ['relu']:
-raise ValueError('{} is currently not supported.'.format(
-self.activation))
-if self.activation == 'relu':
-self.activate = nn.ReLU(inplace=inplace)
+act_cfg_ = act_cfg.copy()
+act_cfg_.setdefault('inplace', inplace)
+self.activate = build_activation_layer(act_cfg_)

# Use msra init by default
self.init_weights()
@@ -151,7 +150,10 @@ def norm(self):
return getattr(self, self.norm_name)

def init_weights(self):
-nonlinearity = 'relu' if self.activation is None else self.activation
+if self.with_activation and self.act_cfg['type'] == 'leaky_relu':
+nonlinearity = 'leaky_relu'
+else:
+nonlinearity = 'relu'
kaiming_init(self.conv, nonlinearity=nonlinearity)
if self.with_norm:
constant_init(self.norm, 1, bias=0)
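Taken together, a hedged sketch of how ConvModule is configured after this change (assuming ConvModule remains importable from mmdet.models.utils):

from mmdet.models.utils import ConvModule  # assumed import path

# Default: a ReLU follows conv/norm, equivalent to act_cfg=dict(type='relu').
m_default = ConvModule(64, 64, 3, padding=1, norm_cfg=dict(type='BN'))

# Other activations are now configured through act_cfg rather than the old
# `activation` string; extra keys go to the layer constructor.
m_leaky = ConvModule(64, 64, 3, padding=1,
                     act_cfg=dict(type='leaky_relu', negative_slope=0.01))

# Disable the activation entirely, as the necks and heads touched above do.
m_plain = ConvModule(64, 64, 1, act_cfg=None)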