Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

[Fix] Make inputs & channels smaller in unittest #1004

Merged
merged 1 commit into from
Nov 1, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
46 changes: 23 additions & 23 deletions tests/test_models/test_backbones/test_bisenetv1.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,20 +25,20 @@ def test_bisenetv1_backbone():
model.init_weights()
model.train()
batch_size = 2
imgs = torch.randn(batch_size, 3, 256, 512)
imgs = torch.randn(batch_size, 3, 64, 128)
feat = model(imgs)

assert len(feat) == 3
# output for segment Head
assert feat[0].shape == torch.Size([batch_size, 256, 32, 64])
assert feat[0].shape == torch.Size([batch_size, 256, 8, 16])
# for auxiliary head 1
assert feat[1].shape == torch.Size([batch_size, 128, 32, 64])
assert feat[1].shape == torch.Size([batch_size, 128, 8, 16])
# for auxiliary head 2
assert feat[2].shape == torch.Size([batch_size, 128, 16, 32])
assert feat[2].shape == torch.Size([batch_size, 128, 4, 8])

# Test input with rare shape
batch_size = 2
imgs = torch.randn(batch_size, 3, 527, 279)
imgs = torch.randn(batch_size, 3, 95, 27)
feat = model(imgs)
assert len(feat) == 3

Expand All @@ -47,20 +47,20 @@ def test_bisenetv1_backbone():
BiSeNetV1(
backbone_cfg=backbone_cfg,
in_channels=3,
spatial_channels=(64, 64, 64))
spatial_channels=(16, 16, 16))

with pytest.raises(AssertionError):
# BiSeNetV1 context path constraints.
BiSeNetV1(
backbone_cfg=backbone_cfg,
in_channels=3,
context_channels=(128, 256, 512, 1024))
context_channels=(16, 32, 64, 128))


def test_bisenetv1_spatial_path():
with pytest.raises(AssertionError):
# BiSeNetV1 spatial path channel constraints.
SpatialPath(num_channels=(64, 64, 64), in_channels=3)
SpatialPath(num_channels=(16, 16, 16), in_channels=3)


def test_bisenetv1_context_path():
Expand All @@ -79,31 +79,31 @@ def test_bisenetv1_context_path():
with pytest.raises(AssertionError):
# BiSeNetV1 context path constraints.
ContextPath(
backbone_cfg=backbone_cfg, context_channels=(128, 256, 512, 1024))
backbone_cfg=backbone_cfg, context_channels=(16, 32, 64, 128))


def test_bisenetv1_attention_refinement_module():
x_arm = AttentionRefinementModule(256, 64)
assert x_arm.conv_layer.in_channels == 256
assert x_arm.conv_layer.out_channels == 64
x_arm = AttentionRefinementModule(32, 8)
assert x_arm.conv_layer.in_channels == 32
assert x_arm.conv_layer.out_channels == 8
assert x_arm.conv_layer.kernel_size == (3, 3)
x = torch.randn(2, 256, 32, 64)
x = torch.randn(2, 32, 8, 16)
x_out = x_arm(x)
assert x_out.shape == torch.Size([2, 64, 32, 64])
assert x_out.shape == torch.Size([2, 8, 8, 16])


def test_bisenetv1_feature_fusion_module():
ffm = FeatureFusionModule(128, 256)
assert ffm.conv1.in_channels == 128
assert ffm.conv1.out_channels == 256
ffm = FeatureFusionModule(16, 32)
assert ffm.conv1.in_channels == 16
assert ffm.conv1.out_channels == 32
assert ffm.conv1.kernel_size == (1, 1)
assert ffm.gap.output_size == (1, 1)
assert ffm.conv_atten[0].in_channels == 256
assert ffm.conv_atten[0].out_channels == 256
assert ffm.conv_atten[0].in_channels == 32
assert ffm.conv_atten[0].out_channels == 32
assert ffm.conv_atten[0].kernel_size == (1, 1)

ffm = FeatureFusionModule(128, 128)
x1 = torch.randn(2, 64, 64, 128)
x2 = torch.randn(2, 64, 64, 128)
ffm = FeatureFusionModule(16, 16)
x1 = torch.randn(2, 8, 8, 16)
x2 = torch.randn(2, 8, 8, 16)
x_out = ffm(x1, x2)
assert x_out.shape == torch.Size([2, 128, 64, 128])
assert x_out.shape == torch.Size([2, 16, 8, 16])
28 changes: 14 additions & 14 deletions tests/test_models/test_backbones/test_bisenetv2.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,34 +13,34 @@ def test_bisenetv2_backbone():
model.init_weights()
model.train()
batch_size = 2
imgs = torch.randn(batch_size, 3, 512, 1024)
imgs = torch.randn(batch_size, 3, 128, 256)
feat = model(imgs)

assert len(feat) == 5
# output for segment Head
assert feat[0].shape == torch.Size([batch_size, 128, 64, 128])
assert feat[0].shape == torch.Size([batch_size, 128, 16, 32])
# for auxiliary head 1
assert feat[1].shape == torch.Size([batch_size, 16, 128, 256])
assert feat[1].shape == torch.Size([batch_size, 16, 32, 64])
# for auxiliary head 2
assert feat[2].shape == torch.Size([batch_size, 32, 64, 128])
assert feat[2].shape == torch.Size([batch_size, 32, 16, 32])
# for auxiliary head 3
assert feat[3].shape == torch.Size([batch_size, 64, 32, 64])
assert feat[3].shape == torch.Size([batch_size, 64, 8, 16])
# for auxiliary head 4
assert feat[4].shape == torch.Size([batch_size, 128, 16, 32])
assert feat[4].shape == torch.Size([batch_size, 128, 4, 8])

# Test input with rare shape
batch_size = 2
imgs = torch.randn(batch_size, 3, 527, 952)
imgs = torch.randn(batch_size, 3, 95, 27)
feat = model(imgs)
assert len(feat) == 5


def test_bisenetv2_DetailBranch():
x = torch.randn(1, 3, 512, 1024)
detail_branch = DetailBranch(detail_channels=(64, 64, 128))
x = torch.randn(1, 3, 32, 64)
detail_branch = DetailBranch(detail_channels=(64, 16, 32))
assert isinstance(detail_branch.detail_branch[0][0], ConvModule)
x_out = detail_branch(x)
assert x_out.shape == torch.Size([1, 128, 64, 128])
assert x_out.shape == torch.Size([1, 32, 4, 8])


def test_bisenetv2_SemanticBranch():
Expand All @@ -49,9 +49,9 @@ def test_bisenetv2_SemanticBranch():


def test_bisenetv2_BGALayer():
x_a = torch.randn(1, 128, 64, 128)
x_b = torch.randn(1, 128, 16, 32)
bga = BGALayer()
x_a = torch.randn(1, 8, 8, 16)
x_b = torch.randn(1, 8, 2, 4)
bga = BGALayer(out_channels=8)
assert isinstance(bga.conv, ConvModule)
x_out = bga(x_a, x_b)
assert x_out.shape == torch.Size([1, 128, 64, 128])
assert x_out.shape == torch.Size([1, 8, 8, 16])
20 changes: 15 additions & 5 deletions tests/test_models/test_backbones/test_fast_scnn.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,17 +16,27 @@ def test_fastscnn_backbone():
lower_in_channels=128)

# Test FastSCNN Standard Forward
model = FastSCNN()
model = FastSCNN(
in_channels=3,
downsample_dw_channels=(4, 6),
global_in_channels=8,
global_block_channels=(8, 12, 16),
global_block_strides=(2, 2, 1),
global_out_channels=16,
higher_in_channels=8,
lower_in_channels=16,
fusion_out_channels=16,
)
model.init_weights()
model.train()
batch_size = 4
imgs = torch.randn(batch_size, 3, 512, 1024)
imgs = torch.randn(batch_size, 3, 64, 128)
feat = model(imgs)

assert len(feat) == 3
# higher-res
assert feat[0].shape == torch.Size([batch_size, 64, 64, 128])
assert feat[0].shape == torch.Size([batch_size, 8, 8, 16])
# lower-res
assert feat[1].shape == torch.Size([batch_size, 128, 16, 32])
assert feat[1].shape == torch.Size([batch_size, 16, 2, 4])
# FFM output
assert feat[2].shape == torch.Size([batch_size, 128, 64, 128])
assert feat[2].shape == torch.Size([batch_size, 16, 8, 16])
10 changes: 5 additions & 5 deletions tests/test_models/test_backbones/test_hrnet.py
Original file line number Diff line number Diff line change
Expand Up @@ -95,21 +95,21 @@ def test_hrnet_backbone():
model.init_weights()
model.train()

imgs = torch.randn(1, 3, 256, 256)
imgs = torch.randn(1, 3, 64, 64)
feats = model(imgs)
assert len(feats) == 4
assert feats[0].shape == torch.Size([1, 32, 64, 64])
assert feats[3].shape == torch.Size([1, 256, 8, 8])
assert feats[0].shape == torch.Size([1, 32, 16, 16])
assert feats[3].shape == torch.Size([1, 256, 2, 2])

# Test single scale output
model = HRNet(extra=extra, multiscale_output=False)
model.init_weights()
model.train()

imgs = torch.randn(1, 3, 256, 256)
imgs = torch.randn(1, 3, 64, 64)
feats = model(imgs)
assert len(feats) == 1
assert feats[0].shape == torch.Size([1, 32, 64, 64])
assert feats[0].shape == torch.Size([1, 32, 16, 16])

# Test HRNET with two stage frozen
frozen_stages = 2
Expand Down
18 changes: 10 additions & 8 deletions tests/test_models/test_backbones/test_icnet.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,32 +10,34 @@ def test_icnet_backbone():
# Must give backbone dict in config file.
ICNet(
in_channels=3,
layer_channels=(512, 2048),
light_branch_middle_channels=32,
psp_out_channels=512,
out_channels=(64, 256, 256),
layer_channels=(128, 512),
light_branch_middle_channels=8,
psp_out_channels=128,
out_channels=(16, 128, 128),
backbone_cfg=None)

# Test ICNet Standard Forward
model = ICNet(
layer_channels=(128, 512),
backbone_cfg=dict(
type='ResNetV1c',
in_channels=3,
depth=50,
depth=18,
num_stages=4,
out_indices=(0, 1, 2, 3),
dilations=(1, 1, 2, 4),
strides=(1, 2, 1, 1),
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=False,
style='pytorch',
contract_dilation=True), )
contract_dilation=True),
)
assert hasattr(model.backbone,
'maxpool') and model.backbone.maxpool.ceil_mode is True
model.init_weights()
model.train()
batch_size = 2
imgs = torch.randn(batch_size, 3, 512, 1024)
imgs = torch.randn(batch_size, 3, 32, 64)
feat = model(imgs)

assert model.psp_modules[0][0].output_size == 1
Expand All @@ -45,4 +47,4 @@ def test_icnet_backbone():
assert model.conv_sub1[0].padding == 1

assert len(feat) == 3
assert feat[0].shape == torch.Size([batch_size, 64, 64, 128])
assert feat[0].shape == torch.Size([batch_size, 64, 4, 8])
18 changes: 9 additions & 9 deletions tests/test_models/test_backbones/test_mit.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ def test_mit():
assert outs[3].shape == (1, 256, H // 32, W // 32)

# Test non-squared input
H, W = (224, 320)
H, W = (224, 256)
temp = torch.randn((1, 3, H, W))
outs = model(temp)
assert outs[0].shape == (1, 32, H // 4, W // 4)
Expand All @@ -33,25 +33,25 @@ def test_mit():
assert outs[3].shape == (1, 256, H // 32, W // 32)

# Test MixFFN
FFN = MixFFN(128, 512)
FFN = MixFFN(64, 128)
hw_shape = (32, 32)
token_len = 32 * 32
temp = torch.randn((1, token_len, 128))
temp = torch.randn((1, token_len, 64))
# Self identity
out = FFN(temp, hw_shape)
assert out.shape == (1, token_len, 128)
assert out.shape == (1, token_len, 64)
# Out identity
outs = FFN(temp, hw_shape, temp)
assert out.shape == (1, token_len, 128)
assert out.shape == (1, token_len, 64)

# Test EfficientMHA
MHA = EfficientMultiheadAttention(128, 2)
MHA = EfficientMultiheadAttention(64, 2)
hw_shape = (32, 32)
token_len = 32 * 32
temp = torch.randn((1, token_len, 128))
temp = torch.randn((1, token_len, 64))
# Self identity
out = MHA(temp, hw_shape)
assert out.shape == (1, token_len, 128)
assert out.shape == (1, token_len, 64)
# Out identity
outs = MHA(temp, hw_shape, temp)
assert out.shape == (1, token_len, 128)
assert out.shape == (1, token_len, 64)
24 changes: 12 additions & 12 deletions tests/test_models/test_backbones/test_mobilenet_v3.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,24 +32,24 @@ def test_mobilenet_v3():
model.init_weights()
model.train()

imgs = torch.randn(2, 3, 224, 224)
imgs = torch.randn(2, 3, 56, 56)
feat = model(imgs)
assert len(feat) == 3
assert feat[0].shape == (2, 16, 112, 112)
assert feat[1].shape == (2, 16, 56, 56)
assert feat[2].shape == (2, 576, 28, 28)
assert feat[0].shape == (2, 16, 28, 28)
assert feat[1].shape == (2, 16, 14, 14)
assert feat[2].shape == (2, 576, 7, 7)

# Test MobileNetV3 with arch = 'large'
model = MobileNetV3(arch='large', out_indices=(1, 3, 16))
model.init_weights()
model.train()

imgs = torch.randn(2, 3, 224, 224)
imgs = torch.randn(2, 3, 56, 56)
feat = model(imgs)
assert len(feat) == 3
assert feat[0].shape == (2, 16, 112, 112)
assert feat[1].shape == (2, 24, 56, 56)
assert feat[2].shape == (2, 960, 28, 28)
assert feat[0].shape == (2, 16, 28, 28)
assert feat[1].shape == (2, 24, 14, 14)
assert feat[2].shape == (2, 960, 7, 7)

# Test MobileNetV3 with norm_eval True, with_cp True and frozen_stages=5
model = MobileNetV3(norm_eval=True, with_cp=True, frozen_stages=5)
Expand All @@ -59,9 +59,9 @@ def test_mobilenet_v3():
model.init_weights()
model.train()

imgs = torch.randn(2, 3, 224, 224)
imgs = torch.randn(2, 3, 56, 56)
feat = model(imgs)
assert len(feat) == 3
assert feat[0].shape == (2, 16, 112, 112)
assert feat[1].shape == (2, 16, 56, 56)
assert feat[2].shape == (2, 576, 28, 28)
assert feat[0].shape == (2, 16, 28, 28)
assert feat[1].shape == (2, 16, 14, 14)
assert feat[2].shape == (2, 576, 7, 7)
Loading