test_MambaDFuse.py
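"""Test script for MambaDFuse infrared-visible image fusion.

Minimal usage sketch (the paths shown are this script's argparse defaults;
adjust them to your checkpoint and dataset layout):

    python test_MambaDFuse.py --dataset MSRS --A_dir IR --B_dir VI_Y \
        --model_path ./Model/Infrared_Visible_Fusion/Infrared_Visible_Fusion/models/ \
        --iter_number 10000 --in_channel 1

Fused images are written to results/MambaDFuse_<dataset>/.
"""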
import argparse
import os
import sys
import time

import torch
from torch.utils.data import DataLoader

from models.network import MambaDFuse as net
from utils import utils_image as util
from data.dataloder import Dataset as D

os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2"
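# CUDA_VISIBLE_DEVICES remaps GPU ordinals: with "0,1,2" visible, the
# 'cuda:1' device selected in main() is the second of the three listed GPUs.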

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--task', type=str, default='fusion',
                        help='fusion, classical_sr, lightweight_sr, real_sr, gray_dn, color_dn, jpeg_car')
    parser.add_argument('--scale', type=int, default=1,
                        help='scale factor: 1, 2, 3, 4, 8 (1 for denoising and JPEG artifact removal)')
    parser.add_argument('--large_model', action='store_true',
                        help='use large model, only provided for real image SR')
    parser.add_argument('--model_path', type=str,
                        default='./Model/Infrared_Visible_Fusion/Infrared_Visible_Fusion/models/')
    parser.add_argument('--iter_number', type=str, default='10000')
    parser.add_argument('--root_path', type=str, default='./Dataset/testsets/',
                        help='input test image root folder')
    parser.add_argument('--dataset', type=str, default='MSRS',
                        help='name of the test dataset')
    parser.add_argument('--A_dir', type=str, default='IR',
                        help='sub-folder holding source A (infrared) images')
    parser.add_argument('--B_dir', type=str, default='VI_Y',
                        help='sub-folder holding source B (visible) images')
    parser.add_argument('--tile', type=int, default=None,
                        help='tile size; None tests the image as a whole')
    parser.add_argument('--tile_overlap', type=int, default=32,
                        help='overlap between adjacent tiles')
    parser.add_argument('--in_channel', type=int, default=1,
                        help='3 for color images, 1 for grayscale images')
    args = parser.parse_args()
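    # Note: if --tile is set, test() asserts that it is a multiple of the
    # model's window_size (8 here), e.g. --tile 256 --tile_overlap 32.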
    device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')

    # set up model; the checkpoint suffix must match the one actually loaded
    # in define_model() below
    model_path = os.path.join(args.model_path, args.iter_number + '_E.pth')
    if os.path.exists(model_path):
        print(f'loading model from {model_path}')
    else:
        print('Target model path: {} does not exist!'.format(model_path))
        sys.exit()
    model = define_model(args)
    model.eval()
    model = model.to(device)

    # set up folders and paths
    folder, save_dir, border, window_size = setup(args)
    a_dir = os.path.join(args.root_path, args.dataset, args.A_dir)
    b_dir = os.path.join(args.root_path, args.dataset, args.B_dir)
    print(a_dir)
    os.makedirs(save_dir, exist_ok=True)
    test_set = D(a_dir, b_dir, args.in_channel)
    test_loader = DataLoader(test_set, batch_size=1,
                             shuffle=False, num_workers=1,
                             drop_last=False, pin_memory=True)
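    # batch_size=1 keeps images of potentially different sizes from having to
    # be collated into one batch.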
    for i, test_data in enumerate(test_loader):
        imgname = test_data['A_path'][0]
        img_a = test_data['A'].to(device)
        img_b = test_data['B'].to(device)
        start = time.time()

        # inference
        with torch.no_grad():
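            # Padding by concatenating a flipped copy and cropping mirrors the
            # image including its edge row/column ("symmetric" padding); this
            # differs slightly from F.pad(..., mode='reflect'), which does not
            # repeat the edge pixel. Padding up to the next multiple of
            # window_size keeps the model's window partitioning valid.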
            # pad both inputs to a multiple of window_size
            _, _, h_old, w_old = img_a.size()
            h_pad = (h_old // window_size + 1) * window_size - h_old
            w_pad = (w_old // window_size + 1) * window_size - w_old
            img_a = torch.cat([img_a, torch.flip(img_a, [2])], 2)[:, :, :h_old + h_pad, :]
            img_a = torch.cat([img_a, torch.flip(img_a, [3])], 3)[:, :, :, :w_old + w_pad]
            img_b = torch.cat([img_b, torch.flip(img_b, [2])], 2)[:, :, :h_old + h_pad, :]
            img_b = torch.cat([img_b, torch.flip(img_b, [3])], 3)[:, :, :, :w_old + w_pad]
            output = test(img_a, img_b, model, args, window_size)
            # crop the padding back off (scaled by args.scale)
            output = output[..., :h_old * args.scale, :w_old * args.scale]
            output = output.detach()[0].float().cpu()
        end = time.time()

        # save the fused image
        output = util.tensor2uint(output)
        save_name = os.path.join(save_dir, os.path.basename(imgname))
        util.imsave(output, save_name)
        print("[{}/{}] Saving fused image to: {}, processing time: {:.4f} s".format(
            i + 1, len(test_loader), save_name, end - start))

def define_model(args):
    model = net(upscale=args.scale, in_chans=args.in_channel, img_size=128, window_size=8,
                img_range=1., depths=[6, 6, 6, 6], embed_dim=60, num_heads=[6, 6, 6, 6],
                mlp_ratio=2, upsampler=None, resi_connection='1conv')
    param_key_g = 'params'
    model_path = os.path.join(args.model_path, args.iter_number + '_E.pth')
    pretrained_model = torch.load(model_path)
    # checkpoints may wrap the weights in a 'params' key; unwrap if present
    model.load_state_dict(pretrained_model[param_key_g] if param_key_g in pretrained_model.keys()
                          else pretrained_model, strict=True)
    return model
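# Note: the architecture hyper-parameters above must match the ones used at
# training time; with strict=True, load_state_dict() raises on any missing or
# unexpected keys.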

def setup(args):
    save_dir = f'results/MambaDFuse_{args.dataset}'
    folder = os.path.join(args.root_path, args.dataset, 'A_Y')
    print('folder:', folder)
    border = 0
    window_size = 8
    return folder, save_dir, border, window_size
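# folder and border are returned for compatibility with the SwinIR-style test
# harness this script appears to follow; main() only uses save_dir and
# window_size.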

def get_image_pair(args, path, a_dir=None, b_dir=None):
    a_path = os.path.join(a_dir, os.path.basename(path))
    b_path = os.path.join(b_dir, os.path.basename(path))
    print("A image path:", a_path)
    assert args.in_channel in (1, 3), "in_channel must be 1 (gray) or 3 (color)"
    img_a = util.imread_uint(a_path, args.in_channel)
    img_b = util.imread_uint(b_path, args.in_channel)
    img_a = util.uint2single(img_a)
    img_b = util.uint2single(img_b)
    return os.path.basename(path), img_a, img_b
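# get_image_pair() is a standalone loading helper; main() reads images through
# the Dataset/DataLoader pipeline instead and never calls it.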

def test(img_a, img_b, model, args, window_size):
    if args.tile is None:
        # test the image as a whole
        output = model(img_a, img_b)
    else:
        # test the image tile by tile
        b, c, h, w = img_a.size()
        tile = min(args.tile, h, w)
        assert tile % window_size == 0, "tile size should be a multiple of window_size"
        tile_overlap = args.tile_overlap
        sf = args.scale
        stride = tile - tile_overlap
        h_idx_list = list(range(0, h - tile, stride)) + [h - tile]
        w_idx_list = list(range(0, w - tile, stride)) + [w - tile]
        E = torch.zeros(b, c, h * sf, w * sf).type_as(img_a)
        W = torch.zeros_like(E)
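        # E accumulates patch outputs and W counts how many patches cover each
        # pixel; dividing E by W at the end averages the overlap regions.
        # Worked example: h=100, tile=64, stride=32 gives h_idx_list = [0, 32, 36].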
        for h_idx in h_idx_list:
            for w_idx in w_idx_list:
                # the model fuses two inputs, so both sources are cropped to
                # the same tile
                in_patch_a = img_a[..., h_idx:h_idx + tile, w_idx:w_idx + tile]
                in_patch_b = img_b[..., h_idx:h_idx + tile, w_idx:w_idx + tile]
                out_patch = model(in_patch_a, in_patch_b)
                out_patch_mask = torch.ones_like(out_patch)
                E[..., h_idx * sf:(h_idx + tile) * sf, w_idx * sf:(w_idx + tile) * sf].add_(out_patch)
                W[..., h_idx * sf:(h_idx + tile) * sf, w_idx * sf:(w_idx + tile) * sf].add_(out_patch_mask)
        output = E.div_(W)
    return output

if __name__ == '__main__':
    main()