Commit 2ca27a9

Add files via upload

1 parent f433e1c commit 2ca27a9

49 files changed, +4949 -0 lines

DerainDataset.py

Lines changed: 279 additions & 0 deletions
@@ -0,0 +1,279 @@
import os
import os.path
import numpy as np
import random
import h5py
import torch
import cv2
import glob
import torch.utils.data as udata
from utils import *


def normalize(data):
    return data / 255.

def Im2Patch(img, win, stride=1):
    # Slice a CHW image into all win x win patches at the given stride.
    k = 0
    endc = img.shape[0]  # channels
    endw = img.shape[1]
    endh = img.shape[2]
    # Take one strided view just to count how many patches fit.
    patch = img[:, 0:endw - win + 1:stride, 0:endh - win + 1:stride]
    TotalPatNum = patch.shape[1] * patch.shape[2]
    Y = np.zeros([endc, win * win, TotalPatNum], np.float32)

    # Gather pixel offset (i, j) of every patch with one vectorized slice.
    for i in range(win):
        for j in range(win):
            patch = img[:, i:endw - win + i + 1:stride, j:endh - win + j + 1:stride]
            Y[:, k, :] = np.array(patch[:]).reshape(endc, TotalPatNum)
            k = k + 1
    return Y.reshape([endc, win, win, TotalPatNum])

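# --- Hedged example (not part of the commit): a minimal shape check for
# Im2Patch. For a 3 x 100 x 100 array with win=50 and stride=50, the valid
# offsets along each axis are {0, 50}, so 4 patches of shape 3 x 50 x 50
# come back and patch 0 is the top-left window.
def _demo_im2patch():
    img = np.random.rand(3, 100, 100).astype(np.float32)  # C x H x W
    patches = Im2Patch(img, win=50, stride=50)
    print(patches.shape)  # (3, 50, 50, 4)
    assert np.allclose(patches[:, :, :, 0], img[:, 0:50, 0:50])
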
def prepare_data_Rain12600(data_path, patch_size, stride):
    # train
    print('process training data')
    input_path = os.path.join(data_path, 'rainy_image')
    target_path = os.path.join(data_path, 'ground_truth')

    save_target_path = os.path.join(data_path, 'train_target.h5')
    save_input_path = os.path.join(data_path, 'train_input.h5')

    target_h5f = h5py.File(save_target_path, 'w')
    input_h5f = h5py.File(save_input_path, 'w')

    train_num = 0
    for i in range(200):
        target_file = "%d.jpg" % (i + 1)
        target = cv2.imread(os.path.join(target_path, target_file))
        b, g, r = cv2.split(target)  # OpenCV reads BGR; reorder to RGB
        target = cv2.merge([r, g, b])

        for j in range(14):  # 14 rainy renderings per clean image
            input_file = "%d_%d.jpg" % (i + 1, j + 1)
            input_img = cv2.imread(os.path.join(input_path, input_file))
            b, g, r = cv2.split(input_img)
            input_img = cv2.merge([r, g, b])

            target_img = np.float32(normalize(target))
            target_patches = Im2Patch(target_img.transpose(2, 0, 1), win=patch_size, stride=stride)

            input_img = np.float32(normalize(input_img))
            input_patches = Im2Patch(input_img.transpose(2, 0, 1), win=patch_size, stride=stride)
            print("input file: %s # samples: %d" % (input_file, target_patches.shape[3]))

            # Store each patch pair under a running integer key.
            for n in range(target_patches.shape[3]):
                target_data = target_patches[:, :, :, n].copy()
                target_h5f.create_dataset(str(train_num), data=target_data)

                input_data = input_patches[:, :, :, n].copy()
                input_h5f.create_dataset(str(train_num), data=input_data)
                train_num += 1

    target_h5f.close()
    input_h5f.close()
    print('training set, # samples %d\n' % train_num)

def prepare_data_RainTrainH(data_path, patch_size, stride):
    # train
    print('process training data')
    input_path = os.path.join(data_path)
    target_path = os.path.join(data_path)

    save_target_path = os.path.join(data_path, 'train_target.h5')
    save_input_path = os.path.join(data_path, 'train_input.h5')

    target_h5f = h5py.File(save_target_path, 'w')
    input_h5f = h5py.File(save_input_path, 'w')

    train_num = 0
    for i in range(1800):
        target_file = "norain-%d.png" % (i + 1)
        if os.path.exists(os.path.join(target_path, target_file)):
            target = cv2.imread(os.path.join(target_path, target_file))
            b, g, r = cv2.split(target)  # BGR -> RGB
            target = cv2.merge([r, g, b])

            input_file = "rain-%d.png" % (i + 1)

            if os.path.exists(os.path.join(input_path, input_file)):  # we delete 546 samples
                input_img = cv2.imread(os.path.join(input_path, input_file))
                b, g, r = cv2.split(input_img)
                input_img = cv2.merge([r, g, b])

                target_img = np.float32(normalize(target))
                target_patches = Im2Patch(target_img.transpose(2, 0, 1), win=patch_size, stride=stride)

                input_img = np.float32(normalize(input_img))
                input_patches = Im2Patch(input_img.transpose(2, 0, 1), win=patch_size, stride=stride)

                print("input file: %s # samples: %d" % (input_file, target_patches.shape[3]))

                for n in range(target_patches.shape[3]):
                    target_data = target_patches[:, :, :, n].copy()
                    target_h5f.create_dataset(str(train_num), data=target_data)

                    input_data = input_patches[:, :, :, n].copy()
                    input_h5f.create_dataset(str(train_num), data=input_data)

                    train_num += 1

    target_h5f.close()
    input_h5f.close()

    print('training set, # samples %d\n' % train_num)

def prepare_data_RainTrainL(data_path, patch_size, stride):
    # train
    print('process training data')
    input_path = os.path.join(data_path, 'input/')
    target_path = os.path.join(data_path, 'target/')

    save_target_path = os.path.join(data_path, 'train_target.h5')
    save_input_path = os.path.join(data_path, 'train_input.h5')

    target_h5f = h5py.File(save_target_path, 'w')
    input_h5f = h5py.File(save_input_path, 'w')

    train_num = 0
    for i in range(1800):
        target_file = "norain-%d.png" % (i + 1)
        if os.path.exists(os.path.join(target_path, target_file)):
            target = cv2.imread(os.path.join(target_path, target_file))
            b, g, r = cv2.split(target)  # BGR -> RGB
            target = cv2.merge([r, g, b])

            input_file = "norain-%dx2.png" % (i + 1)

            if os.path.exists(os.path.join(input_path, input_file)):  # we delete 546 samples
                input_img = cv2.imread(os.path.join(input_path, input_file))
                b, g, r = cv2.split(input_img)
                input_img = cv2.merge([r, g, b])

                target_img = np.float32(normalize(target))
                target_patches = Im2Patch(target_img.transpose(2, 0, 1), win=patch_size, stride=stride)

                input_img = np.float32(normalize(input_img))
                input_patches = Im2Patch(input_img.transpose(2, 0, 1), win=patch_size, stride=stride)

                print("input file: %s # samples: %d" % (input_file, target_patches.shape[3]))

                for n in range(target_patches.shape[3]):
                    target_data = target_patches[:, :, :, n].copy()
                    target_h5f.create_dataset(str(train_num), data=target_data)

                    input_data = input_patches[:, :, :, n].copy()
                    input_h5f.create_dataset(str(train_num), data=input_data)

                    train_num += 1

    target_h5f.close()
    input_h5f.close()

    print('training set, # samples %d\n' % train_num)

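# --- Hedged usage sketch (not from the commit): each prepare_data_* function
# is a one-off preprocessing pass that writes train_target.h5/train_input.h5
# under data_path before training starts. The path, patch size, and stride
# below are illustrative placeholders, not values taken from this repo.
def _demo_prepare():
    prepare_data_RainTrainL(data_path='datasets/train/RainTrainL',
                            patch_size=100, stride=80)
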
# An earlier, commented-out variant of prepare_data_RainTrainL that read
# JPEG pairs and doubled the data with a horizontal flip:
# def prepare_data_RainTrainL(data_path, patch_size, stride):
#     # train
#     print('process training data')
#     input_path = os.path.join(data_path)
#     target_path = os.path.join(data_path)
#
#     save_target_path = os.path.join(data_path, 'train_target.h5')
#     save_input_path = os.path.join(data_path, 'train_input.h5')
#
#     target_h5f = h5py.File(save_target_path, 'w')
#     input_h5f = h5py.File(save_input_path, 'w')
#
#     train_num = 0
#     for i in range(200):
#         # target_file = "norain-%d.png" % (i + 1)
#         target_file = "JPEGImages0/%d.jpg" % (i + 1)
#         print(os.path.join(input_path, target_file))
#         target = cv2.imread(os.path.join(target_path, target_file))
#         b, g, r = cv2.split(target)
#         target = cv2.merge([r, g, b])
#
#         for j in range(2):
#             # input_file = "rain-%d.png" % (i + 1)
#             input_file = "JPEGImages1/%d.jpg" % (i + 1)
#             input_img = cv2.imread(os.path.join(input_path, input_file))
#             b, g, r = cv2.split(input_img)
#             input_img = cv2.merge([r, g, b])
#
#             target_img = target
#
#             if j == 1:  # second pass: horizontal flip of both images
#                 target_img = cv2.flip(target_img, 1)
#                 input_img = cv2.flip(input_img, 1)
#
#             target_img = np.float32(normalize(target_img))
#             target_patches = Im2Patch(target_img.transpose(2, 0, 1), win=patch_size, stride=stride)
#
#             input_img = np.float32(normalize(input_img))
#             input_patches = Im2Patch(input_img.transpose(2, 0, 1), win=patch_size, stride=stride)
#
#             print("input file: %s # samples: %d" % (input_file, target_patches.shape[3]))
#             for n in range(target_patches.shape[3]):
#                 target_data = target_patches[:, :, :, n].copy()
#                 target_h5f.create_dataset(str(train_num), data=target_data)
#
#                 input_data = input_patches[:, :, :, n].copy()
#                 input_h5f.create_dataset(str(train_num), data=input_data)
#
#                 train_num += 1
#
#     target_h5f.close()
#     input_h5f.close()
#
#     print('training set, # samples %d\n' % train_num)

class Dataset(udata.Dataset):
    def __init__(self, data_path='.'):
        super(Dataset, self).__init__()

        self.data_path = data_path

        target_path = os.path.join(self.data_path, 'train_target.h5')
        input_path = os.path.join(self.data_path, 'train_input.h5')

        target_h5f = h5py.File(target_path, 'r')
        input_h5f = h5py.File(input_path, 'r')

        self.keys = list(target_h5f.keys())
        random.shuffle(self.keys)
        target_h5f.close()
        input_h5f.close()

    def __len__(self):
        return len(self.keys)

    def __getitem__(self, index):
        target_path = os.path.join(self.data_path, 'train_target.h5')
        input_path = os.path.join(self.data_path, 'train_input.h5')

        # Reopen the files per item instead of holding handles on self,
        # which keeps the dataset picklable for multi-worker loading.
        target_h5f = h5py.File(target_path, 'r')
        input_h5f = h5py.File(input_path, 'r')

        key = self.keys[index]
        target = np.array(target_h5f[key])
        input_data = np.array(input_h5f[key])  # avoid shadowing the builtin `input`

        target_h5f.close()
        input_h5f.close()

        return torch.Tensor(input_data), torch.Tensor(target)
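# --- Hedged usage sketch (not from the commit): once the HDF5 pair exists,
# Dataset drops into a standard DataLoader. data_path and batch_size are
# illustrative placeholders.
def _demo_loader():
    dataset = Dataset(data_path='datasets/train/RainTrainL')
    loader = udata.DataLoader(dataset, batch_size=16, shuffle=True, num_workers=4)
    for input_batch, target_batch in loader:
        print(input_batch.shape, target_batch.shape)  # (16, 3, patch, patch)
        break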
