create_dataset.py
import os
import cores.config as config
import scipy.io as sio
import numpy as np
import cores.utils.misc as misc
import shutil
from PIL import Image
import cPickle as pickle
# Convert the SBD data and the VOC12 data into the dataset format used by this
# project: image-level labels and images for training, images plus masks for
# validation. Note: this script targets Python 2 (print statements, cPickle).
if __name__ == "__main__":
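    # Create the output dataset folders: training images, val images and val masks.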
    misc.my_mkdir(config.DATASET_PATH)
    misc.my_mkdir(os.path.join(config.DATASET_PATH, config.VOC_TRAIN_IM_FOLDER))
    misc.my_mkdir(os.path.join(config.DATASET_PATH, config.VOC_VAL_IM_FOLDER))
    misc.my_mkdir(os.path.join(config.DATASET_PATH, config.VOC_VAL_MASK_FOLDER))
    # process SBD
    sbd_list = []
    with open(os.path.join(config.SBD_PATH, "train.txt")) as f:
        sbd_list += [i.strip() for i in f.readlines()]
    with open(os.path.join(config.SBD_PATH, "val.txt")) as f:
        sbd_list += [i.strip() for i in f.readlines()]
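    # Read the official VOC12 segmentation train/val splits.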
    with open(os.path.join(config.VOCDEVKIT_PATH, "ImageSets", "Segmentation", "train.txt")) as f:
        voc_train_list = [i.strip() for i in f.readlines()]
    with open(os.path.join(config.VOCDEVKIT_PATH, "ImageSets", "Segmentation", "val.txt")) as f:
        voc_val_list = [i.strip() for i in f.readlines()]
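    # Keep only the SBD images that do not appear in the VOC12 train or val
    # splits, so the extra SBD training data does not overlap the official splits.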
    new_sbd_list = []
    for i in sbd_list:
        if i in voc_train_list or i in voc_val_list:
            continue
        new_sbd_list.append(i)
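    # train_data_dict maps each training image name to its image-level class
    # labels (values 0-19, background excluded).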
    train_data_dict = {}
    # For the training set, only image-level labels are extracted (no pixel masks are kept).
    for index, i in enumerate(new_sbd_list):
        mask = sio.loadmat(os.path.join(config.SBD_PATH, "cls", i+".mat"))['GTcls']['Segmentation'][0][0]
        il = np.unique(mask)
        # Label 0 is background, so it is dropped from the multi-label file; the 21
        # VOC classes become 20 foreground classes, shifted down to the range 0-19.
        image_labels = il[(il != 255) & (il != 0)] - 1
        train_data_dict[i] = image_labels
        shutil.copyfile(os.path.join(config.SBD_PATH, "img", i+".jpg"),
                        os.path.join(config.DATASET_PATH, config.VOC_TRAIN_IM_FOLDER, i+".jpg"))
        print "processed %s in SBD\t%d/%d" % (i, index, len(new_sbd_list))
    for index, i in enumerate(voc_train_list):
        mask = Image.open(os.path.join(config.VOCDEVKIT_PATH, "SegmentationClass", i+".png"))
        il = np.unique(mask)
        image_labels = il[(il != 255) & (il != 0)] - 1
        train_data_dict[i] = image_labels
        shutil.copyfile(os.path.join(config.VOCDEVKIT_PATH, "JPEGImages", i+".jpg"),
                        os.path.join(config.DATASET_PATH, config.VOC_TRAIN_IM_FOLDER, i+".jpg"))
        print "processed %s in VOC training set\t%d/%d" % (i, index, len(voc_train_list))
    # For the val set, save both masks and images.
    for index, i in enumerate(voc_val_list):
        shutil.copyfile(os.path.join(config.VOCDEVKIT_PATH, "JPEGImages", i+".jpg"),
                        os.path.join(config.DATASET_PATH, config.VOC_VAL_IM_FOLDER, i+".jpg"))
        shutil.copyfile(os.path.join(config.VOCDEVKIT_PATH, "SegmentationClass", i+".png"),
                        os.path.join(config.DATASET_PATH, config.VOC_VAL_MASK_FOLDER, i+".png"))
        print "processed %s in VOC val set\t%d/%d" % (i, index, len(voc_val_list))
    # Save the file lists and the multi-label file.
    print "saving files"
    with open(os.path.join(config.DATASET_PATH, config.VOC_TRAIN_MULTI_FILE), "wb") as f:
        pickle.dump(train_data_dict, f)
    with open(os.path.join(config.DATASET_PATH, config.VOC_TRAIN_LIST), "w") as f:
        for i in (new_sbd_list + voc_train_list):
            f.write("%s\n" % i)
    with open(os.path.join(config.DATASET_PATH, config.VOC_VAL_LIST), "w") as f:
        for i in voc_val_list:
            f.write("%s\n" % i)
    print "done!"