eval.py
from __future__ import print_function
import torch
import torch.nn.functional as F
import torchvision.transforms as transforms
import numpy as np
from tqdm import tqdm
from torchcv.transform import resize
from torchcv.datasets import ListDataset
from torchcv.evaluations.voc_eval import voc_eval
from torchcv.models import DSOD, SSDBoxCoder
from torchcv.utils.config import opt
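
# Evaluation flow: load a trained DSOD checkpoint, run it over every image in the
# test list, decode the predictions, and score them against the ground truth with
# the PASCAL VOC metric (per-class AP and mAP).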
print('Loading model..')
net = DSOD(num_classes=21)
net.cuda()
net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))
net.load_state_dict(torch.load(opt.load_path)['net'])
net.eval()
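
# Preprocessing: resize each image (and its boxes) to opt.img_size and normalize it
# the way the Caffe reference pipeline does (BGR channel order, 0-255 range,
# per-channel mean subtraction).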
print('Preparing dataset..')

def caffe_normalize(x):
    # Match Caffe preprocessing: RGB -> BGR, scale to 0-255, subtract the BGR mean pixel.
    return transforms.Compose([
        transforms.Lambda(lambda x: 255 * x[[2, 1, 0]]),
        transforms.Normalize([104, 117, 123], (1, 1, 1)),
    ])(x)
def transform(img, boxes, labels):
    # Resize the image and its ground-truth boxes, then convert to a normalized tensor.
    img, boxes = resize(img, boxes, size=(opt.img_size, opt.img_size))
    img = transforms.Compose([
        transforms.ToTensor(),
        caffe_normalize,
    ])(img)
    return img, boxes, labels
dataset = ListDataset(root=opt.eval_img_root, list_file=opt.eval_img_list, transform=transform)
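# Build the box coder from the underlying module (not the DataParallel wrapper);
# it is used below to decode raw network outputs into boxes, labels, and scores.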
box_coder = SSDBoxCoder(net.module)
pred_boxes = []
pred_labels = []
pred_scores = []
gt_boxes = []
gt_labels = []
#with open('torchcv/datasets/voc/voc07_test_difficult.txt') as f:
# gt_difficults = []
# for line in f.readlines():
# line = line.strip().split()
# d = np.array([int(x) for x in line[1:]])
# gt_difficults.append(d)
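# Difficult-object annotations are not loaded here (see the commented block above),
# so voc_eval is called with gt_difficults=None below.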
print('Processing images..')
num_imgs = len(dataset)
for i in tqdm(range(num_imgs)):
    inputs, box_targets, label_targets = dataset[i]
    gt_boxes.append(box_targets)
    gt_labels.append(label_targets)

    inputs = inputs.unsqueeze(0)  # add a batch dimension
    with torch.no_grad():
        loc_preds, cls_preds = net(inputs.cuda())
    # Decode raw location offsets and class scores into boxes, labels, and confidences.
    box_preds, label_preds, score_preds = box_coder.decode(
        loc_preds.cpu().data.squeeze(),
        F.softmax(cls_preds.squeeze(), dim=1).cpu().data,
        score_thresh=0.1)
    pred_boxes.append(box_preds)
    pred_labels.append(label_preds)
    pred_scores.append(score_preds)
print('Calculating AP..')
aps = voc_eval(pred_boxes, pred_labels, pred_scores,
               gt_boxes, gt_labels, gt_difficults=None,
               iou_thresh=0.5, use_07_metric=False)
print('ap = ', aps['ap'])
print('map = ', aps['map'])
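
# Example invocation (hypothetical flag syntax and paths; opt comes from
# torchcv.utils.config, and the exact command-line interface depends on how
# that config object is populated):
#   python eval.py --load_path=checkpoints/dsod.pth \
#                  --eval_img_root=VOCdevkit/VOC2007/JPEGImages \
#                  --eval_img_list=voc07_test.txt --img_size=300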