#!/usr/bin/env python3
"""Beam-search evaluation of image-captioning checkpoints (encoder + decoder) on the NWPU test split."""
import argparse
import json
import os
import time

import torch
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
from nltk.translate.bleu_score import corpus_bleu
from tqdm import tqdm

from datasets import *
from utils import *

# import transformer, models
def evaluate_transformer(args):
    """
    Beam-search evaluation for transformer-style decoders.

    :param args: parsed command-line arguments (uses args.beam_size, args.data_folder, args.data_name)
    :return: dict of metrics (BLEU-1~4, METEOR, ROUGE_L, CIDEr)
    """
    beam_size = args.beam_size
    Caption_End = False
    # DataLoader
    # NOTE: RandomHorizontalFlip at test time makes evaluation slightly non-deterministic.
    loader = torch.utils.data.DataLoader(
        CaptionDataset(args.data_folder, args.data_name, 'TEST',
                       transform=transforms.Compose([transforms.RandomHorizontalFlip(), normalize])),
        batch_size=1, shuffle=False, num_workers=0, pin_memory=True)

    # Lists to store references (true captions) and hypotheses (predictions) for each image.
    # If for n images we have n hypotheses, and references a, b, c... for each image, we need:
    # references = [[ref1a, ref1b, ref1c], [ref2a, ref2b], ...], hypotheses = [hyp1, hyp2, ...]
    references = list()
    hypotheses = list()

    with torch.no_grad():
        for i, (image, caps, caplens, allcaps) in enumerate(
                tqdm(loader, desc="EVALUATING AT BEAM SIZE " + str(beam_size))):
            # The TEST loader yields each image once per caption (5 captions per image),
            # so evaluate each image only once.
            if (i + 1) % 5 != 0:
                continue
            k = beam_size
            # Move to GPU device, if available
            image = image.to(device)  # [1, 3, 256, 256]
            # Encode
            encoder_out = encoder(image)  # [1, enc_image_size=14, enc_image_size=14, encoder_dim=2048]
            enc_image_size = encoder_out.size(1)
            encoder_dim = encoder_out.size(-1)
            # We'll treat the problem as having a batch size of k, where k is beam_size
            encoder_out = encoder_out.expand(k, enc_image_size, enc_image_size, encoder_dim)  # [k, enc_image_size, enc_image_size, encoder_dim]
            # Tensor to store the top k previous words at each step; initially all <start>.
            # Important: a [1, 52] tensor (e.g. [[<start> <start> ...]]) will not work,
            # since the decoder applies positional encoding over the full 52-token sequence.
            k_prev_words = torch.LongTensor([[word_map['<start>']] * 52] * k).to(device)  # (k, 52)
            # Tensor to store the top k sequences; initially just <start>
            seqs = torch.LongTensor([[word_map['<start>']]] * k).to(device)  # (k, 1)
            # Tensor to store the top k sequences' scores; initially 0
            top_k_scores = torch.zeros(k, 1).to(device)
            # Lists to store completed sequences and their scores
            complete_seqs = []
            complete_seqs_scores = []
            step = 1
            # Start decoding.
            # s is a number <= k, because sequences are removed from the beam once they hit <end>.
            while True:
                # Keep cap_len on the CPU: a GPU tensor may give different sorted results
                # than the CPU in transformer.py.
                cap_len = torch.LongTensor([52]).repeat(k, 1)  # [s, 1]
                scores, _, _, _, _, _, _ = decoder(encoder_out, k_prev_words, cap_len)
                scores = scores[:, step - 1, :].squeeze(1)  # [s, 1, vocab_size] -> [s, vocab_size]
                scores = F.log_softmax(scores, dim=1)
                # top_k_scores: [s, 1]
                scores = top_k_scores.expand_as(scores) + scores  # [s, vocab_size]
                # For the first step, all k points have the same scores (same k previous words, h, c)
                if step == 1:
                    top_k_scores, top_k_words = scores[0].topk(k, 0, True, True)  # (s)
                else:
                    # Unroll and find top scores and their unrolled indices
                    top_k_scores, top_k_words = scores.view(-1).topk(k, 0, True, True)  # (s)
                # Convert unrolled indices to actual indices of scores
                prev_word_inds = top_k_words // vocab_size  # (s)
                next_word_inds = top_k_words % vocab_size  # (s)
                # Add new words to sequences
                seqs = torch.cat([seqs[prev_word_inds], next_word_inds.unsqueeze(1)], dim=1)  # (s, step+1)
                # Which sequences are incomplete (didn't reach <end>)?
                incomplete_inds = [ind for ind, next_word in enumerate(next_word_inds)
                                   if next_word != word_map['<end>']]
                complete_inds = list(set(range(len(next_word_inds))) - set(incomplete_inds))
                # Set aside complete sequences
                if len(complete_inds) > 0:
                    Caption_End = True
                    complete_seqs.extend(seqs[complete_inds].tolist())
                    complete_seqs_scores.extend(top_k_scores[complete_inds])
                    k -= len(complete_inds)  # reduce beam width accordingly
                # Proceed with incomplete sequences
                if k == 0:
                    break
                seqs = seqs[incomplete_inds]
                encoder_out = encoder_out[prev_word_inds[incomplete_inds]]
                top_k_scores = top_k_scores[incomplete_inds].unsqueeze(1)
                # Important: repeating only the newest word will not work, since the decoder
                # has self-attention over the whole prefix:
                #   k_prev_words = next_word_inds[incomplete_inds].unsqueeze(1).repeat(k, 52)
                k_prev_words = k_prev_words[incomplete_inds]
                k_prev_words[:, :step + 1] = seqs  # [s, 52]
                # Break if decoding has been going on too long
                if step > 50:
                    break
                step += 1
            # Choose the completed caption with the best score.
            # If no sequence reached <end> within the step limit, fall back to the
            # best surviving (incomplete) beam instead of failing.
            if len(complete_seqs_scores) == 0:
                complete_seqs = seqs.tolist()
                complete_seqs_scores = top_k_scores.squeeze(1).tolist()
            indices = complete_seqs_scores.index(max(complete_seqs_scores))
            seq = complete_seqs[indices]
            # References
            img_caps = allcaps[0].tolist()
            img_captions = list(
                map(lambda c: [w for w in c if w not in {word_map['<start>'], word_map['<end>'], word_map['<pad>']}],
                    img_caps))  # remove <start>, <end> and <pad>
            references.append(img_captions)
            # Hypotheses
            hypotheses.append([w for w in seq
                               if w not in {word_map['<start>'], word_map['<end>'], word_map['<pad>']}])
            assert len(references) == len(hypotheses)
            # (Optional) per-image debugging: decode img_captions / seq with rev_word_map
            # and print per-image metrics via get_eval_score here.
    # Save the hypotheses, then calculate BLEU-1~4, METEOR, ROUGE_L and CIDEr scores
    os.makedirs('./results', exist_ok=True)
    with open('./results/33LFC5lstm_NWPU.json', 'w') as file:
        json.dump(hypotheses, file)
    metrics = get_eval_score(references, hypotheses)
    return metrics
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Image_Captioning')
    parser.add_argument('--data_folder', default="./data/NWPU_images1",
                        help='folder with data files saved by create_input_files.py.')
    parser.add_argument('--data_name', default="NWPU_5_cap_per_img_4_min_word_freq",
                        help='base name shared by data files.')
    # NOTE: change these to match the trained checkpoint
    parser.add_argument('--encoder_mode', default="resnet50",
                        help='encoder backbone: inception_v3, vgg16, vgg19, resnet50, resnet101 or resnet152.')
    parser.add_argument('--decoder_mode', default="lstm_attention",
                        help='decoder type: lstm, lstm_attention, transformer or transformer_decoder.')
    parser.add_argument('--beam_size', type=int, default=3, help='beam size.')
    parser.add_argument('--path', default="./best_models_weights/",
                        help='directory containing the model checkpoint.')
    args = parser.parse_args()
    # Evaluate one or more (encoder_layers, decoder_layers) configurations, e.g. (0, 6), (2, 2)
    for encoder_layers, decoder_layers in [(3, 3)]:
        args.encoder_layers = encoder_layers
        args.decoder_layers = decoder_layers
        word_map_file = os.path.join(args.data_folder, 'WORDMAP_' + args.data_name + '.json')
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # transformer.device = torch.device("cpu")
        # models.device = torch.device("cpu")
        cudnn.benchmark = True  # only beneficial if model inputs are fixed size; otherwise adds overhead
        print(device)
        # Load model checkpoint
        pathname = 'BEST_checkpoint_HCNet_NWPU.pth.tar'
        print(time.strftime("%m-%d %H : %M : %S", time.localtime(time.time())))
        checkpoint_path = os.path.join(args.path, pathname)
        print(pathname)
        checkpoint = torch.load(checkpoint_path, map_location=str(device))
        decoder = checkpoint['decoder']
        decoder = decoder.to(device)
        decoder.eval()
        encoder = checkpoint['encoder']
        encoder = encoder.to(device)
        encoder.eval()
        # print(encoder)
        # print(decoder)
        # Load word map (word2id)
        with open(word_map_file, 'r') as j:
            word_map = json.load(j)
        vocab_size = len(word_map)
        rev_word_map = {v: k for k, v in word_map.items()}  # ix2word
        # Normalization transform
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
        if args.decoder_mode == "lstm_attention" or args.decoder_mode == "transformer_decoder":
            metrics = evaluate_transformer(args)
            print("{} - beam size {}: BLEU-1 {} BLEU-2 {} BLEU-3 {} BLEU-4 {} METEOR {} ROUGE_L {} CIDEr {}".format(
                args.decoder_mode, args.beam_size, metrics["Bleu_1"], metrics["Bleu_2"], metrics["Bleu_3"],
                metrics["Bleu_4"], metrics["METEOR"], metrics["ROUGE_L"], metrics["CIDEr"]))
            print(time.strftime("%m-%d %H : %M : %S", time.localtime(time.time())))
        print("\n")
        print("\n")
        print("\n")