forked from potterhsu/SVHNClassifier-PyTorch
-
Notifications
You must be signed in to change notification settings - Fork 0
/
alt_eval.py
63 lines (46 loc) · 2.27 KB
/
alt_eval.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
import argparse
import json
import os
from .model import Model
from pathlib import Path
from .alt_evaluator import AltEvaluator
# Command-line interface for a standalone evaluation run.
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--data_dir', default='./data', help='directory to read LMDB files')
parser.add_argument('-l', '--logdir', default='./logs/evaluate', help='directory to write logs')
parser.add_argument('-ld', '--lmdb', default='train.lmdb', help='The lmdb file name to be evaluated')
# type=int: without it argparse hands back a str, not a count (default None = evaluate all images).
parser.add_argument('-n', '--num_images', type=int, default=None, help='The number of images to be evaluated')
# required=True: the checkpoint is dereferenced unconditionally later, so fail fast with a
# clear CLI error instead of a None-path crash inside model.restore().
parser.add_argument('-ch', '--checkpoint', type=str, required=True, help='path to evaluate checkpoint, e.g. ./logs/model-100.pth')
def _eval(path_to_checkpoint_file, path_to_data_dir, path_to_log_dir, lmdb_file, number_of_images_to_evaluate):
    """Restore a model checkpoint and evaluate it on one LMDB file.

    Args:
        path_to_checkpoint_file: checkpoint path, e.g. './logs/model-100.pth'.
        path_to_data_dir: directory containing the LMDB files.
        path_to_log_dir: log directory (currently unused; kept so the call
            signature stays stable for existing callers).
        lmdb_file: LMDB file name inside the data directory.
        number_of_images_to_evaluate: cap on images to evaluate, or None for all.

    Returns:
        Whatever AltEvaluator.evaluate() produces for the restored model.
    """
    path_to_eval_lmdb_dir = os.path.join(path_to_data_dir, lmdb_file)
    model = Model()
    model.restore(path_to_checkpoint_file)
    model.cuda()  # NOTE(review): assumes a CUDA device is available — no CPU fallback.
    print(f'Evaluate {path_to_checkpoint_file} on {path_to_eval_lmdb_dir}')
    results = AltEvaluator(path_to_eval_lmdb_dir, number_of_images_to_evaluate).evaluate(model)
    # TODO: re-enable result export if needed:
    #   export_evaluate_to_data_dir(path_to_data_dir, results, get_model_version(path_to_checkpoint_file))
    return results
def get_model_version(path_to_checkpoint_file):
    """Extract the version/step number from a checkpoint path.

    E.g. './logs/model-100.pth' -> '100'.

    Uses rfind for both markers so a '-' earlier in the directory part of
    the path (e.g. './my-logs/model-42.pth') does not corrupt the result;
    the original find('-') matched the FIRST dash anywhere in the path.
    """
    start = path_to_checkpoint_file.rfind("-") + 1
    end = path_to_checkpoint_file.rfind(".pth")
    return path_to_checkpoint_file[start:end]
def export_evaluate_to_data_dir(data_dir, data, model_version):
    """Persist evaluation results as a JSON file under <data_dir>/evaluate."""
    output_dir = os.path.join(data_dir, "evaluate")
    # Create the output directory (and any missing parents) idempotently.
    os.makedirs(output_dir, exist_ok=True)
    export_to_json(output_dir, data, model_version)
def export_to_json(path_to_log_dir, data, model_version):
    """Serialize *data* to <path_to_log_dir>/data-<model_version>.json."""
    output_file = os.path.join(path_to_log_dir, f"data-{model_version}.json")
    with open(output_file, "w") as json_file:
        json.dump(data, json_file)
def main(args):
    """Run one evaluation pass described by the parsed CLI arguments.

    Args:
        args: argparse.Namespace with data_dir, logdir, lmdb, num_images
            and checkpoint attributes.

    Returns:
        The evaluation results returned by _eval().
    """
    # The original body called get_model_version(args.checkpoint) here and
    # discarded the result — a dead call with no side effects, removed.
    print('Start evaluate')
    results = _eval(args.checkpoint, args.data_dir, args.logdir, args.lmdb,
                    args.num_images)
    print('Done')
    return results
# Script entry point: parse the CLI arguments and run the evaluation.
if __name__ == '__main__':
    main(parser.parse_args())