utils_experiment.py
import os
import json
import yaml
import logging
import argparse
#####################################
# UTILS #
#####################################
def read_args():
    """Parse command-line arguments for an experiment run."""
    parser = argparse.ArgumentParser()
    add_arg = parser.add_argument
    # EXPERIMENT
    add_arg('--root_dir', type=str, required=True,
            help='Folder where train, val, and test data are stored')
    add_arg('--artifacts_dir', type=str, required=True,
            help='Folder where all training artifacts are saved')
    add_arg('--name', type=str, required=True,
            help='Name of the current training scheme')
    add_arg('--run_nb', type=int, default=0,
            help='Run number of the current iteration of the model')
    add_arg('--debug', action='store_true',
            help='Set flag to load a small dataset')
    add_arg('--beam_search', action='store_true',
            help='Set flag to use beam search during evaluation')
    add_arg('--model_type', type=str, default="bottom_up",
            help='Model type')
    add_arg('--beam_width', type=int, default=5,
            help='Beam width (used in evaluation)')
    # TRAINING
    add_arg('--batch_size', type=int, default=1,
            help='Minibatch size')
    add_arg('--resume_epoch', type=int, default=0,
            help='Epoch to resume from (0 if starting from scratch)')
    add_arg('--max_nb_epochs', type=int, default=1000,
            help='Maximum number of epochs to train')
    add_arg('--lr', type=float, default=0.001,
            help='Learning rate')
    add_arg('--opt', type=str, default="Adam",
            help='Optimization method')
    add_arg('--teacher_forcing', type=float, default=0,
            help='Teacher forcing ratio')
    return parser.parse_args()
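# Example invocation of a training script that calls read_args(). The script
# name and the argument values below are illustrative assumptions; only the
# flags themselves are defined in this module:
#
#   python train.py --root_dir data/ --artifacts_dir artifacts/ \
#       --name bottom_up_baseline --run_nb 1 --batch_size 32 --lr 0.001 \
#       --beam_search --beam_width 5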
def initialize_logger(experiment_dir):
    """Log to <experiment_dir>/log.txt and also echo messages to the console."""
    logfile = os.path.join(experiment_dir, 'log.txt')
    logging.basicConfig(filename=logfile, format='%(message)s', level=logging.INFO)
    logging.getLogger().addHandler(logging.StreamHandler())
def save_args(args, experiment_dir, args_file):
    """Dump the argparse Namespace to a YAML file inside experiment_dir."""
    args_path = os.path.join(experiment_dir, args_file)
    with open(args_path, 'w') as f:
        yaml.dump(args, f, default_flow_style=False)
    logging.info("Args saved")

def load_args(experiment_dir, args_file):
    """Load the argparse Namespace previously written by save_args."""
    args_path = os.path.join(experiment_dir, args_file)
    with open(args_path, 'r') as f:
        # yaml.Loader is needed to reconstruct the Namespace object written by
        # yaml.dump; recent PyYAML versions require an explicit Loader argument.
        args = yaml.load(f, Loader=yaml.Loader)
    logging.info("Args loaded")
    return args
#########################################################
# SENTENCE OUTPUTS #
#########################################################
class Sentences(object):
    """Collects generated captions and writes them to a JSON file."""

    def __init__(self, savedir):
        self.sentences = []
        if not os.path.exists(savedir):
            os.makedirs(savedir)
        self.filepath = os.path.join(savedir, "final_sentences.json")

    def add_sentence(self, image_id, sentence):
        # Drop the first and last tokens before joining the words into a caption.
        caption = ' '.join(sentence[1:-1])
        s = {'image_id': image_id, 'caption': caption}
        if (image_id % 200) == 0:
            print(s)
        self.sentences.append(s)

    def save_sentences(self):
        with open(self.filepath, 'w') as f:
            json.dump(self.sentences, f)
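For reference, below is a minimal sketch of how these helpers might be wired together in a training entry point. Only read_args, initialize_logger, save_args, and Sentences come from this module; the train.py filename, the 'args.yml' filename, the directory layout, and the placeholder loop are illustrative assumptions, not part of the repository.

# train.py (hypothetical) -- assumes utils_experiment.py is importable
import os
from utils_experiment import read_args, initialize_logger, save_args, Sentences

def main():
    args = read_args()
    # One folder per (name, run_nb) pair under artifacts_dir.
    experiment_dir = os.path.join(args.artifacts_dir, args.name, str(args.run_nb))
    os.makedirs(experiment_dir, exist_ok=True)

    initialize_logger(experiment_dir)            # writes <experiment_dir>/log.txt
    save_args(args, experiment_dir, 'args.yml')  # snapshot of the run configuration

    sentences = Sentences(os.path.join(experiment_dir, 'outputs'))
    # ... training / evaluation loop goes here; for each evaluated image:
    #     sentences.add_sentence(image_id, predicted_tokens)
    sentences.save_sentences()

if __name__ == '__main__':
    main()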