-
Notifications
You must be signed in to change notification settings - Fork 25
/
exp_tools.py
210 lines (167 loc) · 6.46 KB
/
exp_tools.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
import contextlib
import os
import sys
import jug
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import GPflow
import GPflow.minibatch as mb
import opt_tools
@jug.TaskGenerator
def jugrun_experiment(exp):
    """Jug task: set up and run one experiment; a timeout counts as done."""
    banner = "Running %s..." % exp.experiment_name
    print(banner)
    exp.setup()
    try:
        exp.run()
    except opt_tools.OptimisationTimeout:
        # Hitting the time budget is an expected way for a run to end.
        print("Timeout")
@contextlib.contextmanager
def suppress_print():
    """Context manager that silences stdout for the duration of the block.

    Uses contextlib.redirect_stdout instead of manually swapping
    sys.stdout, so the original stream is restored even if the body
    raises, and nested redirections compose correctly.
    """
    with open(os.devnull, "w") as devnull:
        with contextlib.redirect_stdout(devnull):
            yield
def load_mnist():
    """Load MNIST through the TF tutorial reader.

    The train and validation splits are pooled into one training set, and
    the one-hot labels are collapsed to integer column vectors.

    :return: (X, Y, Xt, Yt) — train inputs/labels and test inputs/labels.
    """
    data = input_data.read_data_sets("MNIST_data/", one_hot=True)
    train_images = data.train.images.astype(float)
    valid_images = data.validation.images.astype(float)
    X = np.vstack((train_images, valid_images))
    train_labels = np.argmax(data.train.labels, 1)[:, None]
    valid_labels = np.argmax(data.validation.labels, 1)[:, None]
    Y = np.vstack((train_labels, valid_labels))
    Xt = data.test.images.astype(float)
    Yt = np.argmax(data.test.labels, 1)[:, None]
    return X, Y, Xt, Yt
class ExperimentBase(object):
    """
    Base class for one optimisation experiment.

    Holds the dataset (X/Y train, Xt/Yt test), the model `m`, the logger
    and a dict of run settings. Subclasses supply dataset/model/logger
    construction via the setup_* hooks.
    """

    def __init__(self, name):
        self.experiment_name = name
        # Dataset, model and logger are built lazily in setup(); they are
        # not picklable, so they cannot be created here.
        for attr in ("m", "logger", "X", "Y", "Xt", "Yt"):
            setattr(self, attr, None)
        self.run_settings = {}

    def setup_dataset(self, verbose=False):
        raise NotImplementedError

    def setup_model(self):
        raise NotImplementedError

    def setup_logger(self, verbose=False):
        raise NotImplementedError

    def setup(self, verbose=False):
        """
        Set up logger, model and anything else that isn't picklable.

        :return: (model, logger) pair.
        """
        self.setup_dataset(verbose)
        self.setup_model()
        self.setup_logger(verbose)
        return self.m, self.logger

    def run(self, maxiter=np.inf):
        """Optimise the model through the logger.

        run_settings["optimiser"] selects "adam", "rmsprop", or is passed
        through unchanged when it is already an optimiser object.
        """
        choice = self.run_settings.get("optimiser", "adam")
        builders = {"adam": tf.train.AdamOptimizer,
                    "rmsprop": tf.train.RMSPropOptimizer}
        if choice in builders:
            opt_method = builders[choice](self.run_settings['learning_rate'])
        else:
            opt_method = choice
        self.opt_method = opt_method
        try:
            return self.logger.optimize(method=opt_method, maxiter=maxiter,
                                        opt_options=self.run_settings)
        finally:
            # Persist final parameters even on timeout or interruption.
            self.logger.finish(self.m.get_free_state())

    def profile(self):
        """
        Run a few objective evaluations and dump the timeline to a trace
        file named after the experiment.
        """
        s = GPflow.settings.get_settings()
        s.profiling.dump_timeline = True
        s.profiling.output_file_name = "./trace_" + self.experiment_name
        with GPflow.settings.temp_settings(s):
            self.m._compile()
            for _ in range(3):
                self.m._objective(self.m.get_free_state())

    def load_results(self):
        """Read back the optimisation history pickle."""
        return pd.read_pickle(self.hist_path)

    @property
    def base_filename(self):
        return os.path.join('.', 'results', self.experiment_name)

    @property
    def hist_path(self):
        return self.base_filename + '_hist.pkl'

    @property
    def param_path(self):
        return self.base_filename + '_params.pkl'

    def __jug_hash__(self):
        # Jug identifies the task by the experiment name alone.
        from jug.hash import hash_one
        return hash_one(self.experiment_name)
class CifarExperiment(ExperimentBase):
    """CIFAR-10 experiment; data is read from ./datasets/cifar10.npz."""

    def setup_dataset(self, verbose=False):
        archive = np.load('./datasets/cifar10.npz')
        self.X = self._flatten_images(archive['X'], 50000)
        self.Y = archive['Y'].astype('int64')
        self.Xt = self._flatten_images(archive['Xt'], 10000)
        self.Yt = archive['Yt'].astype('int64')

    @staticmethod
    def _flatten_images(raw, count):
        # Rescale to [0, 1], reorder (n, 3, 32, 32) -> (n, 32, 32, 3),
        # then flatten each image to one row.
        scaled = raw / 255.0
        return scaled.reshape(count, 3, 32, 32).swapaxes(1, 3).reshape(count, -1)

    def img_plot(self, i):
        import matplotlib.pyplot as plt
        plt.imshow(self.X[i, :].reshape(32, 32, 3))
class MnistExperiment(ExperimentBase):
    """MNIST experiment; data comes from `load_mnist` with stdout muted."""

    def setup_dataset(self, verbose=False):
        # The tutorial reader prints download/progress noise; silence it.
        with suppress_print():
            self.X, self.Y, self.Xt, self.Yt = load_mnist()
class RectanglesImageExperiment(ExperimentBase):
    """Rectangles-image benchmark loaded from datasets/rectangles_im.npz."""

    def setup_dataset(self, verbose=False):
        arrays = np.load('datasets/rectangles_im.npz')
        self.X = arrays['X']
        self.Y = arrays['Y']
        self.Xt = arrays['Xtest']
        self.Yt = arrays['Ytest']
def calculate_large_batch_lml(m, minibatch_size, batches, progress=False):
    """
    Estimate the full-data LML by averaging `batches` sequential minibatch
    log-likelihood evaluations.

    NOTE(review): this does not work properly yet, presumably because it
    changes the state (w.r.t. _parent) of the model.

    :param m: GPflow model whose X and Y are minibatched data holders.
    :param minibatch_size: rows per sequential batch.
    :param batches: number of batches to average over (must be an int).
    :param progress: if True, show a tqdm progress bar over the batches.
    :return: mean of the per-batch log likelihoods.
    :raises TypeError: if `batches` is not an integer.
    """
    # Raise rather than assert: asserts vanish under `python -O`.
    if not isinstance(batches, int):
        raise TypeError("`batches` must be an integer.")
    old_mbX, old_mbY = m.X, m.Y
    # Swap in sequential (non-random) minibatch iterators so rows are
    # visited in order across the `batches` evaluations.
    m.X = mb.MinibatchData(m.X.value, minibatch_size,
                           batch_manager=mb.SequenceIndices(minibatch_size, m.X.value.shape[0]))
    # The original sized this manager by m.X's row count; X and Y should
    # have the same number of rows, but size by m.Y for safety.
    m.Y = mb.MinibatchData(m.Y.value, minibatch_size,
                           batch_manager=mb.SequenceIndices(minibatch_size, m.Y.value.shape[0]))
    m._kill_autoflow()
    try:
        iterator = range(batches)
        if progress:
            from tqdm import tqdm
            iterator = tqdm(iterator)
        batch_lmls = [m.compute_log_likelihood() for _ in iterator]
    finally:
        # Always restore the original data holders, even if an evaluation
        # raises; the original left the model mutated on failure.
        m.X = old_mbX
        m.Y = old_mbY
        m._kill_autoflow()
        import gc
        gc.collect()
    return np.mean(batch_lmls)
class CalculateFullLMLMixin(object):
    """Mixin that adds a full-data LML estimate to each optimisation record."""

    def _get_record(self, logger, x, f=None):
        record = super(CalculateFullLMLMixin, self)._get_record(logger, x, f)
        model = logger.model
        mb_size = logger.model.X.index_manager.minibatch_size
        n_batches = model.X.shape[0] // mb_size
        lml = calculate_large_batch_lml(model, mb_size, n_batches, True)
        print("full lml: %f" % lml)
        record["lml"] = lml
        return record
class GPflowMultiClassificationTrackerLml(CalculateFullLMLMixin,
                                          opt_tools.gpflow_tasks.GPflowMultiClassificationTracker):
    """Multi-class classification tracker whose log records also carry the
    full-data LML estimate supplied by CalculateFullLMLMixin."""
    pass
class GPflowTrackLml(opt_tools.tasks.GPflowLogOptimisation):
    """Optimisation logger that records a full-data LML at every log step."""

    def _get_record(self, logger, x, f=None):
        """Build a log record of counters, timers and the estimated LML.

        :param logger: optimisation logger owning the model and timers.
        :param x: current free-state vector (unused; kept for interface).
        :param f: optional objective value (unused; kept for interface).
        :return: dict with iteration count, feval count, timings and lml.
        """
        model = logger.model
        minibatch_size = model.X.index_manager.minibatch_size
        lml = calculate_large_batch_lml(model, minibatch_size,
                                        model.X.shape[0] // minibatch_size, True)
        print("full lml: %f" % lml)
        # BUG FIX: the original dict literal used the key "t" twice, so the
        # feval count was silently overwritten by the elapsed time. Record
        # it under "f" instead — presumably the intent; confirm against the
        # other opt_tools trackers' record schema.
        return {"i": logger._i,
                "f": logger.model.num_fevals,
                "t": logger._opt_timer.elapsed_time,
                "tt": logger._total_timer.elapsed_time,
                "lml": lml}