new output class and add configuration output in ".xyz" format #42
Changes from all commits: 20374f9, 8361339, 005bc81, 35fa77f, 407df11, 847049e, caaa249, 53fcd87, fd142cf, 40df9bf, ba51183, 0a67a68, 83e742c, ccabfbf, a48290a, 82ae8ac, 54143ab, 3574b55, 27263b2, 74c0ba1, 52b4653
```diff
@@ -261,11 +261,11 @@ def get_like_grad_from_mats(ky_mat, hyp_mat, training_labels_np):

 def get_neg_likelihood(hyps: np.ndarray, training_data: list,
                        training_labels_np: np.ndarray,
-                       kernel, cutoffs=None, monitor: bool = False,
+                       kernel, cutoffs=None, output = None,
                        par=False):

-    if monitor:
-        print('hyps: ' + str(hyps))
+    if output is not None:
+        output.write_to_log('hyps: ' + str(hyps), name="hyps")

     if par:
         ky_mat = \
```
```diff
@@ -278,20 +278,19 @@ def get_neg_likelihood(hyps: np.ndarray, training_data: list,

     like = get_like_from_ky_mat(ky_mat, training_labels_np)

-    if monitor:
-        print('like: ' + str(like))
-        print('\n')
+    if output is not None:
+        output.write_to_log('like: ' + str(like)+'\n', name="hyps")
```
Review comment: A suggestion which is somewhat a matter of personal preference: I personally find formatting strings as …
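The suggestion above is cut off; if it concerns f-strings (the usual alternative to `+ str(...)` concatenation), the two styles compare roughly as follows. A sketch only, not code from this PR:

```python
# Sketch: f-string equivalent of the concatenation style used in the diff.
hyps = [0.1, 0.2]
log_line = 'hyps: ' + str(hyps) + '\n'  # concatenation style from the PR
log_line_f = f'hyps: {hyps}\n'          # f-string alternative
assert log_line == log_line_f           # both produce identical output
```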
```diff

     return -like


 def get_neg_like_grad(hyps: np.ndarray, training_data: list,
                       training_labels_np: np.ndarray,
                       kernel_grad, cutoffs=None,
-                      monitor: bool = False, par=False):
+                      output = None, par=False):

-    if monitor:
-        print('hyps: ' + str(hyps))
+    if output is not None:
+        output.write_to_log('hyps: ' + str(hyps)+'\n', name="hyps")

     if par:
         hyp_mat, ky_mat = \
```
```diff
@@ -305,9 +304,8 @@ def get_neg_like_grad(hyps: np.ndarray, training_data: list,
     like, like_grad = \
         get_like_grad_from_mats(ky_mat, hyp_mat, training_labels_np)

-    if monitor:
-        print('like grad: ' + str(like_grad))
-        print('like: ' + str(like))
-        print('\n')
+    if output is not None:
+        output.write_to_log('like grad: ' + str(like_grad)+'\n', name="hyps")
+        output.write_to_log('like: ' + str(like)+'\n', name="hyps")

     return -like, -like_grad
```
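The `Output` class itself is not part of this file's diff. Judging from the calls above (`Output(output_name)`, `write_to_log(msg, name=...)`, `conclude_run()`), its interface might look roughly like the sketch below; the file-naming scheme and the `open_new_log` helper are assumptions, not code from the PR:

```python
# Hypothetical sketch of the Output interface assumed by this diff;
# the real flare.output.Output also provides write_header, write_hyps,
# write_gp_dft_comparison, etc. (used in the second file below).
class Output:
    def __init__(self, basename: str):
        self.basename = basename
        self.outfiles = {}  # one open file handle per named log stream

    def open_new_log(self, name: str, suffix: str = '.log'):
        # assumption: each stream writes to "<basename>-<name>.log"
        if name not in self.outfiles:
            self.outfiles[name] = open(f'{self.basename}-{name}{suffix}', 'w')

    def write_to_log(self, logstring: str, name: str = 'log'):
        # route the message to the named stream, creating it on first use
        self.open_new_log(name)
        self.outfiles[name].write(logstring)

    def conclude_run(self):
        # close every open stream
        for f in self.outfiles.values():
            f.close()
        self.outfiles = {}
```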
Second file (the TrajectoryTrainer module):
```diff
@@ -15,7 +15,7 @@
 import numpy as np
 from copy import deepcopy
 import pickle
-import flare.output as output
+from flare.output import Output


 class TrajectoryTrainer(object):
```
```diff
@@ -27,7 +27,7 @@ def __init__(self, frames: List[Structure],
                  parallel: bool = False,
                  skip: int = 0,
                  calculate_energy: bool = False,
-                 output_name: str = 'gp_from_aimd.out',
+                 output_name: str = 'gp_from_aimd',
                  max_atoms_from_frame: int = np.inf, max_trains: int = np.inf,
                  min_atoms_added: int = 1,
                  n_cpus: int = 1, shuffle_frames: bool = False,
```
```diff
@@ -92,7 +92,7 @@ def __init__(self, frames: List[Structure],
         else:
             self.pred_func = predict_on_structure

-        self.output_name = output_name
+        self.output = Output(output_name)

         # set number of cpus for parallelization
         self.n_cpus = n_cpus
```
```diff
@@ -119,16 +119,15 @@ def pre_run(self):
         training set, then seed with at least one atom from each
         """

-        output.write_header(self.gp.cutoffs,
-                            self.gp.kernel_name,
-                            self.gp.hyps,
-                            self.gp.algo,
-                            dt=0,
-                            Nsteps=len(self.frames),
-                            structure=self.frames[0],
-                            std_tolerance=(self.rel_std_tolerance,
-                                           self.abs_std_tolerance),
-                            output_name=self.output_name)
+        self.output.write_header(self.gp.cutoffs,
+                                 self.gp.kernel_name,
+                                 self.gp.hyps,
+                                 self.gp.algo,
+                                 dt=0,
+                                 Nsteps=len(self.frames),
+                                 structure=self.frames[0],
+                                 std_tolerance=(self.rel_std_tolerance,
+                                                self.abs_std_tolerance))

         self.start_time = time.time()
```
```diff
@@ -166,7 +165,7 @@ def pre_run(self):
         if (self.gp.l_mat is None) \
                 or (self.seed_frames is not None
                     or self.seed_envs is not None):
-            self.gp.train(monitor=self.verbose)
+            self.gp.train(output=self.output if self.verbose > 0 else None)

     def run(self):
         """
```
```diff
@@ -190,9 +189,9 @@ def run(self):
             mae = np.mean(np.abs(cur_frame.forces - dft_forces)) * 1000
             mac = np.mean(np.abs(dft_forces)) * 1000

-            output.write_gp_dft_comparison(
+            self.output.write_gp_dft_comparison(
                 curr_step=i, frame=cur_frame,
-                start_time=time.time(), output_name=self.output_name,
+                start_time=time.time(),
                 dft_forces=dft_forces,
                 mae=mae, mac=mac, local_energies=None)
```
```diff
@@ -207,7 +206,7 @@ def run(self):
                 if self.train_count < self.max_trains:
                     self.train_gp()

-        output.conclude_run(self.output_name)
+        self.output.conclude_run()

         if self.pickle_name:
             with open(self.pickle_name, 'wb') as f:
```
```diff
@@ -225,13 +224,11 @@ def update_gp_and_print(self, frame: Structure, train_atoms: List[int],
         :return:
         """

-        output.write_to_output('\nAdding atom(s) {} to the '
-                               'training set.\n'
-                               .format(train_atoms, ),
-                               self.output_name)
-        output.write_to_output('Uncertainties: {}.\n'
-                               .format(frame.stds[train_atoms]),
-                               self.output_name)
+        self.output.write_to_log('\nAdding atom(s) {} to the '
+                                 'training set.\n'
+                                 .format(train_atoms, ))
+        self.output.write_to_log('Uncertainties: {}.\n'
+                                 .format(frame.stds[train_atoms]))

         # update gp model
         self.gp.update_db(frame, frame.forces, custom_range=train_atoms)
```
```diff
@@ -244,11 +241,11 @@ def train_gp(self):
         """
         Train the Gaussian process and write the results to the output file.
         """
-        self.gp.train(monitor=True if self.verbose >= 2 else False)
+        self.gp.train(output=self.output if self.verbose >= 2 else None)
```
Review comment: I like the way you implemented this (passing output as the output object or None based on verbosity).
```diff

-        output.write_hyps(self.gp.hyp_labels, self.gp.hyps,
-                          self.start_time, self.output_name,
-                          self.gp.like, self.gp.like_grad)
+        self.output.write_hyps(self.gp.hyp_labels, self.gp.hyps,
+                               self.start_time,
+                               self.gp.like, self.gp.like_grad)
         self.train_count += 1

     def is_std_in_bound(self, frame: Structure)->(bool, List[int]):
```
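The verbosity-gating idiom praised above is easy to exercise in isolation. A minimal, self-contained sketch follows; `StubOutput` and `train` are hypothetical stand-ins, not flare code:

```python
# Minimal sketch of the verbosity-gated logging pattern from train_gp.
class StubOutput:
    def write_to_log(self, logstring: str, name: str = 'log'):
        print(f'[{name}] {logstring}', end='')

def train(output=None):
    # log only when an output object was supplied
    if output is not None:
        output.write_to_log('training started\n', name='hyps')

for verbose in (0, 2):
    # pass the output object above the threshold, None below it
    train(output=StubOutput() if verbose >= 2 else None)
```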
Review comment: Here and in a few other places where output is passed as an argument, we could type-hint output as the Output object; in a few locations we pass in an output location as a string (such as in the TrajectoryTrainer class). We should make sure it's type-hinted there as well.

Review comment: Type hints will be eliminated eventually, as they don't play well with the Sphinx documentation. Might be better to do without them.

Review comment: (The type of the input should be made clear in the docstring anyway.)

Review comment: Sad to hear that Sphinx can't handle them. That's a shame. At least docstrings can contain the same info.

Review comment: Actually, it looks like Sphinx only really struggles with np.ndarray type hints. Here's an example from the docs: [example image not captured]. If we can find a way to get Sphinx to format that better, type hints should be okay.
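Following the suggestion that types live in docstrings rather than hints, a sketch of how the output parameter could be documented in Sphinx (reST) style; the signature matches the diff above, but the wording and docstring are illustrative:

```python
def get_neg_likelihood(hyps, training_data, training_labels_np,
                       kernel, cutoffs=None, output=None, par=False):
    """Return the negative log likelihood for the given hyperparameters.

    :param output: destination for progress logging; pass None to
        disable logging entirely
    :type output: Output or None
    """
    ...
```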