Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add new output class and configuration output in ".xyz" format #42

Merged
merged 21 commits into from
Sep 13, 2019
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 9 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -15,4 +15,13 @@ pwscf.save/
/tests/ase_otf/kv3/
/tests/ase_otf/grid3*
/tests/test_outputs/*
/tests/*.xyz
/docs/source/_build/html
/tests/*-bak
/tests/*.out
/tests/*dat
/tests/alog
/tests/gp_from_aimd.out
/tests/h2_otf.out-f.xyz-bak
/tests/pwscf.xml

8 changes: 4 additions & 4 deletions flare/gp.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ def __init__(self, kernel: Callable,
energy_kernel: Callable = None,
opt_algorithm: str = 'L-BFGS-B',
maxiter=10, par=False,
monitor=False):
output=None):
Copy link
Contributor

@stevetorr stevetorr Sep 13, 2019

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Here and in a few other places where output is passed as an argument, we could typehint output as the Output object; in a few locations, we pass in an output location as a string (such as in the TrajectoryTrainer class). We should make sure it's typehinted there as well.

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Type hints will be eliminated eventually, as they don't play well with the Sphinx documentation. Might be better to do without them.

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

(The type of the input should be made clear in the docstring anyway)

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Sad to hear that Sphinx can't handle them. That's a shame. At least docstrings can contain the same info.

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Actually it looks like Sphinx only really struggles with np.ndarray type hints. Here's an example from the docs:
image

If we can find a way to get Sphinx to format that better, type hints should be okay.

"""Initialize GP parameters and training data."""

self.kernel = kernel
Expand All @@ -45,7 +45,7 @@ def __init__(self, kernel: Callable,
self.likelihood = None
self.likelihood_gradient = None
self.par = par
self.monitor = monitor
self.output = output

# TODO unit test custom range
def update_db(self, struc: Structure, forces: list,
Expand Down Expand Up @@ -112,7 +112,7 @@ def force_list_to_np(forces: list) -> np.ndarray:

return forces_np

def train(self, monitor=False, custom_bounds=None,
def train(self, output=None, custom_bounds=None,
grad_tol: float = 1e-4,
x_tol: float = 1e-5,
line_steps: int = 20):
Expand All @@ -126,7 +126,7 @@ def train(self, monitor=False, custom_bounds=None,
x_0 = self.hyps

args = (self.training_data, self.training_labels_np,
self.kernel_grad, self.cutoffs, monitor,
self.kernel_grad, self.cutoffs, output,
self.par)

if self.algo == 'L-BFGS-B':
Expand Down
24 changes: 11 additions & 13 deletions flare/gp_algebra.py
Original file line number Diff line number Diff line change
Expand Up @@ -261,11 +261,11 @@ def get_like_grad_from_mats(ky_mat, hyp_mat, training_labels_np):

def get_neg_likelihood(hyps: np.ndarray, training_data: list,
training_labels_np: np.ndarray,
kernel, cutoffs=None, monitor: bool = False,
kernel, cutoffs=None, output = None,
par=False):

if monitor:
print('hyps: ' + str(hyps))
if output is not None:
output.write_to_log('hyps: ' + str(hyps), name="hyps")

if par:
ky_mat = \
Expand All @@ -278,20 +278,19 @@ def get_neg_likelihood(hyps: np.ndarray, training_data: list,

like = get_like_from_ky_mat(ky_mat, training_labels_np)

if monitor:
print('like: ' + str(like))
print('\n')
if output is not None:
output.write_to_log('like: ' + str(like)+'\n', name="hyps")
Copy link
Contributor

@stevetorr stevetorr Sep 13, 2019

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

A suggestion which is somewhat a matter of personal preference: I personally find formatting strings as
'like: {} \n'.format(like)
to be more readable.
I know that you also just translated the existing line this way, but going forward we could do all of our string formatting this way.


return -like


def get_neg_like_grad(hyps: np.ndarray, training_data: list,
training_labels_np: np.ndarray,
kernel_grad, cutoffs=None,
monitor: bool = False, par=False):
output = None, par=False):

if monitor:
print('hyps: ' + str(hyps))
if output is not None:
output.write_to_log('hyps: ' + str(hyps)+'\n', name="hyps")

if par:
hyp_mat, ky_mat = \
Expand All @@ -305,9 +304,8 @@ def get_neg_like_grad(hyps: np.ndarray, training_data: list,
like, like_grad = \
get_like_grad_from_mats(ky_mat, hyp_mat, training_labels_np)

if monitor:
print('like grad: ' + str(like_grad))
print('like: ' + str(like))
print('\n')
if output is not None:
output.write_to_log('like grad: ' + str(like_grad)+'\n', name="hyps")
output.write_to_log('like: ' + str(like)+'\n', name="hyps")

return -like, -like_grad
53 changes: 25 additions & 28 deletions flare/gp_from_aimd.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@
import numpy as np
from copy import deepcopy
import pickle
import flare.output as output
from flare.output import Output


class TrajectoryTrainer(object):
Expand All @@ -27,7 +27,7 @@ def __init__(self, frames: List[Structure],
parallel: bool = False,
skip: int = 0,
calculate_energy: bool = False,
output_name: str = 'gp_from_aimd.out',
output_name: str = 'gp_from_aimd',
max_atoms_from_frame: int = np.inf, max_trains: int = np.inf,
min_atoms_added: int = 1,
n_cpus: int = 1, shuffle_frames: bool = False,
Expand Down Expand Up @@ -92,7 +92,7 @@ def __init__(self, frames: List[Structure],
else:
self.pred_func = predict_on_structure

self.output_name = output_name
self.output = Output(output_name)

# set number of cpus for parallelization
self.n_cpus = n_cpus
Expand All @@ -119,16 +119,15 @@ def pre_run(self):
training set, then seed with at least one atom from each
"""

output.write_header(self.gp.cutoffs,
self.gp.kernel_name,
self.gp.hyps,
self.gp.algo,
dt=0,
Nsteps=len(self.frames),
structure=self.frames[0],
std_tolerance=(self.rel_std_tolerance,
self.abs_std_tolerance),
output_name=self.output_name)
self.output.write_header(self.gp.cutoffs,
self.gp.kernel_name,
self.gp.hyps,
self.gp.algo,
dt=0,
Nsteps=len(self.frames),
structure=self.frames[0],
std_tolerance=(self.rel_std_tolerance,
self.abs_std_tolerance))

self.start_time = time.time()

Expand Down Expand Up @@ -166,7 +165,7 @@ def pre_run(self):
if (self.gp.l_mat is None) \
or (self.seed_frames is not None
or self.seed_envs is not None):
self.gp.train(monitor=self.verbose)
self.gp.train(output=self.output if self.verbose > 0 else None)

def run(self):
"""
Expand All @@ -190,9 +189,9 @@ def run(self):
mae = np.mean(np.abs(cur_frame.forces - dft_forces)) * 1000
mac = np.mean(np.abs(dft_forces)) * 1000

output.write_gp_dft_comparison(
self.output.write_gp_dft_comparison(
curr_step=i, frame=cur_frame,
start_time=time.time(), output_name=self.output_name,
start_time=time.time(),
dft_forces=dft_forces,
mae=mae, mac=mac, local_energies=None)

Expand All @@ -207,7 +206,7 @@ def run(self):
if self.train_count < self.max_trains:
self.train_gp()

output.conclude_run(self.output_name)
self.output.conclude_run()

if self.pickle_name:
with open(self.pickle_name, 'wb') as f:
Expand All @@ -225,13 +224,11 @@ def update_gp_and_print(self, frame: Structure, train_atoms: List[int],
:return:
"""

output.write_to_output('\nAdding atom(s) {} to the '
'training set.\n'
.format(train_atoms, ),
self.output_name)
output.write_to_output('Uncertainties: {}.\n'
.format(frame.stds[train_atoms]),
self.output_name)
self.output.write_to_log('\nAdding atom(s) {} to the '
'training set.\n'
.format(train_atoms, ))
self.output.write_to_log('Uncertainties: {}.\n'
.format(frame.stds[train_atoms]))

# update gp model
self.gp.update_db(frame, frame.forces, custom_range=train_atoms)
Expand All @@ -244,11 +241,11 @@ def train_gp(self):
"""
Train the Gaussian process and write the results to the output file.
"""
self.gp.train(monitor=True if self.verbose >= 2 else False)
self.gp.train(output=self.output if self.verbose >= 2 else None)
Copy link
Contributor

@stevetorr stevetorr Sep 13, 2019

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I like the way you implemented this (passing output as the output object or None based on verbosity).


output.write_hyps(self.gp.hyp_labels, self.gp.hyps,
self.start_time, self.output_name,
self.gp.like, self.gp.like_grad)
self.output.write_hyps(self.gp.hyp_labels, self.gp.hyps,
self.start_time,
self.gp.like, self.gp.like_grad)
self.train_count += 1

def is_std_in_bound(self, frame: Structure)->(bool, List[int]):
Expand Down
19 changes: 10 additions & 9 deletions flare/md_run.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
import datetime
import concurrent.futures
from flare import md
from flare import output
from flare.output import Output


class MD:
Expand Down Expand Up @@ -42,12 +42,11 @@ def __init__(self, dt: float, number_of_steps: int, gp: GaussianProcess,
self.pes = []
self.kes = []

self.output_name = output_name
self.output = Output(output_name)

def run(self):
output.write_header(self.gp.cutoffs, self.gp.kernel_name, self.gp.hyps,
self.gp.algo, self.dt, self.Nsteps, self.structure,
self.output_name)
self.output.write_header(self.gp.cutoffs, self.gp.kernel_name, self.gp.hyps,
self.gp.algo, self.dt, self.Nsteps, self.structure)
self.start_time = time.time()

while self.curr_step < self.Nsteps:
Expand All @@ -59,7 +58,7 @@ def run(self):
self.update_positions(new_pos)
self.curr_step += 1

output.conclude_run(self.output_name)
self.output.conclude_run()

def predict_on_structure_en(self):
for n in range(self.structure.nat):
Expand Down Expand Up @@ -110,6 +109,8 @@ def update_temperature(self, new_pos):
def record_state(self):
self.pes.append(np.sum(self.local_energies))
self.kes.append(self.KE)
output.write_md_config(self.dt, self.curr_step, self.structure,
self.temperature, self.KE, self.local_energies,
self.start_time, self.output_name)
self.output.write_md_config(self.dt, self.curr_step, self.structure,
self.temperature, self.KE, self.local_energies,
self.start_time)
self.output.write_xyz_config(self.curr_step, self.structure,
self.dft_step)
Loading