
Commit e79a3a4
optimization fix for bs-disp, bug fixes
deronsmith committed Jun 11, 2024
1 parent 129cced commit e79a3a4
Showing 11 changed files with 426,327 additions and 554,036 deletions.
4 changes: 3 additions & 1 deletion Cargo.toml
@@ -26,6 +26,8 @@ default = ["extension-module"]
 
 [profile.dev]
 opt-level = 3
+debug = false
 
 [profile.release]
-opt-level = 3
+opt-level = 3
+debug = false
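For context, this is roughly how the two profile sections read after the change (reconstructed from the hunk above; the rest of Cargo.toml is omitted). `opt-level = 3` keeps full optimization even for dev builds of the extension module, and `debug = false` skips emitting debug info, which shortens compile times and shrinks the built artifact:

```toml
[profile.dev]
opt-level = 3    # full optimization even in dev builds
debug = false    # do not emit debug info

[profile.release]
opt-level = 3
debug = false
```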
Binary file modified data/output/bs_test00.pkl
Binary file not shown.
Binary file modified data/output/sa_test00.pkl
Binary file not shown.
8 changes: 4 additions & 4 deletions esat/error/bootstrap.py
@@ -389,10 +389,10 @@ def _train(self,
         #TODO: Implement parallelization
         for i in tqdm(range(1, self.bootstrap_n+1), desc="Bootstrap resampling, training and mapping"):
             sample_seed = self.rng.integers(low=0, high=1e10, size=1)
-            _V = copy.deepcopy(self.data)
-            _U = copy.deepcopy(self.uncertainty)
-            _W = copy.deepcopy(self.base_W)
-            _H = copy.deepcopy(self.base_H)
+            _V = copy.deepcopy(self.data)
+            _U = copy.deepcopy(self.uncertainty)
+            _W = copy.deepcopy(self.base_W)
+            _H = copy.deepcopy(self.base_H)
             train_seed = sample_seed
             if block:
                 bs_data, bs_uncertainty, bs_W, bs_index = self._block_resample(data=_V,
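The removed and re-added deepcopy lines render identically in this view (the actual difference is not visible here), but the pattern they implement is the point: each bootstrap iteration starts from fresh copies of the data, uncertainty, and base factor matrices, so in-place modifications from one resample cannot carry over into the next. A minimal sketch of that idea, with hypothetical names rather than the ESAT API:

```python
import copy
import numpy as np

def bootstrap_inputs(data, uncertainty, base_W, base_H, n_runs, rng):
    """Hypothetical illustration: yield pristine, row-resampled inputs per run."""
    for _ in range(n_runs):
        _V = copy.deepcopy(data)         # fresh data matrix for this run
        _U = copy.deepcopy(uncertainty)  # fresh uncertainty matrix
        _W = copy.deepcopy(base_W)       # base factor contributions
        _H = copy.deepcopy(base_H)       # base factor profiles
        idx = rng.integers(0, _V.shape[0], size=_V.shape[0])  # sample rows with replacement
        yield _V[idx], _U[idx], _W[idx], _H

# Example usage with random data:
rng = np.random.default_rng(42)
V = rng.random((50, 8)); U = rng.random((50, 8))
W = rng.random((50, 4)); H = rng.random((4, 8))
for bs_V, bs_U, bs_W, bs_H in bootstrap_inputs(V, U, W, H, n_runs=3, rng=rng):
    print(bs_V.shape, bs_W.shape)
```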
25 changes: 14 additions & 11 deletions esat/error/bs_disp.py
@@ -149,17 +149,20 @@ def run(self,
         if parallel:
             cpus = mp.cpu_count()
             cpus = cpus - 1 if cpus > 1 else 1
-            with mp.Pool(processes=cpus) as pool:
-                p_args = []
-                for i, bs_key in enumerate(bs_keys):
-                    i_model = self.bootstrap.bs_results[bs_key]["model"]
-                    i_args = (bs_key, i_model, self.feature_labels, self.model_selected, self.threshold_dQ,
-                              self.max_search, self.features, self.dQmax)
-                    p_args.append(i_args)
-
-                for result in pool.starmap(BSDISP._parallel_disp, p_args, chunksize=10):
-                    i, i_disp = result
-                    self.disp_results[i] = i_disp
+            pool = mp.Pool(processes=cpus)
+            p_args = []
+            for i, bs_key in enumerate(bs_keys):
+                i_model = self.bootstrap.bs_results[bs_key]["model"]
+                i_args = (bs_key, i_model, self.feature_labels, self.model_selected, self.threshold_dQ,
+                          self.max_search, self.features, self.dQmax)
+                p_args.append(i_args)
+            results = pool.starmap(BSDISP._parallel_disp, p_args)
+            pool.close()
+            pool.join()
+            # for result in pool.starmap(BSDISP._parallel_disp, p_args, chunksize=10):
+            for result in results:
+                i, i_disp = result
+                self.disp_results[i] = i_disp
         else:
             for bs_key in tqdm(bs_keys, desc="BS-DISP - Displacement Stage", position=0, leave=True):
                 bs_result = self.bootstrap.bs_results[bs_key]
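The hunk above replaces the `with mp.Pool(...)` block and chunked `starmap` call with an explicitly created pool that is closed and joined once all results have been collected. A minimal standalone sketch of that pool lifecycle, using a toy worker rather than `BSDISP._parallel_disp`:

```python
import multiprocessing as mp

def _toy_task(key, value):
    """Stand-in for a displacement task; returns (key, result)."""
    return key, value * value

if __name__ == "__main__":
    cpus = mp.cpu_count()
    cpus = cpus - 1 if cpus > 1 else 1          # leave one core free, but never drop below 1
    pool = mp.Pool(processes=cpus)
    p_args = [(i, i) for i in range(8)]         # one argument tuple per task
    results = pool.starmap(_toy_task, p_args)   # blocks until every task has finished
    pool.close()                                # no further tasks will be submitted
    pool.join()                                 # wait for the worker processes to exit
    for key, result in results:
        print(key, result)
```

One relevant detail of the standard library here: `mp.Pool`'s context manager calls `terminate()` on exit, whereas the new code shuts the pool down gracefully with `close()` followed by `join()` after the full result list has been gathered.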
4 changes: 2 additions & 2 deletions esat/model/batch_sa.py
@@ -274,8 +274,8 @@ def _train_task(self, sa, model_i) -> (int, SA):
         t1 = time.time()
         if self.verbose:
             logger.info(f"Model: {model_i}, Seed: {sa.seed}, "
-                        f"Q(true): {round(sa.Qtrue, 4)}, MSE(true): {round(sa.Qtrue/sa.V.shape[0], 4)}, "
-                        f"Q(robust): {round(sa.Qrobust, 4)}, MSE(robust): {round(sa.Qrobust/sa.V.shape[0], 4)},"
+                        f"Q(true): {round(sa.Qtrue, 4)}, MSE(true): {round(sa.Qtrue/sa.V.size, 4)}, "
+                        f"Q(robust): {round(sa.Qrobust, 4)}, MSE(robust): {round(sa.Qrobust/sa.V.size, 4)}, "
                        f"Steps: {sa.converge_steps}/{self.max_iter}, Converged: {sa.converged}, "
                        f"Runtime: {round(t1 - t0, 2)} sec")
        return model_i, sa
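The substantive change here is the MSE denominator: `sa.V.shape[0]` (number of rows in V) becomes `sa.V.size` (total number of elements in V), so the logged MSE is the loss averaged over every data element rather than over sample rows; a missing space after the trailing comma in the log message is also restored. A quick illustration with made-up numbers:

```python
import numpy as np

V = np.ones((100, 10))   # hypothetical data matrix: 100 samples x 10 features
Qtrue = 5000.0           # hypothetical loss value

print(round(Qtrue / V.shape[0], 4))  # 50.0 -- old denominator: 100 rows
print(round(Qtrue / V.size, 4))      # 5.0  -- new denominator: all 1000 elements
```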
9,398 changes: 8,555 additions & 843 deletions notebooks/.ipynb_checkpoints/epa_esat_comparative_analysis-checkpoint.ipynb

Large diffs are not rendered by default.
