Merge pull request #386 from UCL-CCS/grid_search
Grid search
wedeling authored Mar 2, 2023
2 parents 6dd5ec6 + 4e194e0 commit e758f21
Showing 54 changed files with 2,261 additions and 269 deletions.
2 changes: 1 addition & 1 deletion easyvvuq/actions/execute_local.py
@@ -268,7 +268,7 @@ def start(self, previous=None):
for action in self.actions:
previous = self.wrapper(action, previous)
self.result = previous
-assert(self.result['run_id'] == run_id)
+assert (self.result['run_id'] == run_id)
return previous

def finished(self):
8 changes: 4 additions & 4 deletions easyvvuq/analysis/pce_analysis.py
@@ -350,8 +350,8 @@ def sobols(P, coefficients):
varied = [_ for _ in self.sampler.vary.get_keys()]
S1 = {_: np.zeros(sobol.shape[-1]) for _ in varied}
ST = {_: np.zeros(sobol.shape[-1]) for _ in varied}
-#S2 = {_ : {__: np.zeros(sobol.shape[-1]) for __ in varied} for _ in varied}
-#for v in varied: del S2[v][v]
+# S2 = {_ : {__: np.zeros(sobol.shape[-1]) for __ in varied} for _ in varied}
+# for v in varied: del S2[v][v]
S2 = {_: np.zeros((len(varied), sobol.shape[-1])) for _ in varied}
for n, si in enumerate(sobol_idx):
if len(si) == 1:
@@ -360,8 +360,8 @@ def sobols(P, coefficients):
elif len(si) == 2:
v1 = varied[si[0]]
v2 = varied[si[1]]
-#S2[v1][v2] = sobol[n]
-#S2[v2][v1] = sobol[n]
+# S2[v1][v2] = sobol[n]
+# S2[v2][v1] = sobol[n]
S2[v1][si[1]] = sobol[n]
S2[v2][si[0]] = sobol[n]
for i in si:
6 changes: 3 additions & 3 deletions easyvvuq/analysis/results.py
@@ -167,7 +167,7 @@ def _get_sobols_general(self, getter, qoi=None, input_=None):
-------
dict or array
"""
-assert(not ((qoi is None) and (input_ is not None)))
+assert (not ((qoi is None) and (input_ is not None)))
if (qoi is not None) and (qoi not in self.qois):
raise RuntimeError('no such qoi in this analysis')
if (input_ is not None) and (input_ not in self.inputs):
@@ -349,7 +349,7 @@ def describe(self, qoi=None, statistic=None):
an array with the values for that statistic. Otherwise will return a DataFrame
with more data.
"""
-assert(not ((qoi is None) and (statistic is not None)))
+assert (not ((qoi is None) and (statistic is not None)))
statistics = ['mean', 'var', 'std', '1%', '10%', '90%', '99%', 'min', 'max', 'median']
qois = self.qois
if qoi is not None:
@@ -361,7 +361,7 @@
for statistic_ in statistics:
try:
value = self._describe(qoi, statistic_)
-assert(isinstance(value, np.ndarray))
+assert (isinstance(value, np.ndarray))
for i, x in enumerate(value):
try:
result[(qoi, i)][statistic_] = x
8 changes: 4 additions & 4 deletions easyvvuq/analysis/sc_analysis.py
@@ -385,7 +385,7 @@ def adapt_dimension(self, qoi, data_frame, store_stats_history=True,
c_l = self.compute_comb_coef(l_norm=candidate_l_norm)
_, var_candidate_l, _ = self.get_pce_stats(
candidate_l_norm, self.pce_coefs[qoi], c_l)
-#error in var
+# error in var
error[tuple(l)] = np.linalg.norm(var_candidate_l - var_l, np.inf)
else:
logging.debug('Specified refinement method %s not recognized' % method)
@@ -467,7 +467,7 @@ def merge_accepted_and_admissible(self, level=0, **kwargs):
admissible_idx = np.array(admissible_idx).reshape([count, self.N])
merged_l = np.concatenate((self.l_norm, admissible_idx))
# make sure final result contains only unique indices and store
-#results in l_norm
+# results in l_norm
idx = np.unique(merged_l, axis=0, return_index=True)[1]
# return np.array([merged_l[i] for i in sorted(idx)])
self.l_norm = np.array([merged_l[i] for i in sorted(idx)])
@@ -894,7 +894,7 @@ def SC2PCE(self, samples, qoi, verbose=True, **kwargs):
for k in k_norm:
# product of the PCE basis function or order k - 1 and all
# Lagrange basis functions in a_1d, per dimension
-#[[phi_k[0]*a_1d[0]], ..., [phi_k[N-1]*a_1d[N-1]]]
+# [[phi_k[0]*a_1d[0]], ..., [phi_k[N-1]*a_1d[N-1]]]

# orthogonal polynomial generated by chaospy
phi_k = [cp.expansion.stieltjes(k[n] - 1,
@@ -1265,7 +1265,7 @@ def get_sobol_indices(self, qoi, typ='first_order'):
for i_u in range(wi_d_u.shape[0]):
D_u[u] += np.sign(np.prod(diff)) * h[i_u]**2 * wi_d_u[i_u].prod()

-#D_u[u] = D_u[u].flatten()
+# D_u[u] = D_u[u].flatten()

# all subsets of u
W = list(powerset(u))[0:-1]
4 changes: 2 additions & 2 deletions easyvvuq/db/sql.py
@@ -244,7 +244,7 @@ def set_active_app(self, name):
selected = self.session.query(AppTable).filter_by(name=name).all()
if len(selected) == 0:
raise RuntimeError('no such app - {}'.format(name))
-assert(not (len(selected) > 1))
+assert (not (len(selected) > 1))
app = selected[0]
self.session.query(CampaignTable).update({'active_app': app.id})
self.session.commit()
@@ -519,7 +519,7 @@ def _get_campaign_info(self, campaign_name=None):
-------
SQLAlchemy query for campaign with this name.
"""
-assert(isinstance(campaign_name, str) or campaign_name is None)
+assert (isinstance(campaign_name, str) or campaign_name is None)
query = self.session.query(CampaignTable)
if campaign_name is None:
campaign_info = query
2 changes: 1 addition & 1 deletion easyvvuq/encoders/jinja_encoder.py
@@ -1,5 +1,5 @@
import os
-#from string import Template
+# from string import Template
from jinja2 import Template
import logging

1 change: 1 addition & 0 deletions easyvvuq/sampling/__init__.py
@@ -22,6 +22,7 @@
from .mc_sampler import MCSampler
from .csv_sampler import CSVSampler
from .dataframe_sampler import DataFrameSampler
+from .grid_sampler import Grid_Sampler

__copyright__ = """
143 changes: 143 additions & 0 deletions easyvvuq/sampling/grid_sampler.py
@@ -0,0 +1,143 @@
"""A grid sampler
Useful for e.g. hyperparameter search. The "vary" dict contains the values
that must be considered per (hyper)parameter, for instance:
vary = {"x1": [0.0, 0.5, 0.1],
"x2 = [1, 3],
"x3" = [True, False]}
The sampler will create a tensor grid using all specified 1D parameter
values.
"""

__author__ = "Wouter Edeling"
__copyright__ = """
Copyright 2018 Robin A. Richardson, David W. Wright
This file is part of EasyVVUQ
EasyVVUQ is free software: you can redistribute it and/or modify
it under the terms of the Lesser GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
EasyVVUQ is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
Lesser GNU General Public License for more details.
You should have received a copy of the Lesser GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
__license__ = "LGPL"

from itertools import product
import numpy as np
from .base import BaseSamplingElement # , Vary


class Grid_Sampler(BaseSamplingElement, sampler_name="grid_sampler"):

    def __init__(self, vary, count=0):
        """
        Initialize the grid sampler.

        Parameters
        ----------
        vary : dict, or list of dicts
            A dictionary containing all 1D values for each parameter. For instance
            vary = {"x1": [0.0, 0.5, 1.0], "x2": [True, False]}. This will
            create a 2D tensor product of all (x1, x2) parameter combinations.
            If a list of vary dicts is specified, each vary dict will be treated
            independently to generate points. These dicts do not have to contain
            the same parameters. The tensor product points are stored in the
            'points' list, with one tensor product per vary dict.
        count : int, optional
            Internal counter used to count the number of samples that have
            been executed. The default is 0.

        Returns
        -------
        None.
        """
        # always add vary to a list, even if only a single dict is specified
        if not isinstance(vary, list):
            vary = [vary]

        self.vary = vary
        self.count = count
        self.points = []

        # make sure all parameters are stored in a list or array, even
        # if they have only a single value
        for _vary in vary:
            for param in _vary.keys():
                if not isinstance(_vary[param], (list, np.ndarray)):
                    _vary[param] = [_vary[param]]

            # use dtype=object to allow for multiple different types (float, boolean, etc.)
            self.points.append(np.array(list(product(*list(_vary.values()))), dtype=object))

        # the cumulative sizes of all ensembles generated by the vary dicts
        self.cumul_sizes = np.cumsum([points.shape[0] for points in self.points])
        # add a zero to the beginning (necessary in __next__ subroutine)
        self.cumul_sizes = np.insert(self.cumul_sizes, 0, 0)
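        # illustration: two vary dicts yielding 6 and 4 points give
        # cumul_sizes = [0, 6, 10]; sample number 'count' then belongs to the
        # vary dict i for which cumul_sizes[i] <= count < cumul_sizes[i + 1]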

    def is_finite(self):
        return True

    def n_samples(self):
        """Returns the number of samples in this sampler.
        """
        # return self.points.shape[0]
        return self.cumul_sizes[-1]

    def get_param_names(self):
        """
        Get the names of all parameters that were varied.

        Returns
        -------
        param_names : list
            List of parameter names.
        """
        param_names = []
        for _vary in self.vary:
            for name in _vary.keys():
                if name not in param_names:
                    param_names.append(name)
        return param_names

    def __next__(self):
        """
        Return the next sample from the tensor grid.

        Raises
        ------
        StopIteration
            Stop iteration when count >= n_samples.

        Returns
        -------
        run_dict : dict
            A dictionary with the input samples, e.g.
            {'x1': 0.5, 'x2': False}.
        """
        if self.count < self.n_samples():
            # find the vary dict to which sample number 'count' belongs
            vary_idx = np.where(self.count < self.cumul_sizes[1:])[0][0]
            # the index of this sample within that vary dict's tensor grid
            sample_idx = self.count - self.cumul_sizes[vary_idx]
            run_dict = {}
            for i_par, param_name in enumerate(self.vary[vary_idx].keys()):
                run_dict[param_name] = self.points[vary_idx][sample_idx][i_par]
            self.count += 1
            return run_dict
        else:
            raise StopIteration
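A minimal usage sketch of the new sampler (an illustration, not part of the commit; it assumes the re-export added to easyvvuq/sampling/__init__.py above):

from easyvvuq.sampling import Grid_Sampler

# a single vary dict: a 3 x 2 tensor grid, i.e. 6 points
vary = {"x1": [0.0, 0.5, 1.0], "x2": [True, False]}
sampler = Grid_Sampler(vary)
assert sampler.n_samples() == 6

# a list of vary dicts is treated independently (6 + 2 = 8 points in
# total), and the dicts need not share parameters
sampler = Grid_Sampler([vary, {"x3": [1, 3]}])
assert sampler.n_samples() == 8
assert sampler.get_param_names() == ['x1', 'x2', 'x3']

# draw all samples via __next__; each run_dict holds one grid point
for _ in range(sampler.n_samples()):
    run_dict = next(sampler)  # e.g. {'x1': 0.0, 'x2': True}, ..., {'x3': 3}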
22 changes: 11 additions & 11 deletions easyvvuq/sampling/simplex_stochastic_collocation.py
@@ -111,7 +111,7 @@ def init_grid(self):
CONSEQUENCE: I NEED TO RE-MAKE A NEW 'Delaunay' OBJECT EVERYTIME THE GRID
IS REFINED.
"""
-#tri = Delaunay(xi_k_jl, incremental=True)
+# tri = Delaunay(xi_k_jl, incremental=True)
tri = Delaunay(xi_k_jl)

else:
@@ -590,15 +590,15 @@ def check_LEC_j(self, p_j, v, S_j, n_mc, queue):
Psi = self.compute_Psi(xi_Sj, p_j)

# check if Psi is well poised
-#det_Psi = np.linalg.det(Psi)
+# det_Psi = np.linalg.det(Psi)
# if det_Psi == 0:
# #print 'Warning: determinant Psi is zero.'
# #print 'Reducing local p_j from ' + str(p_j[j]) + ' to a lower value.'
# #return an error code
# return queue.put({'p_j[j]':-99, 'el_idx_j':el_idx_j})

# compute the coefficients c_jl
-#c_jl = np.linalg.solve(Psi, v_Sj)
+# c_jl = np.linalg.solve(Psi, v_Sj)
c_jl = DAFSILAS(Psi, v_Sj)

# check the LEC condition for all simplices in the STENCIL S_j
@@ -644,15 +644,15 @@ def check_LEC_j(self, p_j, v, S_j, n_mc, queue):
Psi = self.compute_Psi(xi_Sj, p_j)

# check if Psi is well poised
-#det_Psi = np.linalg.det(Psi)
+# det_Psi = np.linalg.det(Psi)
# if det_Psi == 0:
# #print 'Warning: determinant Psi is zero.'
# #print 'Reducing local p_j from ' + str(p_j[j]) + ' to a lower value.'
# #return an error code
# return queue.put({'p_j[j]':-99, 'el_idx_j':el_idx_j})

# compute the coefficients c_jl
-#c_jl = np.linalg.solve(Psi, v_Sj)
+# c_jl = np.linalg.solve(Psi, v_Sj)
c_jl = DAFSILAS(Psi, v_Sj, False)

if k == el_idx_j.size:
@@ -684,7 +684,7 @@ def compute_stencil_j(self):

for j in range(n_e):
# the number of points in S_j
-#Np1_j = factorial(n_xi + p_j[j])/(factorial(n_xi)*factorial(p_j[j]))
+# Np1_j = factorial(n_xi + p_j[j])/(factorial(n_xi)*factorial(p_j[j]))
# k = {1,...,n_s}\{k_j0, ..., k_jn_xi}
idx = np.delete(range(n_s), self.tri.simplices[j])
# store the vertex indices of the element itself
@@ -1055,7 +1055,7 @@ def surrogate(self, xi, S_j, p_j, v):
# print 'Error, det(Psi)=0 in compute_surplus_k() method, should not be possible'

# compute the coefficients c_jl
-#c_jl = np.linalg.solve(Psi, v_Sj)
+# c_jl = np.linalg.solve(Psi, v_Sj)
c_jl = DAFSILAS(Psi, v_Sj, False)

# compute the interpolation on the old grid
@@ -1240,7 +1240,7 @@ def DAFSILAS(A, b, print_message=False):
P = np.eye(n)

# the ill-condition control parameter
-#epsilon = np.finfo(np.float64).eps
+# epsilon = np.finfo(np.float64).eps
epsilon = 10**-14

for i in range(n - 1):
@@ -1266,9 +1266,9 @@
Ap[:, i + col] = tmp

# Also interchange the entries in b
-#tmp = A[i, n]
+# tmp = A[i, n]
# A[i, n] = A[i+col, n]
-#A[i+col, n] = tmp
+# A[i+col, n] = tmp

# keep track of column switches via a series of permutation matrices P =
# P1*P2*...*Pi*...*Pn ==> at each iteration x = P*xi
@@ -1305,7 +1305,7 @@

# ajj = 1, aij = 0 for j = i...n
Ap[idx[0]:n, idx[0]:n] = np.eye(nullity)
-#bj = 0
+# bj = 0
Ap[idx[0]:n, n] = 0
# ejj = 1, eij = 0
Ap[idx[0]:n, idx[0] + n + 1:m] = np.eye(nullity)
2 changes: 1 addition & 1 deletion easyvvuq/sampling/stochastic_collocation.py
@@ -129,7 +129,7 @@ def __init__(self,
else:
self.l_norm = self.compute_sparse_multi_idx(self.L, self.N)
# create sparse grid of dimension N and level q using the 1d
-#rules in self.xi_1d
+# rules in self.xi_1d
self.xi_d = self.generate_grid(self.l_norm)

self._n_samples = self.xi_d.shape[0]
8 changes: 4 additions & 4 deletions tests/gauss/gauss_json.py
@@ -60,12 +60,12 @@
numbers += bias
numbers_out = np.array(list(enumerate(numbers)))

-#header = 'Step,Value'
+# header = 'Step,Value'

-#fmt = '%i,%f'
-#np.savetxt(output_filename, numbers_out, fmt=fmt, header=header)
+# fmt = '%i,%f'
+# np.savetxt(output_filename, numbers_out, fmt=fmt, header=header)

-#json_output = {'numbers': list(numbers)}
+# json_output = {'numbers': list(numbers)}
# with open(output_filename + '.json', 'wt') as json_fp:
# json.dump(json_output, json_fp)
