Feature/reassign centr bydflt #527

Merged: 18 commits, Aug 12, 2022
Commits (18)
(file changes below are shown from 15 of the 18 commits)
e130f3d
cosmetics
emanuel-schmid Jul 20, 2022
542aabf
impact_calc.ImpactCalc.minimal_exp_gdf: change default to assign_cent…
emanuel-schmid Jul 20, 2022
3f5e3ca
Merge branch 'develop' into feature/reassign_centr_bydflt
emanuel-schmid Aug 4, 2022
d7834e2
impact_calc: introduce optional reassign_centroids argument
emanuel-schmid Aug 4, 2022
50e6469
test_impact_calc: fix failing tests (due to reassignment)
emanuel-schmid Aug 4, 2022
7fda806
Merge branch 'develop' into feature/reassign_centr_bydflt
emanuel-schmid Aug 5, 2022
bfc94ae
forecast.Forecast.calc: simply pass the force_reassign parameter to t…
emanuel-schmid Aug 5, 2022
5df35b7
Apply suggestions from code review
emanuel-schmid Aug 5, 2022
bad2513
rename `reassign_centroids` arguments to `assign_centroids`
emanuel-schmid Aug 5, 2022
9218ba0
calibration_opt: skip centroids assignment in calib_instance
emanuel-schmid Aug 5, 2022
648c030
fix typo
emanuel-schmid Aug 5, 2022
caf8dfd
PEP8 cosmetics
emanuel-schmid Aug 5, 2022
f0c609d
forecast.calc: there is no implicit centroid assignment in impact.cal…
emanuel-schmid Aug 5, 2022
8ed6bbe
forecast.calc: there is no implicit centroid assignment in im…
emanuel-schmid Aug 5, 2022
4834f79
minimize centroid assignment in uncertainty, cost_benefit and measures
emanuel-schmid Aug 5, 2022
d764833
measures.test: explicit centroid assignment before _cutoff
emanuel-schmid Aug 8, 2022
b7962ff
unsequa.calc_cost_benefit: assign centroids manually, w/o overwriting
emanuel-schmid Aug 9, 2022
8c5ebe0
docstring consolidation
emanuel-schmid Aug 9, 2022
48 changes: 25 additions & 23 deletions climada/engine/calibration_opt.py
@@ -66,26 +66,26 @@ def calib_instance(hazard, exposure, impact_func, df_out=pd.DataFrame(),
DataFrame with modelled impact written to rows for each year
or event.
"""
IFS = ImpactFuncSet()
IFS.append(impact_func)
ifs = ImpactFuncSet()
ifs.append(impact_func)
impacts = Impact()
impacts.calc(exposure, IFS, hazard)
impacts.calc(exposure, ifs, hazard, assign_centroids=False)
if yearly_impact: # impact per year
IYS = impacts.calc_impact_year_set(all_years=True)
iys = impacts.calc_impact_year_set(all_years=True)
# Loop over whole year range:
if df_out.empty | df_out.index.shape[0] == 1:
for cnt_, year in enumerate(np.sort(list((IYS.keys())))):
for cnt_, year in enumerate(np.sort(list((iys.keys())))):
if cnt_ > 0:
df_out.loc[cnt_] = df_out.loc[0] # copy info from first row
if year in IYS:
df_out.loc[cnt_, 'impact_CLIMADA'] = IYS[year]
if year in iys:
df_out.loc[cnt_, 'impact_CLIMADA'] = iys[year]
else:
df_out.loc[cnt_, 'impact_CLIMADA'] = 0.0
df_out.loc[cnt_, 'year'] = year
else:
years_in_common = df_out.loc[df_out['year'].isin(np.sort(list((IYS.keys())))), 'year']
years_in_common = df_out.loc[df_out['year'].isin(np.sort(list((iys.keys())))), 'year']
for cnt_, year in years_in_common.iteritems():
df_out.loc[df_out['year'] == year, 'impact_CLIMADA'] = IYS[year]
df_out.loc[df_out['year'] == year, 'impact_CLIMADA'] = iys[year]


else: # impact per event
@@ -138,21 +138,21 @@ def init_impf(impf_name_or_instance, param_dict, df_out=pd.DataFrame(index=[0]))
(index=0) defined with values. The impact function parameters from
param_dict are represented here.
"""
ImpactFunc_final = None
impact_func_final = None
if isinstance(impf_name_or_instance, str):
if impf_name_or_instance == 'emanuel':
ImpactFunc_final = ImpfTropCyclone.from_emanuel_usa(**param_dict)
ImpactFunc_final.haz_type = 'TC'
ImpactFunc_final.id = 1
impact_func_final = ImpfTropCyclone.from_emanuel_usa(**param_dict)
impact_func_final.haz_type = 'TC'
impact_func_final.id = 1
df_out['impact_function'] = impf_name_or_instance
elif isinstance(impf_name_or_instance, impact_funcs.ImpactFunc):
ImpactFunc_final = change_impf(impf_name_or_instance, param_dict)
impact_func_final = change_impf(impf_name_or_instance, param_dict)
df_out['impact_function'] = ('given_' +
ImpactFunc_final.haz_type +
str(ImpactFunc_final.id))
impact_func_final.haz_type +
str(impact_func_final.id))
for key, val in param_dict.items():
df_out[key] = val
return ImpactFunc_final, df_out
return impact_func_final, df_out

def change_impf(impf_instance, param_dict):
"""apply a shifting or a scaling defined in param_dict to the impact
@@ -333,6 +333,7 @@ def calib_all(hazard, exposure, impf_name_or_instance, param_full_dict,
# prepare hazard and exposure
region_ids = list(np.unique(exposure.region_id))
hazard_type = hazard.tag.haz_type
exposure.assign_centroids(hazard)
# prepare impact data
if isinstance(impact_data_source, pd.DataFrame):
df_impact_data = impact_data_source
@@ -347,8 +348,8 @@
for param_dict in params_generator:
print(param_dict)
df_out = copy.deepcopy(df_impact_data)
ImpactFunc_final, df_out = init_impf(impf_name_or_instance, param_dict, df_out)
df_out = calib_instance(hazard, exposure, ImpactFunc_final, df_out, yearly_impact)
impact_func_final, df_out = init_impf(impf_name_or_instance, param_dict, df_out)
df_out = calib_instance(hazard, exposure, impact_func_final, df_out, yearly_impact)
if df_result is None:
df_result = copy.deepcopy(df_out)
else:
@@ -398,6 +399,7 @@ def calib_optimize(hazard, exposure, impf_name_or_instance, param_dict,
# prepare hazard and exposure
region_ids = list(np.unique(exposure.region_id))
hazard_type = hazard.tag.haz_type
exposure.assign_centroids(hazard)
# prepare impact data
if isinstance(impact_data_source, pd.DataFrame):
df_impact_data = impact_data_source
@@ -408,8 +410,8 @@
else:
raise ValueError('other impact data sources not yet implemented.')
# define specific function to
def specific_calib(x):
param_dict_temp = dict(zip(param_dict.keys(), x))
def specific_calib(values):
param_dict_temp = dict(zip(param_dict.keys(), values))
print(param_dict_temp)
return calib_instance(hazard, exposure,
init_impf(impf_name_or_instance, param_dict_temp)[0],
@@ -427,8 +429,8 @@ def specific_calib(x):
{'type': 'ineq', 'fun': lambda x: x[1]}]


x0 = list(param_dict.values())
res = minimize(specific_calib, x0,
values = list(param_dict.values())
res = minimize(specific_calib, values,
# bounds=bounds,
# bounds=((0.0, np.inf), (0.0, np.inf), (0.0, 1.0)),
constraints=cons,
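With these changes, the calibration routines assign centroids once up front and calib_instance then runs the impact calculation with assign_centroids=False, so the expensive spatial matching is not repeated for every candidate parameter set. A minimal sketch of the resulting calling pattern, assuming pre-built hazard and exposure objects; candidate_params and df_impact_data are illustrative placeholders, not names from this PR:

    import copy
    import pandas as pd
    from climada.engine.calibration_opt import calib_instance, init_impf

    exposure.assign_centroids(hazard)   # done once; calib_instance no longer re-assigns
    df_result = None
    for param_dict in candidate_params:  # e.g. [{'v_thresh': 25.7, 'v_half': 74.7, 'scale': 1.0}, ...]
        impf, df_out = init_impf('emanuel', param_dict, copy.deepcopy(df_impact_data))
        # calib_instance now calls Impact.calc(..., assign_centroids=False) internally,
        # reusing the centroid indices already stored in exposure.gdf
        df_out = calib_instance(hazard, exposure, impf, df_out, yearly_impact=True)
        df_result = df_out if df_result is None else pd.concat([df_result, df_out], ignore_index=True)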
22 changes: 15 additions & 7 deletions climada/engine/cost_benefit.py
@@ -158,7 +158,7 @@ def __init__(self):
self.imp_meas_present = dict()

def calc(self, hazard, entity, haz_future=None, ent_future=None,
future_year=None, risk_func=risk_aai_agg, imp_time_depen=None, save_imp=False):
future_year=None, risk_func=risk_aai_agg, imp_time_depen=None, save_imp=False, assign_centroids=True):
"""Compute cost-benefit ratio for every measure provided current
and, optionally, future conditions. Present and future measures need
to have the same name. The measures costs need to be discounted by the user.
@@ -203,6 +203,14 @@ def calc(self, hazard, entity, haz_future=None, ent_future=None,
if future_year is None and ent_future is None:
future_year = entity.exposures.ref_year

# assign centroids
if assign_centroids:
entity.exposures.assign_centroids(hazard, overwrite=True)
if ent_future:
ent_future.exposures.assign_centroids(
haz_future if haz_future else hazard, overwrite=True
)

if not haz_future and not ent_future:
self.future_year = future_year
self._calc_impact_measures(hazard, entity.exposures,
@@ -525,11 +533,11 @@ def plot_waterfall(hazard, entity, haz_future, ent_future,
future_year = ent_future.exposures.ref_year

imp = Impact()
imp.calc(entity.exposures, entity.impact_funcs, hazard)
imp.calc(entity.exposures, entity.impact_funcs, hazard, assign_centroids=False)
curr_risk = risk_func(imp)

imp = Impact()
imp.calc(ent_future.exposures, ent_future.impact_funcs, haz_future)
imp.calc(ent_future.exposures, ent_future.impact_funcs, haz_future, assign_centroids=False)
fut_risk = risk_func(imp)

if not axis:
@@ -542,7 +550,7 @@
# changing future
# socio-economic dev
imp = Impact()
imp.calc(ent_future.exposures, ent_future.impact_funcs, hazard)
imp.calc(ent_future.exposures, ent_future.impact_funcs, hazard, assign_centroids=False)
risk_dev = risk_func(imp)
LOGGER.info('Risk with development at {:d}: {:.3e}'.format(future_year, risk_dev))

@@ -687,7 +695,7 @@ def plot_waterfall_accumulated(self, hazard, entity, ent_future,
time_dep = self._time_dependency_array(imp_time_depen)
# socio-economic dev
imp = Impact()
imp.calc(ent_future.exposures, ent_future.impact_funcs, hazard)
imp.calc(ent_future.exposures, ent_future.impact_funcs, hazard, assign_centroids=False)
risk_dev = self._npv_unaverted_impact(risk_func(imp), entity.disc_rates,
time_dep, curr_risk)
LOGGER.info('Total risk with development at {:d}: {:.3e}'.format(
@@ -760,7 +768,7 @@ def _calc_impact_measures(self, hazard, exposures, meas_set, imp_fun_set,
# compute impact without measures
LOGGER.debug('%s impact with no measure.', when)
imp_tmp = Impact()
imp_tmp.calc(exposures, imp_fun_set, hazard)
imp_tmp.calc(exposures, imp_fun_set, hazard, assign_centroids=False)
impact_meas[NO_MEASURE] = dict()
impact_meas[NO_MEASURE]['cost'] = (0, 0)
impact_meas[NO_MEASURE]['risk'] = risk_func(imp_tmp)
@@ -772,7 +780,7 @@
# compute impact for each measure
for measure in meas_set.get_measure(hazard.tag.haz_type):
LOGGER.debug('%s impact of measure %s.', when, measure.name)
imp_tmp, risk_transf = measure.calc_impact(exposures, imp_fun_set, hazard)
imp_tmp, risk_transf = measure.calc_impact(exposures, imp_fun_set, hazard, assign_centroids=False)
impact_meas[measure.name] = dict()
impact_meas[measure.name]['cost'] = (measure.cost, measure.risk_transf_cost_factor)
impact_meas[measure.name]['risk'] = risk_func(imp_tmp)
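The new assign_centroids flag on CostBenefit.calc moves the exposure-to-hazard matching into a single explicit step; the internal Impact.calc and measure.calc_impact calls then run with assign_centroids=False. A hedged usage sketch, assuming pre-built hazard, haz_future, entity and ent_future objects:

    from climada.engine import CostBenefit

    cost_ben = CostBenefit()
    # default: centroids are (re-)assigned to entity.exposures and ent_future.exposures
    cost_ben.calc(hazard, entity, haz_future=haz_future, ent_future=ent_future, save_imp=True)

    # repeated runs with unchanged exposures and hazard geometry can skip the assignment
    cost_ben.calc(hazard, entity, haz_future=haz_future, ent_future=ent_future,
                  save_imp=True, assign_centroids=False)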
8 changes: 4 additions & 4 deletions climada/engine/forecast.py
@@ -292,12 +292,12 @@ def calc(self, force_reassign=False):
default is false.
"""
# calc impact
if self.hazard:
self.exposure.assign_centroids(self.hazard[0], overwrite=force_reassign)
for ind_i, haz_i in enumerate(self.hazard):
# force reassign
if force_reassign:
self.exposure.assign_centroids(haz_i)
self._impact[ind_i].calc(
self.exposure, self.vulnerability, haz_i, save_mat=True
self.exposure, self.vulnerability, haz_i,
save_mat=True, assign_centroids=False
)

def plot_imp_map(
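Forecast.calc now assigns centroids once for the first hazard (with overwrite=force_reassign) and passes assign_centroids=False to each per-lead-time impact calculation. A sketch of the user-facing effect, assuming an existing Forecast instance named forecast:

    # first run: centroids are assigned to the exposure if not already present
    forecast.calc()

    # after changing the exposure geometry (or the hazard grid), force a fresh assignment
    forecast.calc(force_reassign=True)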
10 changes: 6 additions & 4 deletions climada/engine/impact.py
@@ -184,7 +184,7 @@ def __init__(self,



def calc(self, exposures, impact_funcs, hazard, save_mat=False):
def calc(self, exposures, impact_funcs, hazard, save_mat=False, assign_centroids=True):
"""This function is deprecated, use ImpactCalc.impact
and ImpactCalc.insured_impact instead.
"""
@@ -198,11 +198,11 @@ def calc(self, exposures, impact_funcs, hazard, save_mat=False):
" for insured impacts instead. For non-insured impacts "
"please use ImpactCalc().impact()"
)
self.__dict__ = impcalc.insured_impact(save_mat).__dict__
self.__dict__ = impcalc.insured_impact(save_mat, assign_centroids).__dict__
else:
LOGGER.warning("The use of Impact().calc() is deprecated. "
"Use ImpactCalc().impact() instead.")
self.__dict__ = impcalc.impact(save_mat).__dict__
self.__dict__ = impcalc.impact(save_mat, assign_centroids).__dict__

#TODO: new name
@classmethod
@@ -1027,9 +1027,11 @@ def video_direct_impact(exp, impf_set, haz_list, file_name='',
imp_list = []
exp_list = []
imp_arr = np.zeros(len(exp.gdf))
# assign centroids once for all
exp.assign_centroids(haz_list[0])
for i_time, _ in enumerate(haz_list):
imp_tmp = Impact()
imp_tmp.calc(exp, impf_set, haz_list[i_time])
imp_tmp.calc(exp, impf_set, haz_list[i_time], assign_centroids=False)
imp_arr = np.maximum(imp_arr, imp_tmp.eai_exp)
# remove not impacted exposures
save_exp = imp_arr > imp_thresh
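The deprecated Impact.calc simply forwards the new flag to ImpactCalc, and video_direct_impact now assigns centroids once for the whole hazard list. A sketch of the two equivalent calls, assuming pre-built exposures, impf_set and hazard objects:

    from climada.engine import Impact, ImpactCalc

    # preferred API; assigns centroids by default
    imp = ImpactCalc(exposures, impf_set, hazard).impact(save_mat=True)

    # deprecated wrapper, still honouring the new flag (emits a deprecation warning);
    # reuses the centroids assigned by the first call
    imp_legacy = Impact()
    imp_legacy.calc(exposures, impf_set, hazard, save_mat=True, assign_centroids=False)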
63 changes: 47 additions & 16 deletions climada/engine/impact_calc.py
@@ -61,7 +61,8 @@ def __init__(self,
self.exposures = exposures
self.impfset = impfset
self.hazard = hazard
self._orig_exp_idx = np.arange(self.exposures.gdf.shape[0]) #exposures index to use for matrix reconstruction
# exposures index to use for matrix reconstruction
self._orig_exp_idx = np.arange(self.exposures.gdf.shape[0])

@property
def n_exp_pnt(self):
@@ -101,13 +102,21 @@ def cover(self):
if 'cover' in self.exposures.gdf.columns:
return self.exposures.gdf['cover'].to_numpy()

def impact(self, save_mat=True):
def impact(self, save_mat=True, assign_centroids=True):
"""Compute the impact of a hazard on exposures.

Parameters
----------
save_mat : bool
save_mat : bool, optional
if true, save the total impact matrix (events x exposures)
Default: True
assign_centroids : bool, optional
indicates whether centroids are re-assigned to the self.exposures object
or kept from previous impact calculation with a hazard of the same hazard type.
Centroids assignment is an expensive operation; set this to ``False`` to save
computation time if the centroids have not changed since the last impact
calculation.
Default: True

Examples
--------
@@ -126,7 +135,7 @@
the column is added to the exposures geodataframe.
"""
impf_col = self.exposures.get_impf_column(self.hazard.haz_type)
exp_gdf = self.minimal_exp_gdf(impf_col)
exp_gdf = self.minimal_exp_gdf(impf_col, assign_centroids)
if exp_gdf.size == 0:
return self._return_empty(save_mat)
LOGGER.info('Calculating impact for %s assets (>0) and %s events.',
@@ -136,7 +145,7 @@

#TODO: make a better impact matrix generator for insured impacts when
# the impact matrix is already present
def insured_impact(self, save_mat=False):
def insured_impact(self, save_mat=False, assign_centroids=True):
"""Compute the impact of a hazard on exposures with a deductible and/or
cover.

@@ -147,6 +156,13 @@
----------
save_mat : bool
if true, save the total impact matrix (events x exposures)
assign_centroids : bool, optional
indicates whether centroids are re-assigned to the self.exposures object
or kept from previous impact calculation with a hazard of the same hazard type.
Centroids assignment is an expensive operation; set this to ``False`` to save
computation time if the centroids have not changed since the last impact
calculation.
Default: True

Examples
--------
@@ -169,7 +185,7 @@
"Please set exposures.gdf.cover"
"and/or exposures.gdf.deductible")
impf_col = self.exposures.get_impf_column(self.hazard.haz_type)
exp_gdf = self.minimal_exp_gdf(impf_col)
exp_gdf = self.minimal_exp_gdf(impf_col, assign_centroids)
if exp_gdf.size == 0:
return self._return_empty(save_mat)
LOGGER.info('Calculating impact for %s assets (>0) and %s events.',
@@ -238,18 +254,29 @@ def _return_empty(self, save_mat):
return Impact.from_eih(self.exposures, self.impfset, self.hazard,
at_event, eai_exp, aai_agg, imp_mat)

def minimal_exp_gdf(self, impf_col):
def minimal_exp_gdf(self, impf_col, assign_centroids):
"""Get minimal exposures geodataframe for impact computation

Parameters
----------
exposures : climada.entity.Exposures
hazard : climada.Hazard
impf_col: str
name of the impact function column in exposures.gdf

"""
self.exposures.assign_centroids(self.hazard, overwrite=False)
impf_col : str
Name of the impact function column in exposures.gdf
assign_centroids : bool
Indicates whether centroids are re-assigned to the self.exposures object
or kept from previous impact calculation with a hazard of the same hazard type.
Centroids assignment is an expensive operation; set this to ``False`` to save
computation time if the centroids have not changed since the last impact
calculation.
"""
if assign_centroids:
self.exposures.assign_centroids(self.hazard, overwrite=True)
elif self.hazard.centr_exp_col not in self.exposures.gdf.columns:
raise ValueError("'assign_centroids' is set to 'False' but no centroids are assigned"
f" for the given hazard type ({self.hazard.tag.haz_type})."
" Run 'exposures.assign_centroids()' beforehand or set"
" 'assign_centroids' to 'True'")
mask = (
(self.exposures.gdf.value.values != 0)
& (self.exposures.gdf[self.hazard.centr_exp_col].values >= 0)
Expand All @@ -260,7 +287,8 @@ def minimal_exp_gdf(self, impf_col):
)
if exp_gdf.size == 0:
LOGGER.warning("No exposures with value >0 in the vicinity of the hazard.")
self._orig_exp_idx = mask.nonzero()[0] #update index of kept exposures points in exp_gdf within the full exposures
self._orig_exp_idx = mask.nonzero()[0] # update index of kept exposures points in exp_gdf
# within the full exposures
return exp_gdf

def imp_mat_gen(self, exp_gdf, impf_col):
@@ -381,10 +409,11 @@ def impact_matrix(self, exp_values, cent_idx, impf):
scipy.sparse.csr_matrix
Impact per event (rows) per exposure point (columns)
"""
n_exp_pnt = len(cent_idx) #implicitly checks in matrix assignment whether len(cent_idx) == len(exp_values)
n_exp_pnt = len(cent_idx) # implicitly checks in matrix assignment whether
# len(cent_idx) == len(exp_values)
mdr = self.hazard.get_mdr(cent_idx, impf)
fract = self.hazard.get_fraction(cent_idx)
exp_values_csr = sparse.csr_matrix( #vector 1 x exp_size
exp_values_csr = sparse.csr_matrix( # vector 1 x exp_size
(exp_values, np.arange(n_exp_pnt), [0, n_exp_pnt]),
shape=(1, n_exp_pnt))
return fract.multiply(mdr).multiply(exp_values_csr)
@@ -393,7 +422,9 @@ def stitch_impact_matrix(self, imp_mat_gen):
"""
Make an impact matrix from an impact sub-matrix generator
"""
data, row, col = np.hstack([ #rows=events index, cols=exposure point index within self.exposures
# rows: events index
# cols: exposure point index within self.exposures
data, row, col = np.hstack([
(mat.data, mat.nonzero()[0], self._orig_exp_idx[idx][mat.nonzero()[1]])
for mat, idx in imp_mat_gen
])
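In ImpactCalc, assign_centroids=False is only valid when the exposures already carry the centroid column for the hazard type; otherwise minimal_exp_gdf raises a ValueError. A minimal sketch, assuming pre-built exposures, impf_set and hazard objects:

    from climada.engine import ImpactCalc

    # explicit one-off assignment (writes e.g. a 'centr_TC' column into exposures.gdf)
    exposures.assign_centroids(hazard)

    calc = ImpactCalc(exposures, impf_set, hazard)
    imp = calc.impact(save_mat=False, assign_centroids=False)  # reuses the stored assignment

    # without a prior assignment the same call raises:
    # ValueError: 'assign_centroids' is set to 'False' but no centroids are assigned ...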