Flatten default initializer dictionaries #1425

Merged 20 commits on May 21, 2024
5 changes: 4 additions & 1 deletion Documentation/CHANGELOG.md
@@ -12,6 +12,8 @@ For more information on HARK, see [our Github organization](https://github.com/e

Release Date: TBA

Note: Due to major changes in this release, you may need to adjust how AgentTypes are instantiated in projects that use HARK. If you manually construct "complicated" objects like MrkvArray, assign them to your instances *after* initialization rather than passing them in the parameter dictionary, as sketched below. See also the new constructor methodology for how to pass parameters for such constructed inputs.
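
For example, a minimal sketch of the new pattern (assuming the remaining parameters come from the model's default dictionary; the transition matrix values are purely illustrative):

```python
import numpy as np

from HARK.ConsumptionSaving.ConsMarkovModel import MarkovConsumerType

# A two-state transition matrix, one array per period of the cycle (illustrative values)
MrkvArray = [np.array([[0.9, 0.1], [0.1, 0.9]])]

# Construct the agent from ordinary parameters first...
agent = MarkovConsumerType()
# ...then attach the constructed object, rather than passing it in the parameter dictionary
agent.assign_parameters(MrkvArray=MrkvArray)
```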

### Major Changes

- Replace object-oriented solvers with single function versions. [#1394](https://github.com/econ-ark/HARK/pull/1394)
@@ -29,9 +31,10 @@ Release Date: TBA
- Moves computation of "stable points" from inside of ConsIndShock solver to a post-solution method. [#1349](https://github.com/econ-ark/HARK/pull/1349)
- Corrects calculation of "human wealth" under risky returns, providing correct limiting linear consumption function. [#1403](https://github.com/econ-ark/HARK/pull/1403)
- Removed 'parameters' from new block definitions; these are now 'calibrations' provided separately.
- Create functions for well-known and repeated calculations in single-function solvers. [#1395](https://github.com/econ-ark/HARK/pull/1395)
- Re-work WealthPortfolioSolver to use approximate EGM method [#1404](https://github.com/econ-ark/HARK/pull/1404)
- Default parameter dictionaries for AgentType subclasses have been "flattened": all parameters appear in one place for each model, rather than inheriting from parent models' dictionaries. The only exception is submodels *within* a file when only 1 or 2 parameters are added or changed. [#1425](https://github.com/econ-ark/HARK/pull/1425)


### 0.14.1

63 changes: 62 additions & 1 deletion HARK/Calibration/Income/IncomeProcesses.py
@@ -540,7 +540,68 @@ def make_AR1_style_pLvlNextFunc(T_cycle, pLvlInitMean, PermGroFac, PrstIncCorr):
return pLvlNextFunc


def construct_pLvlGrid_by_simulation(
###############################################################################


def make_basic_pLvlPctiles(
    pLvlPctiles_count,
    pLvlPctiles_bound=[0.001, 0.999],
    pLvlPctiles_tail_count=0,
    pLvlPctiles_tail_order=np.e,
):
    """
    Make a relatively basic specification for pLvlPctiles by choosing the number
    of uniformly spaced nodes in the "body", the percentile boundaries for the
    body, the number of nodes in each tail, and the order/factor by which the
    tail percentiles approach 0 and 1 respectively.

    Parameters
    ----------
    pLvlPctiles_count : int
        Number of nodes in the "body" of the percentile set.
    pLvlPctiles_bound : [float, float], optional
        Percentile bounds for the "body" of the set. The default is [0.001, 0.999].
    pLvlPctiles_tail_count : int, optional
        Number of nodes in each extant tail of the set. The default is 0.
    pLvlPctiles_tail_order : float, optional
        Order by which tail percentiles approach 0 and 1; each successive tail
        node is closer to the boundary by a factor of 1/pLvlPctiles_tail_order.
        The default is np.e.

    Returns
    -------
    pLvlPctiles : np.array
        Array of percentiles of pLvl, usually used to construct pLvlGrid using
        the function below.
    """
    bound = pLvlPctiles_bound
    fac = 1.0 / pLvlPctiles_tail_order
    body = np.linspace(bound[0], bound[1], num=pLvlPctiles_count)

    if bound[0] > 0.0:
        lower = []
        val = bound[0]
        for i in range(pLvlPctiles_tail_count):
            val *= fac
            lower.append(val)
        lower.reverse()
        lower = np.array(lower)
    else:
        lower = np.array([])

    if bound[1] < 1.0:
        upper = []
        val = 1.0 - bound[1]
        for i in range(pLvlPctiles_tail_count):
            val *= fac
            upper.append(val)
        upper = 1.0 - np.array(upper)
    else:
        upper = np.array([])

    pLvlPctiles = np.concatenate((lower, body, upper))
    return pLvlPctiles
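
# Illustrative usage sketch: with a 9-node body on [0.001, 0.999] and 4-node
# tails whose percentiles shrink toward 0 and 1 by a factor of e (the default
# tail order), the result is a strictly increasing array of 4 + 9 + 4 = 17
# percentiles:
#   pcts = make_basic_pLvlPctiles(
#       9, pLvlPctiles_bound=[0.001, 0.999], pLvlPctiles_tail_count=4
#   )
#   pcts.size  # 17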


def make_pLvlGrid_by_simulation(
    cycles,
    T_cycle,
    PermShkDstn,
97 changes: 76 additions & 21 deletions HARK/ConsumptionSaving/ConsAggShockModel.py
@@ -11,6 +11,11 @@
import scipy.stats as stats

from HARK import AgentType, Market
from HARK.Calibration.Income.IncomeProcesses import (
    construct_lognormal_income_process_unemployment,
    get_PermShkDstn_from_IncShkDstn,
    get_TranShkDstn_from_IncShkDstn,
)
from HARK.ConsumptionSaving.ConsIndShockModel import (
    ConsumerSolution,
    IndShockConsumerType,
@@ -45,7 +50,7 @@
    CRRAutilityP_inv,
    CRRAutilityPP,
)
from HARK.utilities import make_grid_exp_mult, make_assets_grid

__all__ = [
    "AggShockConsumerType",
Expand Down Expand Up @@ -101,20 +106,73 @@ def make_aggshock_solution_terminal(CRRA):
aggshock_constructor_dict = indshk_constructor_dict.copy()
aggshock_constructor_dict["solution_terminal"] = make_aggshock_solution_terminal

# Interest factor is endogenous in agg shocks model
del init_agg_shocks["Rfree"]
del init_agg_shocks["CubicBool"] # Not supported yet for agg shocks model
del init_agg_shocks["vFuncBool"] # Not supported yet for agg shocks model
init_agg_shocks["PermGroFac"] = [1.0]
# Grid of capital-to-labor-ratios (factors)
# Make a dictionary of constructors for the aggregate income shocks model
aggshock_constructor_dict = {
    "IncShkDstn": construct_lognormal_income_process_unemployment,
    "PermShkDstn": get_PermShkDstn_from_IncShkDstn,
    "TranShkDstn": get_TranShkDstn_from_IncShkDstn,
    "aXtraGrid": make_assets_grid,
    "solution_terminal": make_aggshock_solution_terminal,
}
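# Each entry above names a function that is called to build the corresponding
# model input (e.g. IncShkDstn, aXtraGrid) from the raw parameters supplied in
# the dictionaries below (a descriptive note; see the constructor methodology
# referenced in the CHANGELOG).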

# Default parameters to make IncShkDstn using construct_lognormal_income_process_unemployment
default_IncShkDstn_params = {
    "PermShkStd": [0.1], # Standard deviation of log permanent income shocks
    "PermShkCount": 7, # Number of points in discrete approximation to permanent income shocks
    "TranShkStd": [0.1], # Standard deviation of log transitory income shocks
    "TranShkCount": 7, # Number of points in discrete approximation to transitory income shocks
    "UnempPrb": 0.05, # Probability of unemployment while working
    "IncUnemp": 0.3, # Unemployment benefits replacement rate while working
    "T_retire": 0, # Period of retirement (0 --> no retirement)
    "UnempPrbRet": 0.005, # Probability of "unemployment" while retired
    "IncUnempRet": 0.0, # "Unemployment" benefits when retired
}

# Default parameters to make aXtraGrid using make_assets_grid
default_aXtraGrid_params = {
    "aXtraMin": 0.001, # Minimum end-of-period "assets above minimum" value
    "aXtraMax": 20, # Maximum end-of-period "assets above minimum" value
    "aXtraNestFac": 3, # Exponential nesting factor for aXtraGrid
    "aXtraCount": 24, # Number of points in the grid of "assets above minimum"
    "aXtraExtra": None, # Additional other values to add in grid (optional)
}

# Choose a grid of capital-to-labor-ratios (factors relative to SS)
# TODO: Make a constructor for this
MgridBase = np.array(
    [0.1, 0.3, 0.6, 0.8, 0.9, 0.98, 1.0, 1.02, 1.1, 1.2, 1.6, 2.0, 3.0]
)
init_agg_shocks["MgridBase"] = MgridBase
init_agg_shocks["aXtraCount"] = 24
init_agg_shocks["aNrmInitStd"] = 0.0
init_agg_shocks["LivPrb"] = [0.98]
init_agg_shocks["constructors"] = aggshock_constructor_dict

# Make a dictionary to specify an aggregate income shocks consumer type
init_agg_shocks = {
    # BASIC HARK PARAMETERS REQUIRED TO SOLVE THE MODEL
    "cycles": 1, # Finite, non-cyclic model
    "T_cycle": 1, # Number of periods in the cycle for this agent type
    "constructors": aggshock_constructor_dict, # See dictionary above
    # PRIMITIVE RAW PARAMETERS REQUIRED TO SOLVE THE MODEL
    "CRRA": 2.0, # Coefficient of relative risk aversion
    "DiscFac": 0.96, # Intertemporal discount factor
    "LivPrb": [0.98], # Survival probability after each period
    "PermGroFac": [1.00], # Permanent income growth factor
    "BoroCnstArt": 0.0, # Artificial borrowing constraint
    "MgridBase": MgridBase,
    # PARAMETERS REQUIRED TO SIMULATE THE MODEL
    "AgentCount": 10000, # Number of agents of this type
    "T_age": None, # Age after which simulated agents are automatically killed
    "aNrmInitMean": 0.0, # Mean of log initial assets
    "aNrmInitStd": 0.0, # Standard deviation of log initial assets
    "pLvlInitMean": 0.0, # Mean of log initial permanent income
    "pLvlInitStd": 0.0, # Standard deviation of log initial permanent income
    "PermGroFacAgg": 1.0, # Aggregate permanent income growth factor
    # (The portion of PermGroFac attributable to aggregate productivity growth)
    "NewbornTransShk": False, # Whether Newborns have transitory shock
    # ADDITIONAL OPTIONAL PARAMETERS
    "PerfMITShk": False, # Do Perfect Foresight MIT Shock
    # (Forces Newborns to follow solution path of the agent they replaced if True)
    "neutral_measure": False, # Whether to use permanent income neutral measure (see Harmenberg 2021)
}
init_agg_shocks.update(default_IncShkDstn_params)
init_agg_shocks.update(default_aXtraGrid_params)
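
# Usage sketch (illustrative, assuming the standard AgentType interface):
# because the dictionary above is flat, every default for this model is visible
# in one place; a user would typically rely on it and override only what they
# need, e.g.
#   agent = AggShockConsumerType(**init_agg_shocks)
#   agent.assign_parameters(DiscFac=0.95, AgentCount=5000)
# rather than tracing inherited values through parent classes' dictionaries.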


class AggShockConsumerType(IndShockConsumerType):
@@ -435,17 +493,11 @@ def make_euler_error_func(self, mMax=100, approx_inc_dstn=True):
        raise NotImplementedError()


###############################################################################


# This example makes a high risk, low growth state and a low risk, high growth state
MrkvArray = np.array([[0.90, 0.10], [0.04, 0.96]])
PermShkAggStd = [
    0.012,
    0.006,
] # Standard deviation of log aggregate permanent shocks by state
TranShkAggStd = [
    0.006,
    0.003,
] # Standard deviation of log aggregate transitory shocks by state
PermGroFacAgg = [0.98, 1.02] # Aggregate permanent income growth factor

# Make a dictionary to specify a Markov aggregate shocks consumer
init_agg_mrkv_shocks = init_agg_shocks.copy()
@@ -621,6 +673,9 @@ def getMrkvNow(self): # This function exists to be overwritten in StickyE model
        return self.shocks["Mrkv"] * np.ones(self.AgentCount, dtype=int)


##############################################################################

# Make a dictionary for Krusell-Smith agents
init_KS_agents = {
    "T_cycle": 1,
    "DiscFac": 0.99,