diff --git a/notebooks/Uncertainty-and-the-Saving-Rate.ipynb b/notebooks/Uncertainty-and-the-Saving-Rate.ipynb index 48589728..b19cbe53 100644 --- a/notebooks/Uncertainty-and-the-Saving-Rate.ipynb +++ b/notebooks/Uncertainty-and-the-Saving-Rate.ipynb @@ -32,7 +32,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 4, "metadata": { "code_folding": [ 0, @@ -115,7 +115,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 16, "metadata": { "code_folding": [ 0 @@ -126,8 +126,15 @@ "# Import HARK tools and cstwMPC parameter values\n", "from HARK.utilities import plotFuncsDer, plotFuncs\n", "from HARK.ConsumptionSaving.ConsIndShockModel import PerfForesightConsumerType\n", - "import HARK.cstwMPC.cstwMPC as cstwMPC\n", - "import HARK.cstwMPC.SetupParamsCSTW as Params\n", + "from HARK.ConsumptionSaving.ConsAggShockModel import AggShockConsumerType, CobbDouglasEconomy\n", + "from HARK.datasets import load_SCF_wealth_weights\n", + "from HARK.utilities import getLorenzShares\n", + "\n", + "#import HARK.cstwMPC.cstwMPC as cstwMPC\n", + "\n", + "SCF_wealth, SCF_weights = load_SCF_wealth_weights()\n", + "# Which points of the Lorenz curve to match in beta-dist (must be in (0,1))\n", + "percentiles_to_match = [0.2, 0.4, 0.6, 0.8] \n", "\n", "# Double the default value of variance\n", "# Params.init_infinite['PermShkStd'] = [i*2 for i in Params.init_infinite['PermShkStd']]" @@ -135,32 +142,31 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 12, "metadata": { "code_folding": [ 0 - ], - "scrolled": false + ] }, "outputs": [], "source": [ "# Setup stuff for general equilibrium version\n", "\n", "# Set targets for K/Y and the Lorenz curve\n", - "lorenz_target = cstwMPC.getLorenzShares(Params.SCF_wealth,weights=\n", - " Params.SCF_weights,percentiles=\n", - " Params.percentiles_to_match)\n", + "lorenz_target = getLorenzShares(SCF_wealth,\n", + " weights= SCF_weights,\n", + " percentiles= percentiles_to_match)\n", 
"\n", "lorenz_long_data = np.hstack((np.array(0.0),\\\n", - " cstwMPC.getLorenzShares(Params.SCF_wealth,weights=\\\n", - " Params.SCF_weights,percentiles=\\\n", - " np.arange(0.01,1.0,0.01).tolist()),np.array(1.0)))\n", + " getLorenzShares(SCF_wealth,\n", + " weights=SCF_weights,\n", + " percentiles=np.arange(0.01,1.0,0.01).tolist()),np.array(1.0)))\n", "KY_target = 10.26" ] }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 15, "metadata": { "code_folding": [ 0 @@ -170,33 +176,70 @@ "source": [ "# Setup and calibration of the agent types\n", "\n", + "# Define a dictionary with calibrated parameters\n", + "cstwMPC_init_infinite = {\n", + " \"CRRA\":1.0, # Coefficient of relative risk aversion \n", + " \"Rfree\":1.01/(1.0 - 1.0/160.0), # Survival probability,\n", + " \"PermGroFac\":[1.000**0.25], # Permanent income growth factor (no perm growth),\n", + " \"PermGroFacAgg\":1.0,\n", + " \"BoroCnstArt\":0.0,\n", + " \"CubicBool\":False,\n", + " \"vFuncBool\":False,\n", + " \"PermShkStd\":[(0.01*4/11)**0.5], # Standard deviation of permanent shocks to income\n", + " \"PermShkCount\":5, # Number of points in permanent income shock grid\n", + " \"TranShkStd\":[(0.01*4)**0.5], # Standard deviation of transitory shocks to income,\n", + " \"TranShkCount\":5, # Number of points in transitory income shock grid\n", + " \"UnempPrb\":0.07, # Probability of unemployment while working\n", + " \"IncUnemp\":0.15, # Unemployment benefit replacement rate\n", + " \"UnempPrbRet\":None,\n", + " \"IncUnempRet\":None,\n", + " \"aXtraMin\":0.00001, # Minimum end-of-period assets in grid\n", + " \"aXtraMax\":40, # Maximum end-of-period assets in grid\n", + " \"aXtraCount\":32, # Number of points in assets grid\n", + " \"aXtraExtra\":[None],\n", + " \"aXtraNestFac\":3, # Number of times to 'exponentially nest' when constructing assets grid\n", + " \"LivPrb\":[1.0 - 1.0/160.0], # Survival probability\n", + " \"DiscFac\":0.97, # Default intertemporal discount factor; dummy 
value, will be overwritten\n", + " \"cycles\":0,\n", + " \"T_cycle\":1,\n", + " \"T_retire\":0,\n", + " 'T_sim':1200, # Number of periods to simulate (idiosyncratic shocks model, perpetual youth)\n", + " 'T_age': 400,\n", + " 'IndL': 10.0/9.0, # Labor supply per individual (constant),\n", + " 'aNrmInitMean':np.log(0.00001),\n", + " 'aNrmInitStd':0.0,\n", + " 'pLvlInitMean':0.0,\n", + " 'pLvlInitStd':0.0,\n", + " 'AgentCount':10000,\n", + "}\n", + "\n", "# The parameter values below are taken from\n", "# http://econ.jhu.edu/people/ccarroll/papers/cjSOE/#calibration\n", "\n", - "Params.init_cjSOE = Params.init_infinite # Get default values of all parameters\n", + "init_cjSOE = cstwMPC_init_infinite # Get default values of all parameters\n", "# Now change some of the parameters for the individual's problem to those of cjSOE\n", - "Params.init_cjSOE['CRRA'] = 2\n", - "Params.init_cjSOE['Rfree'] = 1.04**0.25\n", - "Params.init_cjSOE['PermGroFac'] = [1.01**0.25] # Indiviual-specific income growth (from experience, e.g.)\n", - "Params.init_cjSOE['PermGroFacAgg'] = 1.04**0.25 # Aggregate productivity growth \n", - "Params.init_cjSOE['LivPrb'] = [0.95**0.25] # Matches a short working life \n", + "init_cjSOE['CRRA'] = 2\n", + "init_cjSOE['Rfree'] = 1.04**0.25\n", + "init_cjSOE['PermGroFac'] = [1.01**0.25] # Indiviual-specific income growth (from experience, e.g.)\n", + "init_cjSOE['PermGroFacAgg'] = 1.04**0.25 # Aggregate productivity growth \n", + "init_cjSOE['LivPrb'] = [0.95**0.25] # Matches a short working life \n", "\n", "PopGroFac_cjSOE = [1.01**0.25] # Irrelevant to the individual's choice; attach later to \"market\" economy object\n", "\n", "# Instantiate the baseline agent type with the parameters defined above\n", - "BaselineType = cstwMPC.cstwMPCagent(**Params.init_cjSOE)\n", + "BaselineType = AggShockConsumerType(**init_cjSOE)\n", "BaselineType.AgeDstn = np.array(1.0) # Fix the age distribution of agents\n", "\n", "# Make desired number of agent types (to capture 
ex-ante heterogeneity)\n", "EstimationAgentList = []\n", - "for n in range(Params.pref_type_count):\n", + "for n in range(1):\n", " EstimationAgentList.append(deepcopy(BaselineType))\n", " EstimationAgentList[n].seed = n # Give every instance a different seed" ] }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 18, "metadata": { "code_folding": [ 0 @@ -206,16 +249,30 @@ "source": [ "# Make an economy for the consumers to live in\n", "\n", - "EstimationEconomy = cstwMPC.cstwMPCmarket(**Params.init_market)\n", + "init_market = {'LorenzBool': False,\n", + " 'ManyStatsBool': False,\n", + " 'ignore_periods':0, # Will get overwritten\n", + " 'PopGroFac':0.0, # Will get overwritten\n", + " 'T_retire':0, # Will get overwritten\n", + " 'TypeWeights':[], # Will get overwritten\n", + " 'Population': 10000,\n", + " 'act_T':0, # Will get overwritten\n", + " 'IncUnemp':0.15,\n", + " 'cutoffs':[(0.99,1),(0.9,1),(0.8,1),(0.6,0.8),(0.4,0.6),(0.2,0.4),(0.0,0.2)],\n", + " 'LorenzPercentiles':percentiles_to_match,\n", + " 'AggShockBool':False\n", + " }\n", + "\n", + "EstimationEconomy = CobbDouglasEconomy(init_market)\n", "EstimationEconomy.print_parallel_error_once = True # Avoids a bug in the code\n", "\n", "EstimationEconomy.agents = EstimationAgentList\n", - "EstimationEconomy.act_T = Params.T_sim_PY # How many periods of history are good enough for \"steady state\"" + "EstimationEconomy.act_T = 1200 # How many periods of history are good enough for \"steady state\"" ] }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 19, "metadata": { "code_folding": [ 0 @@ -228,7 +285,7 @@ "EstimationEconomy.LorenzTarget = lorenz_target\n", "EstimationEconomy.LorenzData = lorenz_long_data\n", "EstimationEconomy.PopGroFac = PopGroFac_cjSOE # Population growth characterizes the entire economy\n", - "EstimationEconomy.ignore_periods = Params.ignore_periods_PY # Presample periods\n", + "EstimationEconomy.ignore_periods = 400 # Presample periods\n", "\n", 
"#Display statistics about the estimated model (or not)\n", "EstimationEconomy.LorenzBool = False\n", @@ -238,86 +295,80 @@ }, { "cell_type": "code", - "execution_count": null, - "metadata": { - "code_folding": [ - 0 - ], - "lines_to_next_cell": 2 - }, + "execution_count": 21, + "metadata": {}, "outputs": [], "source": [ - "# construct spread_estimate and center_estimate if true, otherwise use the default values\n", - "Params.do_param_dist=True # Whether to use a distribution of ex-ante heterogeneity\n", - "\n", - "# Discount factors assumed to be uniformly distributed around center_pre for spread_pre on either side\n", - "\n", - "spread_pre=0.0019501105739768 #result under the default calibration of cjSOE\n", - "center_pre=1.0065863855906343 #result under the default calibration of cjSOE\n", - "\n", - "do_optimizing=False # Set to True to reestimate the distribution of time preference rates\n", - "\n", - "if do_optimizing: # If you want to rerun the cstwMPC estimation, change do_optimizing to True\n", - " # Finite value requires discount factor from combined pure and mortality-induced\n", - " # discounting to be less than one, so maximum DiscFac is 1/LivPrb\n", - " DiscFacMax = 1/Params.init_cjSOE['LivPrb'][0] # \n", - " param_range = [0.995,-0.0001+DiscFacMax] \n", - " spread_range = [0.00195,0.0205] # \n", - "\n", - " if Params.do_param_dist: # If configured to estimate the distribution\n", - " LorenzBool = True\n", - " # Run the param-dist estimation\n", - " paramDistObjective = lambda spread : cstwMPC.findLorenzDistanceAtTargetKY(\n", - " Economy = EstimationEconomy,\n", - " param_name = Params.param_name,\n", - " param_count = Params.pref_type_count,\n", - " center_range = param_range,\n", - " spread = spread,\n", - " dist_type = Params.dist_type) # Distribution of DiscFac\n", - " t_start = time()\n", - " \n", - " spread_estimate = golden(paramDistObjective \n", - " ,brack=spread_range\n", - " ,tol=1e-4) \n", - " center_estimate = 
EstimationEconomy.center_save\n", - " t_end = time()\n", - " else: # Run the param-point estimation only\n", - " paramPointObjective = lambda center : cstwMPC.getKYratioDifference(Economy = EstimationEconomy,\n", - " param_name = Params.param_name,\n", - " param_count = Params.pref_type_count,\n", - " center = center,\n", - " spread = 0.0,\n", - " dist_type = Params.dist_type)\n", - " t_start = time()\n", - " center_estimate = brentq(paramPointObjective # Find best point estimate \n", - " ,param_range[0]\n", - " ,param_range[1],xtol=1e-6)\n", - " spread_estimate = 0.0\n", - " t_end = time()\n", - " \n", - " print(spread_estimate)\n", - " print('****************')\n", - " print(center_estimate)\n", - " print('****************')\n", - "else: # Just use the hard-wired numbers from cstwMPC\n", - " center_estimate=center_pre\n", - " spread_estimate=spread_pre" + "def distributeParams(self,param_name,param_count,center,spread,dist_type):\n", + " '''\n", + " Distributes heterogeneous values of one parameter to the AgentTypes in self.agents.\n", + " Parameters\n", + " ----------\n", + " param_name : string\n", + " Name of the parameter to be assigned.\n", + " param_count : int\n", + " Number of different values the parameter will take on.\n", + " center : float\n", + " A measure of centrality for the distribution of the parameter.\n", + " spread : float\n", + " A measure of spread or diffusion for the distribution of the parameter.\n", + " dist_type : string\n", + " The type of distribution to be used. 
Can be \"lognormal\" or \"uniform\" (can expand).\n", + " Returns\n", + " -------\n", + " None\n", + " '''\n", + " # Get a list of discrete values for the parameter\n", + " if dist_type == 'uniform':\n", + " # If uniform, center is middle of distribution, spread is distance to either edge\n", + " param_dist = approxUniform(N=param_count,bot=center-spread,top=center+spread)\n", + " elif dist_type == 'lognormal':\n", + " # If lognormal, center is the mean and spread is the standard deviation (in log)\n", + " tail_N = 3\n", + " param_dist = approxLognormal(N=param_count-tail_N,mu=np.log(center)-0.5*spread**2,sigma=spread,tail_N=tail_N,tail_bound=[0.0,0.9], tail_order=np.e)\n", + "\n", + " # Distribute the parameters to the various types, assigning consecutive types the same\n", + " # value if there are more types than values\n", + " replication_factor = len(self.agents) // param_count \n", + " # Note: the double division is intenger division in Python 3 and 2.7, this makes it explicit\n", + " j = 0\n", + " b = 0\n", + " while j < len(self.agents):\n", + " for n in range(replication_factor):\n", + " self.agents[j](AgentCount = int(self.Population*param_dist[0][b]*self.TypeWeight[n]))\n", + " exec('self.agents[j](' + param_name + '= param_dist[1][b])')\n", + " j += 1\n", + " b += 1\n", + " \n", + "EstimationEconomy.distributeParams = distributeParams" ] }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 23, "metadata": { "code_folding": [ 0 ] }, - "outputs": [], + "outputs": [ + { + "ename": "NameError", + "evalue": "name 'center_estimate' is not defined", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 3\u001b[0m 
\u001b[0;34m'DiscFac'\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0;36m7\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;31m# How many different types of consumer are there\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 5\u001b[0;31m \u001b[0mcenter_estimate\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;31m# Increase patience slightly vs cstwMPC so that maximum saving rate is higher\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 6\u001b[0m \u001b[0mspread_estimate\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;31m# How much difference is there across consumers\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 7\u001b[0m Params.dist_type) # Default is for a uniform distribution\n", + "\u001b[0;31mNameError\u001b[0m: name 'center_estimate' is not defined" + ] + } + ], "source": [ "# Construct the economy at date 0\n", "EstimationEconomy.distributeParams( # Construct consumer types whose heterogeneity is in the given parameter\n", " 'DiscFac',\n", - " Params.pref_type_count,# How many different types of consumer are there \n", + " 7,# How many different types of consumer are there \n", " center_estimate, # Increase patience slightly vs cstwMPC so that maximum saving rate is higher\n", " spread_estimate, # How much difference is there across consumers\n", " Params.dist_type) # Default is for a uniform distribution" @@ -563,8 +614,7 @@ "metadata": { "code_folding": [ 0 - ], - "scrolled": false + ] }, "outputs": [], "source": [ diff --git a/notebooks/Uncertainty-and-the-Saving-Rate.py b/notebooks/Uncertainty-and-the-Saving-Rate.py index 931cb426..eae730dd 100644 --- a/notebooks/Uncertainty-and-the-Saving-Rate.py +++ b/notebooks/Uncertainty-and-the-Saving-Rate.py @@ -6,7 +6,7 @@ # extension: .py # format_name: percent # format_version: '1.2' -# jupytext_version: 1.2.3 +# jupytext_version: 1.2.4 # kernelspec: # display_name: Python 3 # 
language: python @@ -115,8 +115,15 @@ def in_ipynb(): # Import HARK tools and cstwMPC parameter values from HARK.utilities import plotFuncsDer, plotFuncs from HARK.ConsumptionSaving.ConsIndShockModel import PerfForesightConsumerType -import HARK.cstwMPC.cstwMPC as cstwMPC -import HARK.cstwMPC.SetupParamsCSTW as Params +from HARK.ConsumptionSaving.ConsAggShockModel import AggShockConsumerType, CobbDouglasEconomy +from HARK.datasets import load_SCF_wealth_weights +from HARK.utilities import getLorenzShares + +#import HARK.cstwMPC.cstwMPC as cstwMPC + +SCF_wealth, SCF_weights = load_SCF_wealth_weights() +# Which points of the Lorenz curve to match in beta-dist (must be in (0,1)) +percentiles_to_match = [0.2, 0.4, 0.6, 0.8] # Double the default value of variance # Params.init_infinite['PermShkStd'] = [i*2 for i in Params.init_infinite['PermShkStd']] @@ -125,50 +132,101 @@ def in_ipynb(): # Setup stuff for general equilibrium version # Set targets for K/Y and the Lorenz curve -lorenz_target = cstwMPC.getLorenzShares(Params.SCF_wealth,weights= - Params.SCF_weights,percentiles= - Params.percentiles_to_match) +lorenz_target = getLorenzShares(SCF_wealth, + weights= SCF_weights, + percentiles= percentiles_to_match) lorenz_long_data = np.hstack((np.array(0.0),\ - cstwMPC.getLorenzShares(Params.SCF_wealth,weights=\ - Params.SCF_weights,percentiles=\ - np.arange(0.01,1.0,0.01).tolist()),np.array(1.0))) + getLorenzShares(SCF_wealth, + weights=SCF_weights, + percentiles=np.arange(0.01,1.0,0.01).tolist()),np.array(1.0))) KY_target = 10.26 # %% {"code_folding": [0]} # Setup and calibration of the agent types +# Define a dictionary with calibrated parameters +cstwMPC_init_infinite = { + "CRRA":1.0, # Coefficient of relative risk aversion + "Rfree":1.01/(1.0 - 1.0/160.0), # Interest factor on assets (adjusted by survival probability), + "PermGroFac":[1.000**0.25], # Permanent income growth factor (no perm growth), + "PermGroFacAgg":1.0, + "BoroCnstArt":0.0, + "CubicBool":False, + "vFuncBool":False, + 
"PermShkStd":[(0.01*4/11)**0.5], # Standard deviation of permanent shocks to income + "PermShkCount":5, # Number of points in permanent income shock grid + "TranShkStd":[(0.01*4)**0.5], # Standard deviation of transitory shocks to income, + "TranShkCount":5, # Number of points in transitory income shock grid + "UnempPrb":0.07, # Probability of unemployment while working + "IncUnemp":0.15, # Unemployment benefit replacement rate + "UnempPrbRet":None, + "IncUnempRet":None, + "aXtraMin":0.00001, # Minimum end-of-period assets in grid + "aXtraMax":40, # Maximum end-of-period assets in grid + "aXtraCount":32, # Number of points in assets grid + "aXtraExtra":[None], + "aXtraNestFac":3, # Number of times to 'exponentially nest' when constructing assets grid + "LivPrb":[1.0 - 1.0/160.0], # Survival probability + "DiscFac":0.97, # Default intertemporal discount factor; dummy value, will be overwritten + "cycles":0, + "T_cycle":1, + "T_retire":0, + 'T_sim':1200, # Number of periods to simulate (idiosyncratic shocks model, perpetual youth) + 'T_age': 400, + 'IndL': 10.0/9.0, # Labor supply per individual (constant), + 'aNrmInitMean':np.log(0.00001), + 'aNrmInitStd':0.0, + 'pLvlInitMean':0.0, + 'pLvlInitStd':0.0, + 'AgentCount':10000, +} + # The parameter values below are taken from # http://econ.jhu.edu/people/ccarroll/papers/cjSOE/#calibration -Params.init_cjSOE = Params.init_infinite # Get default values of all parameters +init_cjSOE = cstwMPC_init_infinite # Get default values of all parameters # Now change some of the parameters for the individual's problem to those of cjSOE -Params.init_cjSOE['CRRA'] = 2 -Params.init_cjSOE['Rfree'] = 1.04**0.25 -Params.init_cjSOE['PermGroFac'] = [1.01**0.25] # Indiviual-specific income growth (from experience, e.g.) 
-Params.init_cjSOE['PermGroFacAgg'] = 1.04**0.25 # Aggregate productivity growth -Params.init_cjSOE['LivPrb'] = [0.95**0.25] # Matches a short working life +init_cjSOE['CRRA'] = 2 +init_cjSOE['Rfree'] = 1.04**0.25 +init_cjSOE['PermGroFac'] = [1.01**0.25] # Individual-specific income growth (from experience, e.g.) +init_cjSOE['PermGroFacAgg'] = 1.04**0.25 # Aggregate productivity growth +init_cjSOE['LivPrb'] = [0.95**0.25] # Matches a short working life PopGroFac_cjSOE = [1.01**0.25] # Irrelevant to the individual's choice; attach later to "market" economy object # Instantiate the baseline agent type with the parameters defined above -BaselineType = cstwMPC.cstwMPCagent(**Params.init_cjSOE) +BaselineType = AggShockConsumerType(**init_cjSOE) BaselineType.AgeDstn = np.array(1.0) # Fix the age distribution of agents # Make desired number of agent types (to capture ex-ante heterogeneity) EstimationAgentList = [] -for n in range(Params.pref_type_count): +for n in range(1): EstimationAgentList.append(deepcopy(BaselineType)) EstimationAgentList[n].seed = n # Give every instance a different seed # %% {"code_folding": [0]} # Make an economy for the consumers to live in -EstimationEconomy = cstwMPC.cstwMPCmarket(**Params.init_market) +init_market = {'LorenzBool': False, + 'ManyStatsBool': False, + 'ignore_periods':0, # Will get overwritten + 'PopGroFac':0.0, # Will get overwritten + 'T_retire':0, # Will get overwritten + 'TypeWeights':[], # Will get overwritten + 'Population': 10000, + 'act_T':0, # Will get overwritten + 'IncUnemp':0.15, + 'cutoffs':[(0.99,1),(0.9,1),(0.8,1),(0.6,0.8),(0.4,0.6),(0.2,0.4),(0.0,0.2)], + 'LorenzPercentiles':percentiles_to_match, + 'AggShockBool':False + } + +EstimationEconomy = CobbDouglasEconomy(init_market) EstimationEconomy.print_parallel_error_once = True # Avoids a bug in the code EstimationEconomy.agents = EstimationAgentList -EstimationEconomy.act_T = Params.T_sim_PY # How many periods of history are good enough for "steady state" 
+EstimationEconomy.act_T = 1200 # How many periods of history are good enough for "steady state" # %% {"code_folding": [0]} # Uninteresting parameters that also need to be set @@ -176,76 +234,63 @@ def in_ipynb(): EstimationEconomy.LorenzTarget = lorenz_target EstimationEconomy.LorenzData = lorenz_long_data EstimationEconomy.PopGroFac = PopGroFac_cjSOE # Population growth characterizes the entire economy -EstimationEconomy.ignore_periods = Params.ignore_periods_PY # Presample periods +EstimationEconomy.ignore_periods = 400 # Presample periods #Display statistics about the estimated model (or not) EstimationEconomy.LorenzBool = False EstimationEconomy.ManyStatsBool = False EstimationEconomy.TypeWeight = [1.0] -# %% {"code_folding": [0]} -# construct spread_estimate and center_estimate if true, otherwise use the default values -Params.do_param_dist=True # Whether to use a distribution of ex-ante heterogeneity - -# Discount factors assumed to be uniformly distributed around center_pre for spread_pre on either side - -spread_pre=0.0019501105739768 #result under the default calibration of cjSOE -center_pre=1.0065863855906343 #result under the default calibration of cjSOE - -do_optimizing=False # Set to True to reestimate the distribution of time preference rates - -if do_optimizing: # If you want to rerun the cstwMPC estimation, change do_optimizing to True - # Finite value requires discount factor from combined pure and mortality-induced - # discounting to be less than one, so maximum DiscFac is 1/LivPrb - DiscFacMax = 1/Params.init_cjSOE['LivPrb'][0] # - param_range = [0.995,-0.0001+DiscFacMax] - spread_range = [0.00195,0.0205] # - - if Params.do_param_dist: # If configured to estimate the distribution - LorenzBool = True - # Run the param-dist estimation - paramDistObjective = lambda spread : cstwMPC.findLorenzDistanceAtTargetKY( - Economy = EstimationEconomy, - param_name = Params.param_name, - param_count = Params.pref_type_count, - center_range = param_range, - 
spread = spread, - dist_type = Params.dist_type) # Distribution of DiscFac - t_start = time() - - spread_estimate = golden(paramDistObjective - ,brack=spread_range - ,tol=1e-4) - center_estimate = EstimationEconomy.center_save - t_end = time() - else: # Run the param-point estimation only - paramPointObjective = lambda center : cstwMPC.getKYratioDifference(Economy = EstimationEconomy, - param_name = Params.param_name, - param_count = Params.pref_type_count, - center = center, - spread = 0.0, - dist_type = Params.dist_type) - t_start = time() - center_estimate = brentq(paramPointObjective # Find best point estimate - ,param_range[0] - ,param_range[1],xtol=1e-6) - spread_estimate = 0.0 - t_end = time() - - print(spread_estimate) - print('****************') - print(center_estimate) - print('****************') -else: # Just use the hard-wired numbers from cstwMPC - center_estimate=center_pre - spread_estimate=spread_pre +# %% +def distributeParams(self,param_name,param_count,center,spread,dist_type): + ''' + Distributes heterogeneous values of one parameter to the AgentTypes in self.agents. + Parameters + ---------- + param_name : string + Name of the parameter to be assigned. + param_count : int + Number of different values the parameter will take on. + center : float + A measure of centrality for the distribution of the parameter. + spread : float + A measure of spread or diffusion for the distribution of the parameter. + dist_type : string + The type of distribution to be used. Can be "lognormal" or "uniform" (can expand). 
+ Returns + ------- + None + ''' + # Get a list of discrete values for the parameter + if dist_type == 'uniform': + # If uniform, center is middle of distribution, spread is distance to either edge + param_dist = approxUniform(N=param_count,bot=center-spread,top=center+spread) + elif dist_type == 'lognormal': + # If lognormal, center is the mean and spread is the standard deviation (in log) + tail_N = 3 + param_dist = approxLognormal(N=param_count-tail_N,mu=np.log(center)-0.5*spread**2,sigma=spread,tail_N=tail_N,tail_bound=[0.0,0.9], tail_order=np.e) + + # Distribute the parameters to the various types, assigning consecutive types the same + # value if there are more types than values + replication_factor = len(self.agents) // param_count + # Note: the double division is integer division in Python 3 and 2.7, this makes it explicit + j = 0 + b = 0 + while j < len(self.agents): + for n in range(replication_factor): + self.agents[j](AgentCount = int(self.Population*param_dist[0][b]*self.TypeWeight[n])) + exec('self.agents[j](' + param_name + '= param_dist[1][b])') + j += 1 + b += 1 + +EstimationEconomy.distributeParams = distributeParams # %% {"code_folding": [0]} # Construct the economy at date 0 EstimationEconomy.distributeParams( # Construct consumer types whose heterogeneity is in the given parameter 'DiscFac', - Params.pref_type_count,# How many different types of consumer are there + 7,# How many different types of consumer are there center_estimate, # Increase patience slightly vs cstwMPC so that maximum saving rate is higher spread_estimate, # How much difference is there across consumers Params.dist_type) # Default is for a uniform distribution