diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c2bcf480..90a50c66 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,4 +1,4 @@ -name: Build Project [using jupyter-book] +name: Build HTML [using jupyter-book] on: [push] jobs: tests: @@ -14,17 +14,29 @@ jobs: miniconda-version: 'latest' python-version: 3.8 environment-file: environment.yml - activate-environment: lecture-python-advanced.myst + activate-environment: quantecon - name: Display Conda Environment Versions shell: bash -l {0} run: conda list - name: Display Pip Versions shell: bash -l {0} run: pip list + # - name: Download "build" folder (cache) + # uses: dawidd6/action-download-artifact@v2 + # with: + # workflow: publish.yml + # branch: main + # name: build-cache + # path: _build - name: Build HTML shell: bash -l {0} run: | jb build lectures --path-output ./ + - name: Save Build as Artifact + uses: actions/upload-artifact@v1 + with: + name: _build + path: _build - name: Preview Deploy to Netlify uses: nwtgck/actions-netlify@v1.1 with: diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml new file mode 100644 index 00000000..7922c67e --- /dev/null +++ b/.github/workflows/publish.yml @@ -0,0 +1,74 @@ +name: Build & Publish to GH-PAGES +on: + push: + tags: + - 'publish*' +jobs: + publish: + if: github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags') + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v2 + - name: Setup Anaconda + uses: conda-incubator/setup-miniconda@v2 + with: + auto-update-conda: true + auto-activate-base: true + miniconda-version: 'latest' + python-version: 3.8 + environment-file: environment.yml + activate-environment: quantecon + - name: Display Conda Environment Versions + shell: bash -l {0} + run: conda list + - name: Display Pip Versions + shell: bash -l {0} + run: pip list + - name: Build HTML + shell: bash -l {0} + run: | + jb build lectures --path-output ./ + - name: 
Build Download Notebooks (sphinx-tojupyter) + shell: bash -l {0} + run: | + jb build lectures --path-output ./ --builder=custom --custom-builder=jupyter + zip -r download-notebooks.zip _build/jupyter + - uses: actions/upload-artifact@v2 + with: + name: download-notebooks + path: download-notebooks.zip + - name: Copy Download Notebooks for GH-PAGES + shell: bash -l {0} + run: | + mkdir _build/html/_notebooks + cp _build/jupyter/*.ipynb _build/html/_notebooks + - name: Deploy website to gh-pages + uses: peaceiris/actions-gh-pages@v3 + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + publish_dir: _build/html/ + cname: python-advanced.quantecon.org + - name: Upload "_build" folder (cache) + uses: actions/upload-artifact@v2 + with: + name: build-cache + path: _build + - name: Prepare lecture-python-advanced.notebooks sync + shell: bash -l {0} + run: | + mkdir -p _build/lecture-python-advanced.notebooks + cp -a _notebook_repo/. _build/lecture-python-advanced.notebooks + cp _build/jupyter/*.ipynb _build/lecture-python-advanced.notebooks + ls -a _build/lecture-python-advanced.notebooks + - name: Commit latest notebooks to lecture-python-advanced.notebooks + uses: cpina/github-action-push-to-another-repository@master + env: + API_TOKEN_GITHUB: ${{ secrets.QUANTECON_SERVICES_PAT }} + with: + source-directory: '_build/lecture-python-advanced.notebooks/' + destination-repository-username: 'QuantEcon' + destination-repository-name: 'lecture-python-advanced.notebooks' + commit-message: 'auto publishing updates to notebooks' + destination-github-username: 'quantecon-services' + user-email: services@quantecon.org diff --git a/README.md b/README.md index 538d818e..8dd79074 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,6 @@ # lecture-python-advanced.myst Migration to Myst (Source files for https://python-advanced.quantecon.org) + +# Current Hosting + +https://5ff577b6c5ec3f9de6b87508--wonderful-lalande-528d1c.netlify.app/intro.html diff --git a/environment.yml 
b/environment.yml index 96b5d8e2..5b5a2f2b 100644 --- a/environment.yml +++ b/environment.yml @@ -1,15 +1,18 @@ -name: lecture-python-advanced.myst +name: quantecon channels: - default dependencies: - - pip - python=3.8 - - anaconda=2020.07 + - anaconda=2020.11 + - pip - pip: - - jupytext - jupyter-book - - quantecon - - joblib - - git+https://github.com/QuantEcon/quantecon-book-theme - - git+https://github.com/executablebooks/sphinx-multitoc-numbering.git - - interpolation \ No newline at end of file + - sphinx-multitoc-numbering + - quantecon-book-theme + - sphinx-tojupyter + - sphinxext-rediraffe + - sphinx-exercise + - jupytext + - ghp-import + - jupinx + diff --git a/lectures/_config.yml b/lectures/_config.yml index 6e0c94ac..3e521c9a 100644 --- a/lectures/_config.yml +++ b/lectures/_config.yml @@ -12,4 +12,9 @@ sphinx: config: html_theme: quantecon_book_theme html_static_path: ['_static'] - mathjax_path: https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js \ No newline at end of file + mathjax_config: + TeX: + Macros: + "argmax" : "arg\\,max" + "argmin" : "arg\\,min" + mathjax_path: https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js diff --git a/lectures/_toc.yml b/lectures/_toc.yml index b511d202..8a47540e 100644 --- a/lectures/_toc.yml +++ b/lectures/_toc.yml @@ -49,6 +49,7 @@ - file: additive_functionals - file: lu_tricks - file: classical_filtering + - file: knowing_forecasts_of_others - part: Asset Pricing and Finance chapters: diff --git a/lectures/additive_functionals.md b/lectures/additive_functionals.md index 81ae4489..3d4893f5 100644 --- a/lectures/additive_functionals.md +++ b/lectures/additive_functionals.md @@ -1265,9 +1265,11 @@ These probability density functions help us understand mechanics underlying the ### Multiplicative Martingale as Likelihood Ratio Process -{doc}`This lecture ` studies **likelihood processes** and **likelihood ratio processes**. 
+[This lecture](https://python.quantecon.org/likelihood_ratio_process.html) studies **likelihood processes** +and **likelihood ratio processes**. A **likelihood ratio process** is a multiplicative martingale with mean unity. -Likelihood ratio processes exhibit the peculiar property that naturally also appears in {doc}`this lecture `. +Likelihood ratio processes exhibit the peculiar property that naturally also appears +[here](https://python.quantecon.org/likelihood_ratio_process.html). diff --git a/lectures/amss.md b/lectures/amss.md index 3d323ced..d82e388b 100644 --- a/lectures/amss.md +++ b/lectures/amss.md @@ -24,21 +24,6 @@ kernelspec: :depth: 2 ``` -**Software Requirement:** - -This lecture requires the use of some older software versions to run. If -you would like to execute this lecture please download the following -amss_environment.yml -file. This specifies the software required and an environment can be -created using [conda](https://docs.conda.io/en/latest/): - -Open a terminal: - -```{code-block} bash -conda env create --file amss_environment.yml -conda activate amss -``` - In addition to what's in Anaconda, this lecture will need the following libraries: ```{code-cell} ipython diff --git a/lectures/amss2.md b/lectures/amss2.md index fb7b2d6a..3d8c5740 100644 --- a/lectures/amss2.md +++ b/lectures/amss2.md @@ -24,21 +24,6 @@ kernelspec: :depth: 2 ``` -**Software Requirement:** - -This lecture requires the use of some older software versions to run. If -you would like to execute this lecture please download the following -amss_environment.yml -file. 
This specifies the software required and an environment can be -created using [conda](https://docs.conda.io/en/latest/): - -Open a terminal: - -```{code-block} bash -conda env create --file amss_environment.yml -conda activate amss -``` - In addition to what's in Anaconda, this lecture will need the following libraries: ```{code-cell} ipython diff --git a/lectures/amss3.md b/lectures/amss3.md index 5c125f2a..d8cb4c12 100644 --- a/lectures/amss3.md +++ b/lectures/amss3.md @@ -24,21 +24,6 @@ kernelspec: :depth: 2 ``` -**Software Requirement:** - -This lecture requires the use of some older software versions to run. If -you would like to execute this lecture please download the following -amss_environment.yml -file. This specifies the software required and an environment can be -created using [conda](https://docs.conda.io/en/latest/): - -Open a terminal: - -```{code-block} bash -conda env create --file amss_environment.yml -conda activate amss -``` - In addition to what's in Anaconda, this lecture will need the following libraries: ```{code-cell} ipython diff --git a/lectures/arellano.md b/lectures/arellano.md index fdddffb5..82484420 100644 --- a/lectures/arellano.md +++ b/lectures/arellano.md @@ -79,8 +79,8 @@ import numpy as np import quantecon as qe import random -from numba import jit, jitclass, int64, float64 - +from numba import jit, int64, float64 +from numba.experimental import jitclass %matplotlib inline ``` diff --git a/lectures/black_litterman.md b/lectures/black_litterman.md index 87212232..94eec898 100644 --- a/lectures/black_litterman.md +++ b/lectures/black_litterman.md @@ -197,8 +197,7 @@ sample = excess_return.rvs(T) w = np.linalg.solve(δ * Σ_est, μ_est) fig, ax = plt.subplots(figsize=(8, 5)) -ax.set_title('Mean-variance portfolio weights recommendation \ - and the market portfolio') +ax.set_title('Mean-variance portfolio weights recommendation and the market portfolio') ax.plot(np.arange(N)+1, w, 'o', c='k', label='$w$ (mean-variance)') 
ax.plot(np.arange(N)+1, w_m, 'o', c='r', label='$w_m$ (market portfolio)') ax.vlines(np.arange(N)+1, 0, w, lw=1) @@ -219,7 +218,7 @@ Black and Litterman's responded to this situation in the following way: - They want to continue to allow the customer to express his or her risk tolerance by setting $\delta$. - Leaving $\Sigma$ at its maximum-likelihood value, they push - $\mu$ away from its maximum value in a way designed to make + $\mu$ away from its maximum-likelihood value in a way designed to make portfolio choices that are more plausible in terms of conforming to what most people actually do. @@ -314,8 +313,7 @@ d_m = r_m / σ_m μ_m = (d_m * Σ_est @ w_m).reshape(N, 1) fig, ax = plt.subplots(figsize=(8, 5)) -ax.set_title(r'Difference between $\hat{\mu}$ (estimate) and \ - $\mu_{BL}$ (market implied)') +ax.set_title(r'Difference between $\hat{\mu}$ (estimate) and $\mu_{BL}$ (market implied)') ax.plot(np.arange(N)+1, μ_est, 'o', c='k', label='$\hat{\mu}$') ax.plot(np.arange(N)+1, μ_m, 'o', c='r', label='$\mu_{BL}$') ax.vlines(np.arange(N) + 1, μ_m, μ_est, lw=1) @@ -418,8 +416,7 @@ def BL_plot(τ): ax[0].vlines(np.arange(N)+1, μ_m, μ_est, lw=1) ax[0].axhline(0, c='k', ls='--') ax[0].set(xlim=(0, N+1), xlabel='Assets', - title=r'Relationship between $\hat{\mu}$, \ - $\mu_{BL}$and$\tilde{\mu}$') + title=r'Relationship between $\hat{\mu}$, $\mu_{BL}$, and $ \tilde{\mu}$') ax[0].xaxis.set_ticks(np.arange(1, N+1, 1)) ax[0].legend(numpoints=1) diff --git a/lectures/cattle_cycles.md b/lectures/cattle_cycles.md index aaa4876a..22eec068 100644 --- a/lectures/cattle_cycles.md +++ b/lectures/cattle_cycles.md @@ -357,15 +357,15 @@ econ2.irf(ts_length=25, shock=shock_demand) econ3.irf(ts_length=25, shock=shock_demand) fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4)) -ax1.plot(econ1.c_irf, label='$\\rho=0.6$') -ax1.plot(econ2.c_irf, label='$\\rho=1$') -ax1.plot(econ3.c_irf, label='$\\rho=0$') +ax1.plot(econ1.c_irf, label=r'$\rho=0.6$') +ax1.plot(econ2.c_irf, 
label=r'$\rho=1$') +ax1.plot(econ3.c_irf, label=r'$\rho=0$') ax1.set_title('Consumption response to demand shock') ax1.legend() -ax2.plot(econ1.k_irf[:, 0], label='$\\rho=0.6$') -ax2.plot(econ2.k_irf[:, 0], label='$\\rho=1$') -ax2.plot(econ3.k_irf[:, 0], label='$\\rho=0$') +ax2.plot(econ1.k_irf[:, 0], label=r'$\rho=0.6$') +ax2.plot(econ2.k_irf[:, 0], label=r'$\rho=1$') +ax2.plot(econ3.k_irf[:, 0], label=r'$\rho=0$') ax2.set_title('Breeding stock response to demand shock') ax2.legend() plt.show() @@ -393,11 +393,11 @@ total3_irf = econ3.k_irf[:, 0] + g * econ3.k_irf[:, 1] + g * econ3.k_irf[:, 2] fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4)) ax1.plot(econ1.k_irf[:, 0], label='Breeding Stock') ax1.plot(total1_irf, label='Total Stock') -ax1.set_title('$\\rho=0.6$') +ax1.set_title(r'$\rho=0.6$') ax2.plot(econ3.k_irf[:, 0], label='Breeding Stock') ax2.plot(total3_irf, label='Total Stock') -ax2.set_title('$\\rho=0$') +ax2.set_title(r'$\rho=0$') plt.show() ``` diff --git a/lectures/classical_filtering.md b/lectures/classical_filtering.md index cbb6d03b..80678772 100644 --- a/lectures/classical_filtering.md +++ b/lectures/classical_filtering.md @@ -172,7 +172,7 @@ or ```{math} :label: eq_55 -x_t = \sum^{t-1}_{j=0} L^{-1}_{t,t-j}\, \varepsilon_{t-j}\ +x_t = \sum^{t-1}_{j=0} L^{-1}_{t,t-j}\, \varepsilon_{t-j} ``` where $L^{-1}_{i,j}$ denotes the $i,j$ element of $L^{-1}$. @@ -223,7 +223,7 @@ $$ ### Implementation -Here's the code that computes solutions to LQ control and filtering problems using the methods described here and in :doc: lu_tricks. +Here's the code that computes solutions to LQ control and filtering problems using the methods described here and in {doc}`lu_tricks `. ```{code-cell} python3 :file: _static/lecture_specific/lu_tricks/control_and_filter.py @@ -256,7 +256,7 @@ h = 0.0 example = LQFilter(d, h, y_m, r=d) ``` -The Wold representation is computed by example.coefficients_of_c(). +The Wold representation is computed by `example.coeffs_of_c()`. 
Let's check that it "flips roots" as required diff --git a/lectures/cons_news.md b/lectures/cons_news.md index 9bfa38ad..caddf878 100644 --- a/lectures/cons_news.md +++ b/lectures/cons_news.md @@ -123,7 +123,7 @@ y_{t+1} - y_t = a_{t+1} - \beta a_t \quad ``` where $\{a_t\}$ is another i.i.d. normally distributed scalar -process, with means of zero and now variances $\sigma_a^2$. +process, with means of zero and now variances $\sigma_a^2 > \sigma_\epsilon^2$. The two i.i.d. shock variances are related by @@ -175,7 +175,7 @@ Using calculations in the {doc}`quantecon lecture `, where $z \in C$ is a complex variable, the covariance generating function $g (z) = \sum_{j=-\infty}^\infty g_j z^j$ -of the $\{(y_t - y_{t-1})\}$ process equals +of the $\{y_t - y_{t-1}\}$ process equals $$ g(z) = \sigma_\epsilon^2 h(z) h(z^{-1}) = \beta^{-2} \sigma_\epsilon^2 > \sigma_\epsilon^2 , @@ -420,9 +420,11 @@ exactly the same histories of nonfinancial income. The consumer with information associated with representation {eq}`eqn_1` responds to each shock $\epsilon_{t+1}$ by leaving his consumption -unaltered and **saving** all of $a_{t+1}$ in anticipation of the -permanently increased taxes that he will bear to pay for the addition -$a_{t+1}$ to his time $t+1$ nonfinancial income. +unaltered and **saving** all of $\epsilon_{t+1}$ in anticipation of the +permanently increased taxes that he will bear in order to service the permanent interest payments on the risk-free +bonds that the government has +presumably issued to pay for the one-time addition +$\epsilon_{t+1}$ to his time $t+1$ nonfinancial income. 
The consumer with information associated with representation {eq}`eqn_2` responds to a shock $a_{t+1}$ by increasing his consumption by diff --git a/lectures/hs_recursive_models.md b/lectures/hs_recursive_models.md index 28a3b587..cae01a7c 100644 --- a/lectures/hs_recursive_models.md +++ b/lectures/hs_recursive_models.md @@ -717,17 +717,17 @@ $$ $$ $$ -mu_t= - \beta^t [\Pi^\prime \Pi\, c_t - \Pi^\prime\, b_t] +\mu_t= - \beta^t [\Pi^\prime \Pi\, c_t - \Pi^\prime\, b_t] $$ $$ -c_t = - (\Pi^\prime \Pi)^{-1} \beta^{-t} mu_t + (\Pi^\prime \Pi)^{-1} +c_t = - (\Pi^\prime \Pi)^{-1} \beta^{-t} \mu_t + (\Pi^\prime \Pi)^{-1} \Pi^\prime b_t $$ This is called the **Frisch demand function** for consumption. -We can think of the vector $mu_t$ as playing the role of prices, +We can think of the vector $\mu_t$ as playing the role of prices, up to a common factor, for all dates and states. The scale factor is @@ -784,7 +784,7 @@ $$ b_t) \cdot ( s_t - b_t) + \ell_t^2 ] \bigl| J_0 , \ 0 < \beta < 1 $$ -*Next steps:** we move on to discuss two closely connected concepts +**Next steps:** we move on to discuss two closely connected concepts - A Planning Problem or Optimal Resource Allocation Problem - Competitive Equilibrium @@ -1753,13 +1753,14 @@ Apply the following version of a factorization identity: $$ \begin{aligned} - [\Pi &+ \beta^{1/2} L^{-1} \Lambda (I - \beta^{1/2} L^{-1} +&[\Pi + \beta^{1/2} L^{-1} \Lambda (I - \beta^{1/2} L^{-1} \Delta_h)^{-1} \Theta_h]^\prime [\Pi + \beta^{1/2} L -\Lambda (I - \beta^{1/2} L \Delta_h)^{-1} \Theta_h]\\ -&= [\hat\Pi + \beta^{1/2} L^{-1} \hat\Lambda +\Lambda (I - \beta^{1/2} L \Delta_h)^{-1} \Theta_h] \\ +&\quad = [\hat\Pi + \beta^{1/2} L^{-1} \hat\Lambda (I - \beta^{1/2} L^{-1} \Delta_h)^{-1} \Theta_h]^\prime [\hat\Pi + \beta^{1/2} L \hat\Lambda -(I - \beta^{1/2} L \Delta_h)^{-1} \Theta_h]\end{aligned} +(I - \beta^{1/2} L \Delta_h)^{-1} \Theta_h] +\end{aligned} $$ The factorization identity guarantees that the diff --git 
a/lectures/knowing_forecasts_of_others.md b/lectures/knowing_forecasts_of_others.md new file mode 100644 index 00000000..bdbbffe3 --- /dev/null +++ b/lectures/knowing_forecasts_of_others.md @@ -0,0 +1,1521 @@ +--- +jupytext: + text_representation: + extension: .md + format_name: myst +kernelspec: + display_name: Python 3 + language: python + name: python3 +--- + +(knowing_the_forecast_of_others_v3)= +```{raw} html + +``` + +# Knowing the Forecasts of Others + +```{contents} Contents +:depth: 2 +``` + +In addition to what's in Anaconda, this lecture will need the following libraries: + +```{code-cell} ipython +--- +tags: [hide-output] +--- +!pip install --upgrade quantecon +!conda install -y -c plotly plotly plotly-orca +``` + +## Introduction + +Robert E. Lucas, Jr. {cite}`lucas75`, Kenneth Kasa {cite}`kasa`, and Robert Townsend +{cite}`townsend` showed that giving decision makers incentives to infer persistent hidden state +variables from equilibrium prices and quantities can +elongate and amplify impulse responses to aggregate +shocks in business cycle models. + +Townsend {cite}`townsend` +noted that such incentives can naturally +induce decision makers to want to forecast the forecast of others. + +This theme has been pursued and extended in analyses in which +decision makers' imperfect information forces them into pursuing an +infinite recursion of forming beliefs about the beliefs of other +(e.g., {cite}`ams`). + +Lucas {cite}`lucas75` side stepped having decision makers forecast the +forecasts of other decision makers by assuming that they simply pool their +information before forecasting. + +A **pooling equilibrium** like Lucas's plays a prominent role in this lecture. + +Because he didn’t assume such pooling, {cite}`townsend` +confronted the forecasting the forecasts of others problem. + +To formulate the problem recursively required that Townsend define decision maker's **state** vector. 
+ +Townsend concluded that his original model required an intractable infinite dimensional state space. + +Therefore, he constructed a more manageable approximating model in which the hidden Markov component of +the demand shock is +revealed to all firms after a fixed and finite number of periods. + +In this lecture, as yet another instance of the theme that **finding the state is an art**, +we show how to formulate Townsend's original model in terms of a low-dimensional state space. + +By doing so, we show that Townsend's model shares equilibrium prices and quantities with those that +prevail in a pooling equilibrium. + +That finding emerged from a line of research about Townsend's model that culminated in +{cite}`Pearlman_Sargent2005` that built on {cite}`PCL`. + +However, rather than deploying the {cite}`PCL` machinery here, we shall rely instead on a sneaky +**guess-and-verify** tactic. + +* We compute a pooling equilibrium and represent it as an instance of a linear state-space system provided by + the Python class `quantecon.LinearStateSpace`. +* Leaving the state-transition equation for the pooling equilibrium unaltered, we alter the observation vector + for a firm to what it is in Townsend's original model. So rather than directly observing the signal received by + firms in the other industry, a firm sees the equilibrium price + of the good produced by the other industry. +* We compute a population linear least squares regression of the noisy signal that firms in the other + industry receive in a pooling equilibrium on time $t$ information that a firm receives in Townsend's + original model. The $R^2$ in this regression equals $1$. That verifies that a firm's information + set in Townsend's original model equals its information set in a pooling equilibrium. Therefore, equilibrium + prices and quantities in Townsend's original model equal those in a pooling equilibrium.
+ +### A Sequence of Models + +We proceed by describing a sequence of models of two industries that are linked in a +single way: shocks to the demand curves for their products have a common +component. + +The models are simplified versions of Townsend's {cite}`townsend`. + +Townsend's is a model of a rational expectations equilibrium in which firms confront +the problem **forecasting the forecasts of others**. + +In Townsend's model, firms condition their forecasts on observed endogenous variables whose equilibrium laws of motion +are determined by their own forecasting functions. + +We start with model components that we shall progressively assemble in ways that can help us to appreciate the structure of a +**pooling equilibrium** that ultimately concerns us. + +While keeping other aspects of the model the same, we shall study +consequences of alternative assumptions about what decision makers +observe. + +Technically, this lecture deploys concepts and tools that appear +in [First Look at Kalman Filter](https://python-intro.quantecon.org/kalman.html) and +[Rational Expectations Equilibrium](https://python-intro.quantecon.org/rational_expectations.html). + +## The Setting + +We cast all variables in terms of deviations from means. + +Therefore, we omit constants from inverse demand curves +and other functions. + +Firms in each of two industries $i=1,2$ use a single factor of +production, capital $k_t^i$, to produce output of a single good, +$y_t^i$. + +Firms bear quadratic costs of adjusting their capital stocks. 
+ +A representative firm in industry $i$ has production function +$y_t^i = f k_t^i$, $f >0$, acts as a price taker with +respect to output price $P_t^i$, and maximizes + +```{math} +:label: town1 + +\begin{aligned} + E_0^i \sum_{t=0}^\infty \beta^t \left\{ P_t^i f k_t^i - .5 + h (k_{t+1}^i - k_t^i)^2 \right\} , + \quad h >0 .\end{aligned} +``` + +Demand in industry $i$ is described by the inverse demand curve + +```{math} +:label: town2 + +\begin{aligned} + P_t^i = - b Y_t^i + \theta_t + \epsilon_t^i , \quad b >0, + \end{aligned} +``` + +where $P_t^i$ is the price of good $i$ at $t$, +$Y_t^i = f K_t^i$ is output in market $i$, $\theta_t$ +is a persistent component of a demand shock that is common across the +two industries, and $\epsilon_t^i$ is an industry specific +component of the demand shock that is i.i.d. and whose time $t$ +marginal distribution is ${\mathcal N}(0, \sigma_{\epsilon}^2)$. + +We assume that $\theta_t$ is governed by + +```{math} +:label: town2a + +\begin{aligned} + \theta_{t+1} = \rho \theta_t + v_{t} + \end{aligned} +``` + +where $\{v_{t}\}$ is an i.i.d. sequence of Gaussian shocks each +with mean zero and variance $\sigma_v^2$. + +To simplify notation, we’ll study a special case of the model by setting +$h=f=1$. + +The presence of costs of adjusting their capital stocks imparts to firms an incentive to forecast the price of the +good that they sell. + +Throughout, we use the **rational expectations** equilibrium concept presented +in this lecture [Rational Expectations Equilibrium](https://python-intro.quantecon.org/rational_expectations.html). + +We let capital letters denote market wide objects and lower case letters +denote objects chosen by a representative firm. + +In each industry, a competitive equilibrium prevails. + +To rationalize the big $K$, little $k$ connection, we can +think of there being a continuum of each type of firm, each indexed by +$\omega \in [0,1]$ with +$K^i = \int_0^1 k^i(\omega) d \omega$.
+ +In equilibrium, $k_t^i = K_t^i$, but as usual we must distinguish +between $k_t^i$ and $K_t^i$ when we pose the firm’s +optimization problem. + +## Tactics + +We shall compute +equilibrium laws of motion for capital in industry $i$ under a +sequence of assumptions about what a representative firm observes. + +Successive members of this sequence make a representative firm’s +information more and more obscure. + +We begin with the most information, then gradually withdraw information +in a way that approaches and eventually reaches the information +structure that we are ultimately interested in. + +Thus, we shall compute equilibria under the following alternative +information structures: + +- **Perfect foresight:** future values of + $\theta_t, \epsilon_{t}^i$ are observed in industry $i$. +- **Observed but stochastic** $\theta_t$: + $\{\theta_t, \epsilon_{t}^i\}$ are realizations from a + stochastic process; current and past values of each are observed at + time $t$ but future values are not. +- **One noise-ridden observation on** $\theta_t$: Values of + $\{\theta_t, \epsilon_{t}^i\}$ separately are never observed. However, at + time $t$, a history $w^t$ of a scalar noise-ridden + observation on $\theta_t$ is observed at time $t$. +- **Two noise-ridden observations on** $\theta_t$: Values of + $\{\theta_t, \epsilon_{t}^i\}$ separately are never observed. However, at + time $t$, a history $w^t$ of *two* noise-ridden + observations on $\theta_t$ is observed at time $t$. + +Successive computations build one on another. + +We proceed by first finding an equilibrium under perfect foresight. + +To compute an equilibrium with $\theta_t$ observed, we use a +*certainty equivalence principle* to justify modifying the perfect +foresight equilibrium by replacing future values of +$\theta_s, \epsilon_{s}^i, s \geq t$ with mathematical +expectations conditioned on $\theta_t$.
+ +This provides the equilibrium when $\theta_t$ is observed at +$t$ but future $\theta_{t+j}$ and +$\epsilon_{t+j}^i$ are not observed. + +To find an equilibrium when only a history $w_t$ of a single noise-ridden +observation on $\theta_t$ is observed, we again apply a +certainty equivalence principle and replace future values of the random +variables $\theta_s, \epsilon_{s}^i, s \geq t$ with their +mathematical expectations conditioned on $w^t$. + +To find an equilibrium when only a history $w_t$ of *two* noisy +signals on $\theta_t$ is observed, we replace future values of the +random variables $\theta_s, \epsilon_{s}^i, s \geq t$ with their +mathematical expectations conditioned on history $w^t$. + +We call the equilibrium with two noise-ridden observations on $\theta_t$ a **pooling equilibrium**. + +- It corresponds to an arrangement in which at the beginning of each + period firms in industries $1$ and $2$ somehow get + together and share information about current values of their noisy + signals on $\theta$. + +We want ultimately to compare outcomes in a pooling equilibrium +with an equilibrium under the following alternative information structure for a firm +in industry $i$ that interested {cite}`townsend`: + +- **Firm** $i$’s **noise-ridden signal on** $\theta_t$ **and the + price in industry** $-i$, a firm in industry + $i$ observes a history $w^t$ of *one* noise-ridden signal + on $\theta_t$ and a history of industry $-i$’s price is + observed. + +With this information structure, the representative firm $i$ sees the price as well as the +aggregate state variable $Y_t^i$ in its own industry. + +That allows it to infer +the total demand shock $\theta_t + \epsilon_{t}^i$. + +However, at time $t$, the firm sees only $P_t^{-i}$ and does +not see $Y_t^{-i}$, so that firm $i$ does not directly +observe $\theta_t + \epsilon_t^{-i}$.
+ +Nevertheless, it will turn out that equilibrium prices and quantities in this equilibrium equal +their counterparts in a pooling equilibrium because +firms in industry $i$ are able to infer the noisy signal about the demand shock +received by firms in industry $-i$. + +We shall eventually verify this assertion by using a guess and verify tactic. [^footnote0] + +## Equilibrium conditions + +It is convenient to solve the firm’s problem without +uncertainty by forming the Lagrangian: + +$$ +\begin{aligned} +J=\sum_{t=0}^\infty \beta^t \left\{ +P_t^i k_t^i - .5 (\mu_t^i)^2 + \phi_t^i \left[ + k_t^i + \mu_t^i - k_{t+1}^i \right] \right\} \end{aligned} +$$ + +where $\{\phi_t^i\}$ is a sequence of Lagrange multipliers on the +transition law for $k_{t+1}^i$. First order conditions for the +nonstochastic problem are + +```{math} +:label: town4 + +\begin{aligned} + \phi_t^i & = \beta \phi_{t+1}^i + \beta P_{t+1}^i \\ + \mu_t^i & = \phi_t^i . \end{aligned} +``` + +Substituting the demand function {eq}`town2` for +$P_t^i$, imposing the condition that the representative firm is +representative ( $k_t^i = K_t^i$), and using the definition below +of $g_t^i$, the Euler equation {eq}`town4`, lagged +by one period, can be expressed as +$- b k_t^i + \theta_t + \epsilon_t^i + (k_{t+1}^i - k_t^i) - g_t^i =0$ +or + +```{math} +:label: pcl11 + +\begin{aligned} + k_{t+1}^i = (b+1) k_t^i - \theta_t - \epsilon_t^i + g_t^i + \end{aligned} +``` + +where we define $g_t^i$ by + +```{math} +:label: town7 + +g_t^i = \beta^{-1} (k_t^i - k_{t-1}^i) +``` + +We can write Euler equation {eq}`town4` as: + +```{math} +:label: pcl10 + +\begin{aligned} + g_t^i = P_t^i + \beta g_{t+1}^i + \end{aligned} +``` + +In addition, we have the law of motion for $\theta_t$, +{eq}`town2a`, and the demand equation +{eq}`town2`. 
+ +In summary, with perfect foresight, equilibrium conditions for industry +$i$ include the following system of difference equations: + +```{math} +:label: sol1 + +\begin{aligned} + k_{t+1}^i & = (1+b)k_t^i - \epsilon_t^i -\theta_t + g_t^i \\ + \theta_{t+1} & = \rho \theta_t + v_t \\ + g_{t+1}^i & = \beta^{-1} (g_t^i - P_t^i) \\ + P_t^i & = -b k_t^i + \epsilon_t^i + \theta_t \end{aligned} +``` + +Without perfect foresight, the same system prevails except that the +following equation replaces the third equation of {eq}`sol1`: + +$$ +\begin{aligned} +g_{t+1,t}^i = \beta^{-1} (g_t^i - P_t^i) \end{aligned} +$$ + +where +$x_{t+1,t}$ denotes the mathematical expectation of +$x_{t+1}$ conditional on information at time $t$. + +### Equilibrium under perfect foresight + +Our first step is to compute the equilibrium law of motion for +$k_t^i$ under perfect foresight. + +Let $L$ be the lag +operator. [^footnote3] + +Equations {eq}`pcl10` and {eq}`pcl11` +imply the second order difference equation in +$k_t^i$: [^footnote4] + +```{math} +:label: euler1 + +\begin{aligned} + \left[ (L^{-1} - (1+b))(1-\beta L^{-1}) + b\right] k_t^i + = \beta L^{-1} \epsilon_t^i + \beta L^{-1} \theta_t . + \end{aligned} +``` + +Factor the polynomial in $L$ on the left side as: + +$$ +\begin{aligned} +-\beta [L^{-2} -(\beta^{-1} + (1+b))L^{-1} + \beta^{-1}] += \tilde \lambda^{-1}(L^{-1} - \tilde \lambda)(1-\tilde \lambda \beta L^{-1}) +\end{aligned} +$$ + +where $|\tilde \lambda | < 1$ is the smaller root and +$\lambda$ is the larger root of +$(\lambda-1)(\lambda-1/\beta)=b\lambda$. + +Therefore, {eq}`euler1` can be expressed as + +$$ +\begin{aligned} +\tilde \lambda^{-1}(L^{-1} - \tilde \lambda) (1-\tilde \lambda \beta L^{-1}) +k_t^i = \beta L^{-1} \epsilon_t^i + \beta L^{-1} \theta_t . 
+\end{aligned} +$$ + +Solving the stable root backwards and the unstable root forwards gives + +$$ +\begin{aligned} +k_{t+1}^i = \tilde \lambda k_t^i + {\tilde \lambda \beta \over 1 -\tilde +\lambda \beta L^{-1}} +(\epsilon_{t+1}^i + \theta_{t+1} ) . +\end{aligned} +$$ + +Recall that we have already set $k^i = K^i$ at the appropriate point in the argument (i.e., _after_ having derived the first-order necessary +conditions for a representative firm in industry $i$. + +Thus, under perfect foresight the equilibrium capital stock in industry $i$ satisfies + +```{math} +:label: town5 + +\begin{aligned} + k_{t+1}^i = \tilde \lambda k_t^i + \sum_{j=1}^\infty (\tilde \lambda \beta)^j + (\epsilon_{t+j}^i + \theta_{t+j}) . + \end{aligned} +``` + +Next, we shall investigate consequences of replacing future values of +$(\epsilon_{t+j}^i + \theta_{t+j})$ in equation {eq}`town5` with alternative forecasting schemes. + +In particular, we shall compute equilibrium laws of motion for capital +under alternative assumptions about the information available to +decision makers in market $i$. + +## Equilibrium with $\theta_t$ stochastic but observed at $t$ + +If future $\theta$’s are unknown at $t$, it is appropriate +to replace all random variables on the right side of +{eq}`town5` with their conditional expectations based on +the information available to decision makers in market $i$. + +For now, we assume that this information set +$I_t^p = \begin{bmatrix} \theta^t & \epsilon^{it} \end{bmatrix}$, +where $z^t$ represents the infinite history of variable +$z_s$ up to time $t$. + +Later we shall give firms less information. + +To obtain an appropriate counterpart to {eq}`town5` under our +current assumption about information, we apply a certainty equivalence +principle. + +In particular, it is appropriate to take {eq}`town5` and +replace each term $( \epsilon_{t+j}^i+ \theta_{t+j} )$ on the +right side with +$E[ (\epsilon_{t+j}^i+ \theta_{t+j}) \vert \theta^t ]$. 
+ +After using {eq}`town2a` and the i.i.d. assumption about +$\{\epsilon_t^i\}$, this gives + +$$ +\begin{aligned} +k_{t+1}^i = \tilde \lambda k_t^i + {\tilde \lambda \beta \rho \over 1 - +\tilde \lambda \beta \rho } +\theta_t +\end{aligned} +$$ + +or + +```{math} +:label: solution1 + +\begin{aligned} + k_{t+1}^i = \tilde \lambda k_t^i + {\rho \over \lambda - \rho} \theta_t + \end{aligned} +``` + +where $\lambda \equiv (\beta \tilde \lambda)^{-1}$. + +For future purposes, it is useful to represent the equilibrium +$\{k_t^i\}_t$ process recursively as + +```{math} +:label: sol10 + +\begin{aligned} + k_{t+1}^i & = \tilde \lambda k_t^i + {1 \over \lambda - \rho} + \hat \theta_{t+1} \\ + \hat \theta_{t+1} & = \rho \theta_t \\ + \theta_{t+1} & = \rho \theta_t + v_t. \end{aligned} +``` + +### Filtering + +#### One noisy signal + +We get closer to a model that we ultimately want to study by now +assuming that firms in market $i$ do not observe $\theta_t$, +but instead observe a history $w^t$ of noisy signals at time $t$. + +In particular, assume that + +```{math} +:label: kf1&2 + +\begin{aligned} + w_t & = \theta_t + e_t \label{kf1} \\ + \theta_{t+1} & = \rho \theta_t + v_t + \end{aligned} +``` + +where $e_t$ and $v_t$ are mutually independent +i.i.d. Gaussian shock processes with means of zero and variances +$\sigma_e^2$ and $\sigma_v^2$, respectively. + +Define + +$$ +\begin{aligned} +\hat \theta_{t+1} = E(\theta_{t+1} | w^t) +\end{aligned} +$$ + +where $w^t = [w_t, w_{t-1}, \ldots, w_0]$ denotes the history of the $w_s$ process up to +and including $t$. 
+ +Associated with the state-space representation +{eq}`kf1&2` is the *innovations +representation* + +```{math} +:label: kf3&4 + +\begin{aligned} + \hat \theta_{t+1} & = \rho \hat \theta_t + k a_t \\ + w_t & = \hat \theta_t + a_t + \end{aligned} +``` + +where $a_t \equiv w_t - E(w_t | w^{t-1})$ is the *innovations* +process in $w_t$ and the Kalman gain $k$ is + +```{math} +:label: kal1 + +\begin{aligned} + k = {\rho p \over p + \sigma_e^2} \end{aligned} +``` + +and where $p$ satisfies the Riccati equation + +```{math} +:label: kf6 + +\begin{aligned} + p = \sigma_v^2 + { p \rho^2 \sigma_e^2 \over \sigma_e^2 + p}. + \end{aligned} +``` + +#### $\theta$-reconstruction error: + +Define the state *reconstruction error* $\tilde \theta_t$ by + +$$ +\begin{aligned} +\tilde \theta_t = \theta_t - \hat \theta_t . +\end{aligned} +$$ + +Then $p = E \tilde \theta_t^2$. + +Equations {eq}`kf1&2` and {eq}`kf3&4` imply + +```{math} +:label: kf7 + +\begin{aligned} + \tilde \theta_{t+1} = (\rho - k) \tilde \theta_t + v_t - k e_t . + \end{aligned} +``` + +Now notice that we can express $\hat \theta_{t+1}$ as + +```{math} +:label: kf8 + +\hat \theta_{t+1} = [\rho \theta_t + v_t] + [ ke_t - (\rho -k) \tilde \theta_t - v_t] , +``` + +where the first term in braces equals +$\theta_{t+1}$ and the second term in braces equals +$-\tilde \theta_{t+1}$. + +We can express {eq}`solution1` as + +```{math} +:label: solution2 + +\begin{aligned} + k_{t+1}^i = \tilde \lambda k_t^i + {1 \over \lambda - \rho} + E \theta_{t+1} | \theta^t . + \end{aligned} +``` + +An application of a certainty equivalence principle asserts that when +only $w^t$ is observed, the appropriate solution is found by +replacing the information set $\theta^t$ with $w^t$ in +{eq}`solution2`. 
+ +Making this substitution and using {eq}`kf8` leads to + +```{math} +:label: kf9 + +\begin{aligned} + k_{t+1}^i = \tilde \lambda k_t^i + {\rho \over \lambda - \rho} \theta_t + {k \over \lambda - \rho} e_t - {\rho - k \over \lambda - \rho} \tilde \theta_t . + \end{aligned} +``` + +Simplifying equation {eq}`kf8`, we also have + +```{math} +:label: kf8a + +\begin{aligned} + \hat \theta_{t+1} = \rho \theta_t + ke_t - (\rho -k) \tilde \theta_t . + \end{aligned} +``` + +Equations {eq}`kf9`, {eq}`kf8a` describe +the equilibrium when $w^t$ is observed. + +Relative to {eq}`solution1`, the equilibrium acquires a new +state variable, namely, the $\theta$–reconstruction error, +$\tilde \theta_t$. + +For future purposes, by using {eq}`kal1`, it is useful to +write {eq}`kf9` as + +```{math} +:label: sol2a + +\begin{aligned} + k_{t+1}^i = \tilde \lambda k_t^i + {\rho \over \lambda - \rho } \theta_t + {1 \over \lambda - \rho} {p \rho \over p + \sigma_e^2} e_t - {1 \over \lambda - \rho} {\rho \sigma_e^2 \over p + \sigma_e^2} + \tilde \theta_t + \end{aligned} +``` + +In summary, when decision makers in market $i$ observe a noisy +signal $w_t$ on $\theta_t$ at $t$, we can represent an +equilibrium law of motion for $k_t^i$ as + +```{math} +:label: sol0a + +\begin{aligned} +k_{t+1}^i & = \tilde \lambda k_t^i + {1 \over \lambda - \rho} \hat \theta_{t+1} \\ +\hat \theta_{t+1} & = \rho \theta_t + {\rho p \over p + \sigma_e^2} e_t - {\rho \sigma_e^2 \over p + \sigma_e^2} \tilde \theta_t \\ +\tilde \theta_{t+1} & = { \rho \sigma_e^2 \over p + \sigma_e^2} \tilde + \theta_t - {p \rho \over p + \sigma_e^2} e_t + v_t \\ +\theta_{t+1} & = \rho \theta_t + v_t . 
\end{aligned} +``` + +### Two noisy signals + +We now construct a **pooling equilibrium** by assuming that a firm in +industry $i$ receives a vector $w_t$ of *two* noisy signals +on $\theta_t$: + +$$ +\begin{eqnarray*} +\theta_{t+1} & = & \rho\theta_{t}+v_{t} \label{kf20} \\ +w_{t} & = & \begin{bmatrix}1\\ +1 +\end{bmatrix}\theta_{t}+\begin{bmatrix}e_{1t} \\ +e_{2t} +\end{bmatrix} \label{kf21} +\end{eqnarray*} +$$ + +To justify that we are constructing is a **pooling equilibrium** we can +assume that + +$$ +\begin{aligned} +\begin{bmatrix} e_{1t} \\ e_{2t} \end{bmatrix} = +\begin{bmatrix} \epsilon_{t}^1 \\ \epsilon_{t}^2 \end{bmatrix} +\end{aligned} +$$ + +so that a firm in industry $i$ observes the noisy signals on that +$\theta_t$ presented to firms in both industries $i$ and +$-i$. + +The appropriate innovations representation becomes + +```{math} +:label: kf22&23 + +\begin{aligned} + \hat \theta_{t+1} & = \rho + \hat \theta_t + k a_t \\ + w_t & = \begin{bmatrix} 1 \\ 1 \end{bmatrix} \hat \theta_t + a_t + \end{aligned} +``` + +where $a_t \equiv w_t - E [w_t | w^{t-1}]$ is a +$(2 \times 1)$ vector of innovations in $w_t$ and $k$ +is now a $(1 \times 2)$ vector of Kalman gains. + +Formulas for the Kalman filter imply that + +```{math} +:label: kf24 + +\begin{aligned} + k ={ \rho p \over 2 p + \sigma_e^2} + \begin{bmatrix}1 & 1 \end{bmatrix} + \end{aligned} +``` + +where $p = E \tilde \theta_t \tilde \theta_t^T$ now satisfies the +Riccati equation + +```{math} +:label: ricc2 + +\begin{aligned} + p = \sigma_v^2 + {p \rho^2 \sigma_e^2 \over 2 p + \sigma_e^2}. 
+ \end{aligned} +``` + +Thus, when a representative firm in industry $i$ observes *two* +noisy signals on $\theta_t$, we can express the equilibrium law of +motion for capital recursively as + +```{math} +:label: sol3 + +\begin{aligned} +k_{t+1}^i & = \tilde \lambda k_t^i + {1 \over \lambda - \rho}\hat \theta_{t+1} \\ +\hat \theta_{t+1} & = \rho \theta_t + {\rho p \over 2 p + \sigma_e^2} (e_{1t}+e_{2t}) - {\rho \sigma_e^2 \over 2 p + \sigma_e^2} \tilde \theta_t \\ +\tilde \theta_{t+1} & = { \rho \sigma_e^2 \over 2 p + \sigma_e^2} \tilde \theta_t - {p \rho \over 2 p + \sigma_e^2}(e_{1t}+e_{2t}) +v_t \\ +\theta_{t+1} & = \rho \theta_t + v_t . + \end{aligned} +``` + +Below, by using a guess-and-verify tactic, we shall show that outcomes in this **pooling equilibrium** equal those in an equilibrium under the alternative +information structure that interested {cite}`townsend`. [^footnote5] + +## Guess-and-verify tactic + +As a preliminary step we shall take our recursive representation {eq}`sol0a` +of an equilibrium in industry $i$ with one noisy signal +on $\theta_t$ and perform the following steps: + +- Compute $\lambda$ and $\tilde{\lambda}$ by posing a + root-finding problem and then solving it using `numpy.roots` +- Compute $p$ by forming the appropriate discrete Riccati equation and then solving it + using `quantecon.solve_discrete_riccati` +- Add a *measurement equation* for + $P_t^i = b k_t^i + \theta_t + e_t$, $\theta_t + e_t$, + and $e_t$ to system {eq}`sol0a`. Write the resulting system + in state-space form and encode it using `quantecon.LinearStateSpace` +- Use methods of the `quantecon.LinearStateSpace` to compute impulse response + functions of $k_t^i$ with respect to shocks $v_t, e_t$. + +After analyzing the one-noisy-signal structure in this way, by making appropriate modifications +we shall analyze the two-noisy-signal +structure. + +We proceed to analyze first the one-noisy-signal structure and then the two-noisy-signal structure. 
+
+## Equilibrium with one signal on $\theta_t$
+
+### Step 1: Solve for $\tilde{\lambda}$ and $\lambda$
+
+1. Cast
+   $\left(\lambda-1\right)\left(\lambda-\frac{1}{\beta}\right)=b\lambda$
+   as $p\left(\lambda\right)=0$ where $p$ is a polynomial
+   function of $\lambda$.
+1. Use `numpy.roots` to solve for the roots of $p$
+1. Verify $\lambda \approx \frac{1}{\beta\tilde{\lambda}}$
+
+Note that
+$p\left(\lambda\right)=\lambda^{2}-\left(1+b+\frac{1}{\beta}\right)\lambda+\frac{1}{\beta}$.
+
+### Step 2: Solve for $p$
+
+1. Cast
+   $p=\sigma_{v}^{2}+\frac{p\rho^{2}\sigma_{e}^{2}}{p+\sigma_{e}^{2}}$
+   as a discrete matrix Riccati equation.
+1. Use `quantecon.solve_discrete_riccati` to solve for $p$
+1. Verify
+   $p \approx\sigma_{v}^{2}+\frac{p\rho^{2}\sigma_{e}^{2}}{p+\sigma_{e}^{2}}$
+
+Note that:
+
+$$
+\begin{aligned}
+    A & = & \left[\begin{array}{c}
+    \rho\end{array}\right]\\
+    B & = & \left[\begin{array}{c}
+    1\end{array}\right]\\
+    R & = & \left[\begin{array}{c}
+    \sigma_{e}^{2}\end{array}\right]\\
+    Q & = & \left[\begin{array}{c}
+    \sigma_{v}^{2}\end{array}\right]\\
+    N & = & \left[\begin{array}{c}
+    0\end{array}\right]
+\end{aligned}
+$$
+
+### Step 3: Represent the system using `quantecon.LinearStateSpace`
+
+We use the following representation for constructing the
+`quantecon.LinearStateSpace` instance. 
+ +$$ +\begin{eqnarray*} +\underbrace{\left[\begin{array}{c} +e_{t+1}\\ +k_{t+1}^{i}\\ +\tilde{\theta}_{t+1}\\ +P_{t+1}\\ +\theta_{t+1}\\ +v_{t+1} +\end{array}\right]}_{x_{t+1}} & = & \underbrace{\left[\begin{array}{cccccc} +0 & 0 & 0 & 0 & 0 & 0\\ +\frac{\kappa}{\lambda-\rho} & \tilde{\lambda} & \frac{-1}{\lambda-\rho}\frac{\kappa\sigma_{e}^{2}}{p} & 0 & \frac{\rho}{\lambda-\rho} & 0\\ +-\kappa & 0 & \frac{\kappa\sigma_{e}^{2}}{p} & 0 & 0 & 1\\ +\frac{b\kappa}{\lambda-\rho} & b\tilde{\lambda} & \frac{-b}{\lambda-\rho}\frac{\kappa\sigma_{e}^{2}}{p} & 0 & \frac{b\rho}{\lambda-\rho}+\rho & 1\\ +0 & 0 & 0 & 0 & \rho & 1\\ +0 & 0 & 0 & 0 & 0 & 0 +\end{array}\right]}_{A}\underbrace{\left[\begin{array}{c} +e_{t}\\ +k_{t}^{i}\\ +\tilde{\theta}_{t}\\ +P_{t}\\ +\theta_{t}\\ +v_{t} +\end{array}\right]}_{x_{t}}+\underbrace{\left[\begin{array}{cc} +\sigma_{e} & 0\\ +0 & 0\\ +0 & 0\\ +\sigma_{e} & 0\\ +0 & 0\\ +0 & \sigma_{v} +\end{array}\right]}_{C}\left[\begin{array}{c} +z_{1,t+1}\\ +z_{2,t+1} +\end{array}\right]\\ +\underbrace{\left[\begin{array}{c} +P_{t}\\ +e_{t}+\theta_{t}\\ +e_{t} +\end{array}\right]}_{y_{t}} & = & \underbrace{\left[\begin{array}{cccccc} +0 & 0 & 0 & 1 & 0 & 0\\ +1 & 0 & 0 & 0 & 1 & 0\\ +1 & 0 & 0 & 0 & 0 & 0 +\end{array}\right]}_{G}\underbrace{\left[\begin{array}{c} +e_{t}\\ +k_{t}^{i}\\ +\tilde{\theta}_{t}\\ +P_{t}\\ +\theta_{t}\\ +v_{t} +\end{array}\right]}_{x_{t}}+\underbrace{\left[\begin{array}{c} +0\\ +0\\ +0 +\end{array}\right]}_{H}w_{t+1}\\ +\left[\begin{array}{c} +z_{1,t+1}\\ +z_{2,t+1}\\ +w_{t+1} +\end{array}\right] & \sim & \mathcal{N}\left(0,I\right)\\ +\kappa & = & \frac{\rho p}{p+\sigma_{e}^{2}} +\end{eqnarray*} +$$ + +This representation includes extraneous variables such as $P_{t}$ in the +state vector. 
+
+We formulate things in this way because
+it allows us easily to compute covariances of these variables with other
+components of the state vector (step 5 above) by using the `stationary_distributions` method of the `LinearStateSpace` class.
+
+```{code-cell} ipython
+import numpy as np
+import quantecon as qe
+from plotly.subplots import make_subplots
+import plotly.graph_objects as go
+import plotly.express as px
+import plotly.offline as pyo
+from statsmodels.regression.linear_model import OLS
+from IPython.display import display, Latex, Image
+
+pyo.init_notebook_mode(connected=True)
+```
+
+```{code-cell} python3
+β = 0.9 # Discount factor
+ρ = 0.8 # Persistence parameter for the hidden state
+b = 1.5 # Demand curve parameter
+σ_v = 0.5 # Standard deviation of shock to θ_t
+σ_e = 0.6 # Standard deviation of shocks to w_t
+```
+
+```{code-cell} python3
+# Compute λ and λ_tilde as roots of p(λ) = λ² - (1 + b + 1/β)λ + 1/β
+poly = np.array([1, -(1 + b + 1 / β), 1 / β])
+roots_poly = np.roots(poly)
+λ_tilde = roots_poly.min()
+λ = roots_poly.max()
+```
+
+```{code-cell} python3
+# Verify that λ = (βλ_tilde) ^ (-1)
+tol = 1e-12
+np.max(np.abs(λ - 1 / (β * λ_tilde))) < tol
+```
+
+```{code-cell} python3
+A_ricc = np.array([[ρ]])
+B_ricc = np.array([[1.]])
+R_ricc = np.array([[σ_e ** 2]])
+Q_ricc = np.array([[σ_v ** 2]])
+N_ricc = np.zeros((1, 1))
+p = qe.solve_discrete_riccati(A_ricc, B_ricc, Q_ricc, R_ricc, N_ricc).item()
+
+p_one = p # Save for comparison later
+```
+
+```{code-cell} python3
+# Verify that p = σ_v ^ 2 + p * ρ ^ 2 - (ρ * p) ^ 2 / (p + σ_e ** 2)
+tol = 1e-12
+np.abs(p - (σ_v ** 2 + p * ρ ** 2 - (ρ * p) ** 2 / (p + σ_e ** 2))) < tol
+```
+
+```{code-cell} python3
+κ = ρ * p / (p + σ_e ** 2)
+κ_prod = κ * σ_e ** 2 / p
+
+κ_one = κ # Save for comparison later
+
+A_lss = np.array([[0., 0., 0., 0., 0., 0.],
+                  [κ / (λ - ρ), λ_tilde, -κ_prod / (λ - ρ), 0., ρ / (λ - ρ), 0.],
+                  [-κ, 0., κ_prod, 0., 0., 1.],
+                  [b * κ / (λ - ρ) , b * λ_tilde, -b * κ_prod / (λ - ρ), 0., b * ρ / (λ - ρ) + ρ, 1.],
+                  [0., 0., 0., 0., 
ρ, 1.], + [0., 0., 0., 0., 0., 0.]]) + +C_lss = np.array([[σ_e, 0.], + [0., 0.], + [0., 0.], + [σ_e, 0.], + [0., 0.], + [0., σ_v]]) + +G_lss = np.array([[0., 0., 0., 1., 0., 0.], + [1., 0., 0., 0., 1., 0.], + [1., 0., 0., 0., 0., 0.]]) +``` + +```{code-cell} python3 +mu_0 = np.array([0., 0., 0., 0., 0., 0.]) + +lss = qe.LinearStateSpace(A_lss, C_lss, G_lss, mu_0=mu_0) +``` + +```{code-cell} python3 +ts_length = 100_000 +x, y = lss.simulate(ts_length, random_state=1) +``` + +```{code-cell} python3 +# Verify that two ways of computing P_t match +np.max(np.abs(np.array([[1., b, 0., 0., 1., 0.]]) @ x - x[3])) < 1e-12 +``` + +### Step 4: Compute impulse response functions + +To compute impulse response functions of $k_t^i$, we use the `impulse_response` method of the +`quantecon.LinearStateSpace` class and plot the result. + +```{code-cell} python3 +xcoef, ycoef = lss.impulse_response(j=21) +data = np.array([xcoef])[0, :, 1, :] + +fig = go.Figure(data=go.Scatter(y=data[:-1, 0], name=r'$e_{t+1}$')) +fig.add_trace(go.Scatter(y=data[1:, 1], name=r'$v_{t+1}$')) +fig.update_layout(title=r'Impulse Response Function', + xaxis_title='Time', + yaxis_title=r'$k^{i}_{t}$') +fig1 = fig +# Export to PNG file +Image(fig1.to_image(format="png")) +# fig1.show() will provide interactive plot when running +# notebook locally +``` + +### Step 5: Compute stationary covariance matrices and population regressions + +We compute stationary covariance matrices by +calling the `stationary_distributions` method of +the `quantecon.LinearStateSpace` class. + +By appropriately decomposing the covariance matrix of the state vector, we obtain ingredients +of some population regression coefficients. + +$$ +\Sigma_{x}=\left[\begin{array}{cc} +\Sigma_{11} & \Sigma_{12}\\ +\Sigma_{21} & \Sigma_{22} +\end{array}\right] +$$ + +where $\Sigma_{11}$ is the covariance matrix of dependent variables and $\Sigma_{22}$ is the covariance matrix of independent variables. 
+ +Regression coefficients are $\beta=\Sigma_{21}\Sigma_{22}^{-1}$. + +To verify an instance of a law of large numbers computation, we construct a long simulation of +the state vector and for the resulting sample compute +the ordinary least-squares estimator of $\beta$ that we shall compare to the corresponding population regression +coefficients. + +```{code-cell} python3 +_, _, Σ_x, Σ_y = lss.stationary_distributions() + +Σ_11 = Σ_x[0, 0] +Σ_12 = Σ_x[0, 1:4] +Σ_21 = Σ_x[1:4, 0] +Σ_22 = Σ_x[1:4, 1:4] + +reg_coeffs = Σ_12 @ np.linalg.inv(Σ_22) + +print('Regression coefficients (e_t on k_t, P_t, \\tilde{\\theta_t})') +print('------------------------------') +print(r'k_t:', reg_coeffs[0]) +print(r'\tilde{\theta_t}:', reg_coeffs[1]) +print(r'P_t:', reg_coeffs[2]) +``` + +```{code-cell} python3 +# Compute R squared +R_squared = reg_coeffs @ Σ_x[1:4, 1:4] @ reg_coeffs / Σ_x[0, 0] +R_squared +``` + +```{code-cell} python3 +# Verify that the computed coefficients are close to least squares estimates +model = OLS(x[0], x[1:4].T) +reg_res = model.fit() +np.max(np.abs(reg_coeffs - reg_res.params)) < 1e-2 +``` + +```{code-cell} python3 +# Verify that R_squared matches least squares estimate +np.abs(reg_res.rsquared - R_squared) < 1e-2 +``` + +```{code-cell} python3 +# Verify that θ_t + e_t can be recovered +model = OLS(y[1], x[1:4].T) +reg_res = model.fit() +np.abs(reg_res.rsquared - 1.) < 1e-6 +``` + +## Equilibrium with two noisy signals on $\theta_t$ + +Steps 1, 4, and 5 are identical to those for the one-noisy-signal structure. + +Step 2 requires only a straightforward modification. + +For step 3, we use construct the following state-space representation so that we can get our hands on +all of the random processes that we require in order to compute a regression of the noisy signal about +$\theta$ from the other industry that a firm receives directly in a pooling equilibrium on the information that +a firm receives in Townsend's original model. 
+ +For this purpose, we include equilibrium goods prices from both industries appear in the state vector: + +$$ +\begin{eqnarray*} +\underbrace{\left[\begin{array}{c} +e_{1,t+1}\\ +e_{2,t+1}\\ +k_{t+1}^{i}\\ +\tilde{\theta}_{t+1}\\ +P_{t+1}^{1}\\ +P_{t+1}^{2}\\ +\theta_{t+1}\\ +v_{t+1} +\end{array}\right]}_{x_{t+1}} & = & \underbrace{\left[\begin{array}{cccccccc} +0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\ +0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\ +\frac{\kappa}{\lambda-\rho} & \frac{\kappa}{\lambda-\rho} & \tilde{\lambda} & \frac{-1}{\lambda-\rho}\frac{\kappa\sigma_{e}^{2}}{p} & 0 & 0 & \frac{\rho}{\lambda-\rho} & 0\\ +-\kappa & -\kappa & 0 & \frac{\kappa\sigma_{e}^{2}}{p} & 0 & 0 & 0 & 1\\ +\frac{b\kappa}{\lambda-\rho} & \frac{b\kappa}{\lambda-\rho} & b\tilde{\lambda} & \frac{-b}{\lambda-\rho}\frac{\kappa\sigma_{e}^{2}}{p} & 0 & 0 & \frac{b\rho}{\lambda-\rho}+\rho & 1\\ +\frac{b\kappa}{\lambda-\rho} & \frac{b\kappa}{\lambda-\rho} & b\tilde{\lambda} & \frac{-b}{\lambda-\rho}\frac{\kappa\sigma_{e}^{2}}{p} & 0 & 0 & \frac{b\rho}{\lambda-\rho}+\rho & 1\\ +0 & 0 & 0 & 0 & 0 & 0 & \rho & 1\\ +0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 +\end{array}\right]}_{A}\underbrace{\left[\begin{array}{c} +e_{1,t}\\ +e_{2,t}\\ +k_{t}^{i}\\ +\tilde{\theta}_{t}\\ +P_{t}^{1}\\ +P_{t}^{2}\\ +\theta_{t}\\ +v_{t} +\end{array}\right]}_{x_{t}}+\underbrace{\left[\begin{array}{ccc} +\sigma_{e} & 0 & 0\\ +0 & \sigma_{e} & 0\\ +0 & 0 & 0\\ +0 & 0 & 0\\ +\sigma_{e} & 0 & 0\\ +0 & \sigma_{e} & 0\\ +0 & 0 & 0\\ +0 & 0 & \sigma_{v} +\end{array}\right]}_{C}\left[\begin{array}{c} +z_{1,t+1}\\ +z_{2,t+1}\\ +z_{3,t+1} +\end{array}\right]\\ +\underbrace{\left[\begin{array}{c} +P_{t}^{1}\\ +P_{t}^{2}\\ +e_{1,t}+\theta_{t}\\ +e_{2,t}+\theta_{t}\\ +e_{1,t}\\ +e_{2,t} +\end{array}\right]}_{y_{t}} & = & \underbrace{\left[\begin{array}{cccccccc} +0 & 0 & 0 & 0 & 1 & 0 & 0 & 0\\ +0 & 0 & 0 & 0 & 0 & 1 & 0 & 0\\ +1 & 0 & 0 & 0 & 0 & 0 & 1 & 0\\ +0 & 1 & 0 & 0 & 0 & 0 & 1 & 0\\ +1 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\ +0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 
+\end{array}\right]}_{G}\underbrace{\left[\begin{array}{c} +e_{1,t}\\ +e_{2,t}\\ +k_{t}^{i}\\ +\tilde{\theta}_{t}\\ +P_{t}^{1}\\ +P_{t}^{2}\\ +\theta_{t}\\ +v_{t} +\end{array}\right]}_{x_{t}}+\underbrace{\left[\begin{array}{c} +0\\ +0\\ +0\\ +0\\ +0\\ +0 +\end{array}\right]}_{H}w_{t+1}\\ +\left[\begin{array}{c} +z_{1,t+1}\\ +z_{2,t+1}\\ +z_{3,t+1}\\ +w_{t+1} +\end{array}\right] & \sim & \mathcal{N}\left(0,I\right)\\ +\kappa & = & \frac{\rho p}{2p+\sigma_{e}^{2}} +\end{eqnarray*} +$$ + +```{code-cell} python3 +A_ricc = np.array([[ρ]]) +B_ricc = np.array([[np.sqrt(2)]]) +R_ricc = np.array([[σ_e ** 2]]) +Q_ricc = np.array([[σ_v ** 2]]) +N_ricc = np.zeros((1, 1)) +p = qe.solve_discrete_riccati(A_ricc, B_ricc, Q_ricc, R_ricc, N_ricc).item() + +p_two = p # Save for comparison later +``` + +```{code-cell} python3 +# Verify that p = σ_v^2 + (pρ^2σ_e^2) / (2p + σ_e^2) +tol = 1e-12 +np.abs(p - (σ_v ** 2 + p * ρ ** 2 * σ_e ** 2 / (2 * p + σ_e ** 2))) < tol +``` + +```{code-cell} python3 +κ = ρ * p / (2 * p + σ_e ** 2) +κ_prod = κ * σ_e ** 2 / p + +κ_two = κ # Save for comparison later + +A_lss = np.array([[0., 0., 0., 0., 0., 0., 0., 0.], + [0., 0., 0., 0., 0., 0., 0., 0.], + [κ / (λ - ρ), κ / (λ - ρ), λ_tilde, -κ_prod / (λ - ρ), 0., 0., ρ / (λ - ρ), 0.], + [-κ, -κ, 0., κ_prod, 0., 0., 0., 1.], + [b * κ / (λ - ρ), b * κ / (λ - ρ), b * λ_tilde, -b * κ_prod / (λ - ρ), 0., 0., b * ρ / (λ - ρ) + ρ, 1.], + [b * κ / (λ - ρ), b * κ / (λ - ρ), b * λ_tilde, -b * κ_prod / (λ - ρ), 0., 0., b * ρ / (λ - ρ) + ρ, 1.], + [0., 0., 0., 0., 0., 0., ρ, 1.], + [0., 0., 0., 0., 0., 0., 0., 0.]]) + +C_lss = np.array([[σ_e, 0., 0.], + [0., σ_e, 0.], + [0., 0., 0.], + [0., 0., 0.], + [σ_e, 0., 0.], + [0., σ_e, 0.], + [0., 0., 0.], + [0., 0., σ_v]]) + +G_lss = np.array([[0., 0., 0., 0., 1., 0., 0., 0.], + [0., 0, 0, 0., 0., 1., 0., 0.], + [1., 0., 0., 0., 0., 0., 1., 0.], + [0., 1., 0., 0., 0., 0., 1., 0.], + [1., 0., 0., 0., 0., 0., 0., 0.], + [0., 1., 0., 0., 0., 0., 0., 0.]]) +``` + 
+```{code-cell} python3 +mu_0 = np.array([0., 0., 0., 0., 0., 0., 0., 0.]) + +lss = qe.LinearStateSpace(A_lss, C_lss, G_lss, mu_0=mu_0) +``` + +```{code-cell} python3 +ts_length = 100_000 +x, y = lss.simulate(ts_length, random_state=1) +``` + +```{code-cell} python3 +xcoef, ycoef = lss.impulse_response(j=20) +``` + +```{code-cell} python3 +data = np.array([xcoef])[0, :, 2, :] + +fig = go.Figure(data=go.Scatter(y=data[:-1, 0], name=r'$e_{1,t+1}$')) +fig.add_trace(go.Scatter(y=data[:-1, 1], name=r'$e_{2,t+1}$')) +fig.add_trace(go.Scatter(y=data[1:, 2], name=r'$v_{t+1}$')) +fig.update_layout(title=r'Impulse Response Function', + xaxis_title='Time', + yaxis_title=r'$k^{i}_{t}$') +fig2=fig +# Export to PNG file +Image(fig2.to_image(format="png")) +# fig2.show() will provide interactive plot when running +# notebook locally +``` + +```{code-cell} python3 +_, _, Σ_x, Σ_y = lss.stationary_distributions() + +Σ_11 = Σ_x[1, 1] +Σ_12 = Σ_x[1, 2:5] +Σ_21 = Σ_x[2:5, 1] +Σ_22 = Σ_x[2:5, 2:5] + +reg_coeffs = Σ_12 @ np.linalg.inv(Σ_22) + +print('Regression coefficients (e_{2,t} on k_t, P^{1}_t, \\tilde{\\theta_t})') +print('------------------------------') +print(r'k_t:', reg_coeffs[0]) +print(r'\tilde{\theta_t}:', reg_coeffs[1]) +print(r'P_t:', reg_coeffs[2]) +``` + +```{code-cell} python3 +# Compute R squared +R_squared = reg_coeffs @ Σ_x[2:5, 2:5] @ reg_coeffs / Σ_x[1, 1] +R_squared +``` + +```{code-cell} python3 +# Verify that the computed coefficients are close to least squares estimates +model = OLS(x[1], x[2:5].T) +reg_res = model.fit() +np.max(np.abs(reg_coeffs - reg_res.params)) < 1e-2 +``` + +```{code-cell} python3 +# Verify that R_squared matches least squares estimate +np.abs(reg_res.rsquared - R_squared) < 1e-2 +``` + +```{code-cell} python3 +_, _, Σ_x, Σ_y = lss.stationary_distributions() + +Σ_11 = Σ_x[1, 1] +Σ_12 = Σ_x[1, 2:6] +Σ_21 = Σ_x[2:6, 1] +Σ_22 = Σ_x[2:6, 2:6] + +reg_coeffs = Σ_12 @ np.linalg.inv(Σ_22) + +print('Regression coefficients (e_{2,t} on k_t, 
P^{1}_t, P^{2}_t, \\tilde{\\theta_t})')
+print('------------------------------')
+print(r'k_t:', reg_coeffs[0])
+print(r'\tilde{\theta_t}:', reg_coeffs[1])
+print(r'P^{1}_t:', reg_coeffs[2])
+print(r'P^{2}_t:', reg_coeffs[3])
+```
+
+```{code-cell} python3
+# Compute R squared
+R_squared = reg_coeffs @ Σ_x[2:6, 2:6] @ reg_coeffs / Σ_x[1, 1]
+R_squared
+```
+
+## Key step
+
+Now we come to the key step of verifying that equilibrium outcomes for prices and quantities are identical
+in the pooling equilibrium and Townsend's original model.
+
+We accomplish this by computing a population linear least squares regression of the noisy signal that firms in the other
+industry receive in a pooling equilibrium on time $t$ information that a firm receives in Townsend's
+original model.
+
+Let's compute the regression and stare at the $R^2$:
+
+```{code-cell} python3
+# Verify that θ_t + e^{2}_t can be recovered
+
+# θ_t + e^{2}_t on k^{i}_t, P^{1}_t, P^{2}_t, \\tilde{\\theta_t}
+
+
+model = OLS(y[1], x[2:6].T)
+reg_res = model.fit()
+np.abs(reg_res.rsquared - 1.) < 1e-6
+```
+
+```{code-cell} python3
+reg_res.rsquared
+```
+
+The $R^2$ in this regression equals $1$.
+
+That verifies that a firm's information
+set in Townsend's original model equals its information set in a pooling equilibrium.
+
+Therefore, equilibrium prices and quantities in Townsend's original model equal those in a pooling equilibrium.
+
+## Comparison of the two signal structures
+
+It is enlightening to plot side by side impulse response functions for capital in an industry for the two
+noisy-signal information structures.
+
+Please remember that the two-signal structure corresponds to the **pooling equilibrium** and also
+**Townsend's original model**. 
+
+```{code-cell} python3
+fig_comb = go.Figure(data=[*fig1.data,
+                           *fig2.update_traces(xaxis='x2', yaxis='y2').data]).set_subplots(1, 2,
+                           subplot_titles=("One noisy-signal structure", "Two noisy-signal structure"),
+                           horizontal_spacing=0.1,
+                           shared_yaxes=True)
+# Export to PNG file
+Image(fig_comb.to_image(format="png"))
+# fig_comb.show() will provide interactive plot when running
+# notebook locally
+```
+
+The graphs above show that
+
+* the response of $k_t^i$ to shocks $v_t$ to the hidden Markov demand state $\theta_t$ process is **larger** in
+  the two-noisy-signal structure
+* the response of $k_t^i$ to idiosyncratic *own-market* noise-shocks $e_t$ is **smaller** in the two-noisy-signal structure
+
+Taken together, these findings in turn can be shown to imply that time series correlations and coherences between outputs in
+the two industries are higher in the two-noisy-signals or **pooling** model.
+
+The enhanced influence of the shocks $v_t$ to the hidden Markov demand state $\theta_t$ process that
+emerges from the two-noisy-signal model relative to the one-noisy-signal model is a symptom of a lower
+equilibrium hidden-state reconstruction error variance in the two-signal model:
+
+```{code-cell} python3
+display(Latex('$\\textbf{Reconstruction error variances}$'))
+display(Latex(f'One-noise structure: ${round(p_one, 6)}$'))
+display(Latex(f'Two-noise structure: ${round(p_two, 6)}$'))
+```
+
+Kalman gains for the two
+structures are
+
+```{code-cell} python3
+display(Latex('$\\textbf{Kalman Gains}$'))
+display(Latex(f'One noisy-signal structure: ${round(κ_one, 6)}$'))
+display(Latex(f'Two noisy-signals structure: ${round(κ_two, 6)}$'))
+```
+
+## Notes on History of the Problem
+
+To truncate what he saw as an intractable, infinite dimensional state space,
+Townsend constructed an approximating model in which the common hidden Markov demand shock
+is revealed to all firms after a fixed number of periods. 
+
+Thus,
+
+* Townsend wanted to assume that at time $t$ firms in industry $i$ observe
+  $k_t^i, Y_t^i, P_t^i, (P^{-i})^t$, where $(P^{-i})^t$ is the history of prices in
+  the other market up to time $t$.
+* Because that turned out to be too challenging, Townsend made an
+  alternative assumption that eased his calculations: that after a large
+  number $S$ of periods, firms in industry $i$ observe the
+  hidden Markov component of the demand shock $\theta_{t-S}$.
+
+>
+
+Townsend argued that the more manageable model could do a good job of
+approximating the intractable model in which the Markov component of the demand shock remains unobserved
+for ever.
+
+By applying technical machinery of {cite}`PCL`,
+{cite}`Pearlman_Sargent2005` showed that there is a recursive
+representation of the equilibrium of the perpetually and symmetrically
+uninformed model formulated but not completely solved in section 8 of
+{cite}`townsend`.
+
+A reader of {cite}`Pearlman_Sargent2005` will notice that their representation of the equilibrium of
+Townsend's model exactly matches that of the **pooling equilibrium** presented here.
+
+We have structured our notation in this lecture to facilitate comparison of the **pooling equilibrium**
+constructed here with the equilibrium of Townsend's model reported in {cite}`Pearlman_Sargent2005`.
+
+The computational method of {cite}`Pearlman_Sargent2005` is recursive:
+it enlists the Kalman filter and invariant subspace methods for
+solving systems of Euler
+equations [^footnote1] .
+
+As {cite}`singleton`,
+{cite}`kasa`, and {cite}`sargent91` also
+found, the equilibrium is fully revealing: observed prices tell
+participants in industry $i$ all of the information held by
+participants in market $-i$ ($-i$ means not $i$).
+
+This
+means that higher-order beliefs play no role: seeing equilibrium prices
+in effect lets decision makers pool their information
+sets [^footnote2] . 
+ +The disappearance of higher order beliefs means that +decision makers in this model do not really face a problem of +forecasting the forecasts of others. + +They know those forecasts because +they are the same as their own. + +### Further historical remarks + +{cite}`sargent91` proposed a way to compute an equilibrium +without making Townsend’s approximation. + +Extending the reasoning of {cite}`muth1960`, Sargent noticed that it is possible to +summarize the relevant history with a low dimensional object, namely, a +small number of current and lagged forecasting errors. + +Positing an +equilibrium in a space of perceived laws of motion for endogenous +variables that takes the form of a vector autoregressive, moving +average, Sargent described an equilibrium as a fixed point of a mapping +from the perceived law of motion to the actual law of motion of that +form. + +Sargent worked in the time domain and had to guess and verify the +appropriate orders of the autoregressive and moving average pieces of +the equilibrium representation. + +By working in the frequency +domain {cite}`kasa` showed how to discover the appropriate +orders of the autoregressive and moving average parts, and also how to +compute an equilibrium. + +The {cite}`Pearlman_Sargent2005` recursive computational method, which stays in the time domain, also +discovered appropriate orders of the autoregressive and moving +average pieces. + +In addition, by displaying equilibrium representations +in the form of {cite}`PCL`, {cite}`Pearlman_Sargent2005` +showed how the moving average piece is linked to the innovation process +of the hidden persistent component of the demand shock. + +That scalar +innovation process is the additional state variable contributed by the +problem of extracting a signal from equilibrium prices that decision +makers face in Townsend’s model. 
+
+[^footnote0]: {cite}`Pearlman_Sargent2005` verified this assertion using a different tactic, namely, by constructing
+analytic formulas for an equilibrium under the incomplete
+information structure and confirming that they match the pooling equilibrium formulas derived here.
+
+[^footnote1]: See {cite}`ahms` for an account of invariant subspace methods.
+
+[^footnote2]: See {cite}`ahms` for a discussion
+of the information assumptions needed to create a situation
+in which higher order beliefs appear in equilibrium decision rules. The way
+to read our findings in light of {cite}`ahms` is that Townsend's
+section 8 model has too few sources of random shocks relative
+to sources of signals to permit higher order beliefs to
+play a role.
+
+[^footnote3]: See {cite}`Sargent1987`, especially
+chapters IX and XIV, for the principles that guide solving some roots backwards and others forwards.
+
+[^footnote4]: As noted in {cite}`Sargent1987`, this difference equation is the Euler equation for
+the planning problem of maximizing the discounted sum of consumer plus
+producer surplus.
+
+[^footnote5]: {cite}`Pearlman_Sargent2005` verify the same claim by applying machinery of {cite}`PCL`.
+
diff --git a/lectures/markov_jump_lq.md b/lectures/markov_jump_lq.md
index 32f63e57..9d5042cc 100644
--- a/lectures/markov_jump_lq.md
+++ b/lectures/markov_jump_lq.md
@@ -135,7 +135,7 @@ $$
 
 With the preceding formulas in mind, we are ready to approach Markov Jump linear
 quadratic dynamic programming.
 
-## Linked Ricatti equations for Markov LQ dynamic programming
+## Linked Riccati equations for Markov LQ dynamic programming
 
 The key idea is to make the matrices $A, B, C, R, Q, W$ fixed
 functions of a finite state $s$ that is governed by an $N$
@@ -762,7 +762,7 @@ $k^{target}_{s_t} \rightarrow k^*_{s_t}$. 
But when $\lambda \rightarrow 1$, the Markov transition matrix becomes more nearly periodic, so the optimum decision rules target more -at the optimal k level in the other state in order to enjoy higher +at the optimal $k$ level in the other state in order to enjoy higher expected payoff in the next period. The switch happens at $\lambda = 0.5$ when both states are equally diff --git a/lectures/matsuyama.md b/lectures/matsuyama.md index 74ccedae..e34ec308 100644 --- a/lectures/matsuyama.md +++ b/lectures/matsuyama.md @@ -292,7 +292,7 @@ Here $$ \begin{aligned} D_{LL} & := \{ (n_1, n_2) \in \mathbb{R}_{+}^{2} | n_j \leq s_j(\rho) \} \\ - D_{HH} & := \{ (n_1, n_2) \in \mathbb{R}_{+}^{2} | n_j \geq h_j(\rho) \} \\ + D_{HH} & := \{ (n_1, n_2) \in \mathbb{R}_{+}^{2} | n_j \geq h_j(n_k) \} \\ D_{HL} & := \{ (n_1, n_2) \in \mathbb{R}_{+}^{2} | n_1 \geq s_1(\rho) \text{ and } n_2 \leq h_2(n_1) \} \\ D_{LH} & := \{ (n_1, n_2) \in \mathbb{R}_{+}^{2} | n_1 \leq h_1(n_2) \text{ and } n_2 \geq s_2(\rho) \} \end{aligned} diff --git a/lectures/opt_tax_recur.md b/lectures/opt_tax_recur.md index 6839e833..7aa48420 100644 --- a/lectures/opt_tax_recur.md +++ b/lectures/opt_tax_recur.md @@ -74,6 +74,9 @@ import matplotlib.pyplot as plt ## A Competitive Equilibrium with Distorting Taxes +At time $t \geq 0$ a random variable $s_t$ belongs to a time-invariant +set ${\cal S} = [1, 2, \ldots, S]$. + For $t \geq 0$, a history $s^t = [s_t, s_{t-1}, \ldots, s_0]$ of an exogenous state $s_t$ has joint probability density $\pi_t(s^t)$. @@ -271,6 +274,8 @@ q_t^0(s^t) = \beta^{t} \pi_{t}(s^{t}) {u_c(s^{t}) \over u_c(s^0)} ``` +(The stochastic process $\{q_t^0(s^t)\}$ is an instance of what finance economists call a *stochastic discount factor* process.) + Using the first-order conditions {eq}`LSA_taxr` and {eq}`LS101` to eliminate taxes and prices from {eq}`TS_bcPV2`, we derive the *implementability condition* @@ -325,7 +330,7 @@ multipliers on the feasible conditions {eq}`TSs_techr_opt_tax`. 
Given an initial government debt $b_0$, we want to maximize $J$ with respect to $\{c_t(s^t), n_t(s^t); \forall s^t \}_{t\geq0}$ and to minimize with respect -to $\{\theta(s^t); \forall s^t \}_{t\geq0}$. +to $\Phi$ and with respect to $\{\theta(s^t); \forall s^t \}_{t\geq0}$. The first-order conditions for the Ramsey problem for periods $t \geq 1$ and $t=0$, respectively, are @@ -383,7 +388,7 @@ For convenience, we suppress the time subscript and the index $s^t$ and obtain where we have imposed conditions {eq}`feas1_opt_tax` and {eq}`TSs_techr_opt_tax`. Equation {eq}`TS_barg` is one equation that can be solved to express the -unknown $c$ as a function of the exogenous variable $g$. +unknown $c$ as a function of the exogenous variable $g$ and the Lagrange multiplier $\Phi$. We also know that time $t=0$ quantities $c_0$ and $n_0$ satisfy @@ -400,13 +405,13 @@ We also know that time $t=0$ quantities $c_0$ and $n_0$ satisfy ``` Notice that a counterpart to $b_0$ does *not* appear -in {eq}`TS_barg`, so $c$ does not depend on it for $t \geq 1$. +in {eq}`TS_barg`, so $c$ does not *directly* depend on it for $t \geq 1$. But things are different for time $t=0$. An analogous argument for the $t=0$ equations {eq}`eqFONCRamsey0` leads to one equation that can be solved for $c_0$ as a function of the -pair $(g(s_0), b_0)$. +pair $(g(s_0), b_0)$ and the Lagrange multiplier $\Phi$. These outcomes mean that the following statement would be true even when government purchases are history-dependent functions $g_t(s^t)$ of the @@ -446,10 +451,13 @@ influences $c_0$ and $n_0$, there appears no analogous variable $b_t$ that influences $c_t$ and $n_t$ for $t \geq 1$. -The absence of $b_t$ as a determinant of the Ramsey allocation for +The absence of $b_t$ as a direct determinant of the Ramsey allocation for $t \geq 1$ and its presence for $t=0$ is a symptom of the *time-inconsistency* of a Ramsey plan. 
+Of course, $b_0$ affects the Ramsey allocation for $t \geq 1$ *indirectly* through
+its effect on $\Phi$.
+
 $\Phi$ has to take a value that assures that
 the household and the government’s budget constraints are both
 satisfied at a candidate Ramsey allocation and price system associated
@@ -471,7 +479,7 @@ $g(s)$ of $s$.
 We maintain these assumptions throughout the remainder of this
 lecture.
 
-### Determining the Multiplier
+### Determining the Lagrange Multiplier
 
 We complete the Ramsey plan by computing the Lagrange multiplier $\Phi$
 on the implementability constraint {eq}`TSs_cham1`.
@@ -518,7 +526,7 @@ Hence the equation shares much of the structure of a simple asset pricing equati
 $x_t$ being analogous to the price of the asset at time $t$.
 
 We learned earlier that for a Ramsey allocation
-$c_t(s^t), n_t(s^t)$ and $b_t(s_t|s^{t-1})$, and therefore
+$c_t(s^t), n_t(s^t)$, and $b_t(s_t|s^{t-1})$, and therefore
 also $x_t(s^t)$, are each functions of $s_t$ only, being
 independent of the history $s^{t-1}$ for $t \geq 1$.
 
@@ -535,7 +543,7 @@ u_c(s)
 where $s'$ denotes a next period value of $s$ and
 $x'(s')$ denotes a next period value of $x$.
 
-Equation {eq}`LSA_budget2` is easy to solve for $x(s)$ for
+Given $n(s)$ for $s = 1, \ldots, S$, equation {eq}`LSA_budget2` is easy to solve for $x(s)$ for
 $s = 1, \ldots , S$.
 
 If we let $\vec n, \vec g, \vec x$
@@ -600,7 +608,7 @@ Here is a computational algorithm:
 
 In summary, when $g_t$ is a time-invariant function of a Markov state
 $s_t$, a Ramsey plan can be constructed by solving $3S +3$
-equations in $S$ components each of $\vec c$, $\vec n$, and
+equations for $S$ components each of $\vec c$, $\vec n$, and
 $\vec x$ together with $n_0, c_0$, and $\Phi$.
 
 ### Time Inconsistency
 
@@ -630,10 +638,10 @@ We shall discuss this more below.
### Specification with CRRA Utility -In our calculations below and in a {doc}`subsequent lecture ` based on an extension of the Lucas-Stokey model +In our calculations below and in a {doc}`subsequent lecture ` based on an *extension* of the Lucas-Stokey model by Aiyagari, Marcet, Sargent, and Seppälä (2002) {cite}`aiyagari2002optimal`, we shall modify the one-period utility function assumed above. -(We adopted the preceding utility specification because it was the one used in the original {cite}`LucasStokey1983` paper) +(We adopted the preceding utility specification because it was the one used in the original Lucas-Stokey paper {cite}`LucasStokey1983`. We shall soon revert to that specification in a subsequent section.) We will modify their specification by instead assuming that the representative agent has utility function @@ -694,11 +702,10 @@ $b_0$: ```{math} :label: opt_tax_eqn_10 -b_0 + g_0 = \tau_0 (c_0 + g_0) + \frac{\bar b}{R_0} +b_0 + g_0 = \tau_0 (c_0 + g_0) + \beta \sum_{s=1}^S \Pi(s | s_0) \frac{u_c(s)}{u_{c,0}} b_1(s) ``` -where $R_0$ is the gross interest rate for the Markov state $s_0$ that is assumed to prevail at time $t =0$ -and $\tau_0$ is the time $t=0$ tax rate. +where $\tau_0$ is the time $t=0$ tax rate. In equation {eq}`opt_tax_eqn_10`, it is understood that @@ -706,8 +713,7 @@ In equation {eq}`opt_tax_eqn_10`, it is understood that :nowrap: \begin{aligned} -\tau_0 = 1 - \frac{u_{l,0}}{u_{c,0}} \\ -R_0 = \beta \sum_{s=1}^S \Pi(s | s_0) \frac{u_c(s)}{u_{c,0}} +\tau_0 = 1 - \frac{u_{l,0}}{u_{c,0}} \end{aligned} ``` @@ -721,11 +727,14 @@ The above steps are implemented in a class called SequentialAllocation ## Recursive Formulation of the Ramsey Problem -$x_t(s^t) = u_c(s^t) b_t(s_t | s^{t-1})$ in equation {eq}`LSA_budget` +We now temporarily revert to Lucas and Stokey's specification. + +We start by noting that $x_t(s^t) = u_c(s^t) b_t(s_t | s^{t-1})$ in equation {eq}`LSA_budget` appears to be a purely “forward-looking” variable. 
-But $x_t(s^t)$ is a also a natural candidate for a state variable in
-a recursive formulation of the Ramsey problem.
+But $x_t(s^t)$ is a natural candidate for a state variable in
+a recursive formulation of the Ramsey problem, one that records history-dependence and so is
+“backward-looking”.
 
 ### Intertemporal Delegation
 
@@ -733,8 +742,8 @@ To express a Ramsey plan recursively, we imagine that a time $0$
 Ramsey planner is followed by a sequence of continuation Ramsey planners
 at times $t = 1, 2, \ldots$.
 
-A “continuation Ramsey planner” at times $t \geq 1$ has a
-different objective function and faces different constraints and state variabls than does the
+A “continuation Ramsey planner” at time $t \geq 1$ has a
+different objective function and faces different constraints and state variables than does the
 Ramsey planner at time $t =0$.
 
 A key step in representing a Ramsey plan recursively is
@@ -762,12 +771,12 @@ Furthermore, the Ramsey planner cares about
 $(c_0(s_0), \ell_0(s_0))$, while
 continuation Ramsey planners do not.
 
 The time $0$ Ramsey planner
-hands a state-contingent function that make $x_1$ a function of $s_1$ to a time $1$
+hands a state-contingent function that makes $x_1$ a function of $s_1$ to a time $1$, state $s_1$
 continuation Ramsey planner.
 
 These lines of delegated authorities and
 responsibilities across time express the continuation Ramsey planners’
-obligations to implement their parts of the original Ramsey plan,
+obligations to implement their parts of an original Ramsey plan
 that had been designed once-and-for-all at time $0$.
 
 ### Two Bellman Equations
 
@@ -779,7 +788,7 @@ $(x_t, s_t)$.
 * Let $V(x, s)$ be the value of a **continuation Ramsey plan** at $x_t = x, s_t =s$ for $t \geq 1$.
 * Let $W(b, s)$ be the value of a **Ramsey plan** at time $0$ at $b_0=b$ and $s_0 = s$.
 
-We work backward by presenting a Bellman equation for
+We work backward by preparing a Bellman equation for
 $V(x,s)$ first, then a Bellman equation for
 $W(b,s)$.
### The Continuation Ramsey Problem @@ -795,7 +804,7 @@ V(x, s) = \max_{n, \{x'(s')\}} u(n-g(s), 1-n) + \beta \sum_{s'\in S} \Pi(s'| s) where maximization over $n$ and the $S$ elements of $x'(s')$ is subject to the single implementability constraint for -$t \geq 1$. +$t \geq 1$: ```{math} :label: LSA_Bellman1cons @@ -823,7 +832,7 @@ are $S+1$ time-invariant policy functions ### The Ramsey Problem -The Bellman equation for the time $0$ Ramsey planner is +The Bellman equation of the time $0$ Ramsey planner is ```{math} :label: LSA_Bellman2 @@ -865,7 +874,7 @@ continuation Ramsey planners. The value function $V(x_t, s_t)$ of the time $t$ continuation Ramsey planner equals $E_t \sum_{\tau = t}^\infty \beta^{\tau - t} u(c_t, l_t)$, where -the consumption and leisure processes are evaluated along the original +consumption and leisure processes are evaluated along the original time $0$ Ramsey plan. ### First-Order Conditions @@ -873,7 +882,7 @@ time $0$ Ramsey plan. Attach a Lagrange multiplier $\Phi_1(x,s)$ to constraint {eq}`LSA_Bellman1cons` and a Lagrange multiplier $\Phi_0$ to constraint {eq}`Bellman2cons`. -Time $t \geq 1$: the first-order conditions for the time $t \geq 1$ constrained +Time $t \geq 1$: First-order conditions for the time $t \geq 1$ constrained maximization problem on the right side of the continuation Ramsey planner’s Bellman equation {eq}`LSA_Bellman1` are @@ -947,7 +956,7 @@ formulated the Ramsey plan in the space of sequences. ### State Variable Degeneracy -Equations {eq}`LSAx0` and {eq}`LSAn0` imply that $\Phi_0 = \Phi_1$ +Equations {eq}`LSAenv` and {eq}`LSAx0` imply that $\Phi_0 = \Phi_1$ and that ```{math} @@ -1012,7 +1021,7 @@ through them, the value of initial government debt $b_0$. ### Recursive Implementation -The above steps are implemented in a class called RecursiveAllocation +The above steps are implemented in a class called `RecursiveAllocation`. 
```{code-cell} python3 :file: _static/lecture_specific/opt_tax_recur/recursive_allocation.py @@ -1020,6 +1029,8 @@ The above steps are implemented in a class called RecursiveAllocation ## Examples +We return to the setup with CRRA preferences described above. + ### Anticipated One-Period War This example illustrates in a simple setting how a Ramsey planner manages risk. @@ -1064,7 +1075,7 @@ and set $\sigma = 2$, $\gamma = 2$, and the discount factor $\beta = 0.9$. Note: For convenience in terms of matching our code, we have expressed utility as a function of $n$ rather than leisure $l$. -This utility function is implemented in the class CRRAutility +This utility function is implemented in the class `CRRAutility`. ```{code-cell} python3 :file: _static/lecture_specific/opt_tax_recur/crra_utility.py diff --git a/lectures/orth_proj.md b/lectures/orth_proj.md index 0a4cc757..78e19ae6 100644 --- a/lectures/orth_proj.md +++ b/lectures/orth_proj.md @@ -133,7 +133,7 @@ While the converse is not true, a kind of partial converse holds, as we'll {ref} What vector within a linear subspace of $\mathbb R^n$ best approximates a given vector in $\mathbb R^n$? -The next theorem provides answer to this question. +The next theorem answers this question. **Theorem** (OPT) Given $y \in \mathbb R^n$ and linear subspace $S \subset \mathbb R^n$, there exists a unique solution to the minimization problem @@ -287,7 +287,7 @@ Combining this result with {eq}`pob` verifies the claim. ### Projection onto an Orthonormal Basis -When the subspace onto which are projecting is orthonormal, computing the projection simplifies: +When a subspace onto which we project is orthonormal, computing the projection simplifies: **Theorem** If $\{u_1, \ldots, u_k\}$ is an orthonormal basis for $S$, then @@ -305,7 +305,7 @@ Clearly, $P y \in S$. We claim that $y - P y \perp S$ also holds. -It sufficies to show that $y - P y \perp$ any basis vector $u_i$ (why?). 
+It suffices to show that $y - P y \perp$ any basis vector $u_i$.
 
 This is true because
 
@@ -315,7 +315,9 @@ $$
 \langle u_i, u_j \rangle = 0
 $$
 
-## Projection Using Matrix Algebra
+(Why is this sufficient to establish the claim that $y - P y \perp S$?)
+
+## Projection Via Matrix Algebra
 
 Let $S$ be a linear subspace of $\mathbb R^n$
 and let $y \in \mathbb R^n$.
 
@@ -349,7 +351,7 @@ a := (X' X)^{-1} X' y
 $$
 
 An expression of the form $X a$ is precisely a linear combination of the
-columns of $X$, and hence an element of $S$.
+columns of $X$ and hence an element of $S$.
 
 Claim 2 is equivalent to the statement
 
@@ -359,7 +361,7 @@ y - X (X' X)^{-1} X' y \, \perp\, X b
 b \in \mathbb R^K
 $$
 
-This is true: If $b \in \mathbb R^K$, then
+To verify this, notice that if $b \in \mathbb R^K$, then
 
 $$
 (X b)' [y - X (X' X)^{-1} X'
@@ -413,9 +415,9 @@ basis.
 
 ### Application: Overdetermined Systems of Equations
 
-Let $y \in \mathbb R^n$ and let $X$ is $n \times k$ with linearly independent columns.
+Let $y \in \mathbb R^n$ and let $X$ be $n \times k$ with linearly independent columns.
 
-Given $X$ and $y$, we seek $b \in \mathbb R^k$ satisfying the system of linear equations $X b = y$.
+Given $X$ and $y$, we seek $b \in \mathbb R^k$ that satisfies the system of linear equations $X b = y$.
 
 If $n > k$ (more equations than unknowns), then $b$ is said to be **overdetermined**.
 
@@ -426,9 +428,9 @@ The best approach here is to
 * Accept that an exact solution may not exist.
 * Look instead for an approximate solution.
 
-By approximate solution, we mean a $b \in \mathbb R^k$ such that $X b$ is as close to $y$ as possible.
+By approximate solution, we mean a $b \in \mathbb R^k$ such that $X b$ is close to $y$.
 
-The next theorem shows that the solution is well defined and unique.
+The next theorem shows that a best approximation is well defined and unique.
 
 The proof uses the OPT.
 
@@ -492,7 +494,7 @@ The set $\mathcal{F}$ is sometimes called the hypothesis space.
The theory of statistical learning tells us that to prevent overfitting we should take the set $\mathcal{F}$ to be relatively simple. -If we let $\mathcal{F}$ be the class of linear functions $1/N$, the problem is +If we let $\mathcal{F}$ be the class of linear functions, the problem is $$ \min_{b \in \mathbb R^K} \; @@ -525,7 +527,7 @@ x_n := x_{nK} \end{array} \right) -= \text{ :math:`n`-th obs on all regressors} += n\text{-th obs on all regressors} $$ and @@ -663,7 +665,7 @@ Some rearranging gives $X = Q R$. ### Linear Regression via QR Decomposition -For matrices $X$ and $y$ that overdetermine $beta$ in the linear +For matrices $X$ and $y$ that overdetermine $\beta$ in the linear equation system $y = X \beta$, we found the least squares approximator $\hat \beta = (X' X)^{-1} X' y$. Using the QR decomposition $X = Q R$ gives @@ -726,7 +728,7 @@ $$ ### Exercise 1 If $x \in S$ and $x \in S^\perp$, then we have in particular -that $\langle x, x \rangle = 0$, ut then $x = 0$. +that $\langle x, x \rangle = 0$, but then $x = 0$. ### Exercise 2 diff --git a/lectures/permanent_income_dles.md b/lectures/permanent_income_dles.md index c84b8b6f..ed832d9a 100644 --- a/lectures/permanent_income_dles.md +++ b/lectures/permanent_income_dles.md @@ -42,8 +42,8 @@ tags: [hide-output] This lecture adds a third solution method for the linear-quadratic-Gaussian permanent income model with $\beta R = 1$, complementing the other two solution methods described in [Optimal Savings I: The Permanent Income Model](https://python-intro.quantecon.org/perm_income.html) and -[Optimal Savings II: LQ Techniques](https://python-intro.quantecon.org/perm_income_cons.html) and this Jupyter -notebook [http://nbviewer.jupyter.org/github/QuantEcon/QuantEcon.notebooks/blob/master/permanent_income.ipynb](http://nbviewer.jupyter.org/github/QuantEcon/QuantEcon.notebooks/blob/master/permanent_income.ipynb). 
+[Optimal Savings II: LQ Techniques](https://python-intro.quantecon.org/perm_income_cons.html) and [this Jupyter +notebook](http://nbviewer.jupyter.org/github/QuantEcon/QuantEcon.notebooks/blob/master/permanent_income.ipynb). The additional solution method uses the **DLE** class. @@ -274,8 +274,8 @@ $\begin{bmatrix} z_t \\ b_t \end{bmatrix}$. Consequently, the relevant elements of econ1.Sc are the same as in $-F$ occur when we apply other approaches to the same model in the lecture -[Optimal Savings II: LQ Techniques](https://python-intro.quantecon.org/perm_income_cons.html) and this Jupyter -notebook [http://nbviewer.jupyter.org/github/QuantEcon/QuantEcon.notebooks/blob/master/permanent_income.ipynb](http://nbviewer.jupyter.org/github/QuantEcon/QuantEcon.notebooks/blob/master/permanent_income.ipynb). +[Optimal Savings II: LQ Techniques](https://python-intro.quantecon.org/perm_income_cons.html) and [this Jupyter +notebook](http://nbviewer.jupyter.org/github/QuantEcon/QuantEcon.notebooks/blob/master/permanent_income.ipynb). The plot below quickly replicates the first two figures of that lecture and that notebook to confirm that the solutions are the same diff --git a/lectures/rosen_schooling_model.md b/lectures/rosen_schooling_model.md index ecfe8503..4664349b 100644 --- a/lectures/rosen_schooling_model.md +++ b/lectures/rosen_schooling_model.md @@ -120,7 +120,7 @@ k+1 matrix, and $\Theta_h$ is a k+1 x 1 matrix. This specification sets $N_t = h_{1t-1}$, $n_t = c_t$, $h_{\tau+1,t-1} = n_{t-(k-\tau)}$ for $\tau = 1,...,k$. -Below we set things up so that the number of years of education, k, can +Below we set things up so that the number of years of education, $k$, can be varied. ### Technology @@ -232,8 +232,8 @@ econ1 = DLE(info1, tech1, pref1) We create three other instances by: 1. Raising $\alpha_d$ to 2 -1. Raising k to 7 -1. Raising k to 10 +1. Raising $k$ to 7 +1. 
Raising $k$ to 10 ```{code-cell} python3 α_d = np.array([[2]]) @@ -330,7 +330,7 @@ ax2.set_title('Response of $N_t$ to a demand shock') plt.show() ``` -Both panels in the above figure show that raising k lowers the effect of +Both panels in the above figure show that raising $k$ lowers the effect of a positive demand shock on entry into the engineering profession. Increasing the number of periods of schooling lowers diff --git a/lectures/smoothing.md b/lectures/smoothing.md index 6af3ce30..81ce6339 100644 --- a/lectures/smoothing.md +++ b/lectures/smoothing.md @@ -40,7 +40,7 @@ tags: [hide-output] This lecture describes two types of consumption-smoothing models. -* one is in the **complete markets** tradition of Kenneth Arrow +* one is in the **complete markets** tradition of [Kenneth Arrow](https://en.wikipedia.org/wiki/Kenneth_Arrow) * the other is in the **incomplete markets** tradition of Hall {cite}`Hall1978` *Complete markets* allow a consumer to buy or sell claims contingent on all possible states of the world. @@ -192,10 +192,16 @@ $$ Please note that $$ -E_t b_{t+1} = \int \phi_{t+1}(x_{t+1} | A x_t, C C') b_{t+1}(x_{t+1}) d x_{t+1} +\beta E_t b_{t+1} = \beta \int \phi_{t+1}(x_{t+1} | A x_t, C C') b_{t+1}(x_{t+1}) d x_{t+1} $$ -which verifies that $E_t b_{t+1}$ is the **value** of time $t+1$ state-contingent claims on time $t+1$ consumption issued by the consumer at time $t$ +or + +$$ +\beta E_t b_{t+1} = \int q_{t+1}(x_{t+1}| x_t) b_{t+1}(x_{t+1}) d x_{t+1} +$$ + +which verifies that $\beta E_t b_{t+1}$ is the **value** of time $t+1$ state-contingent claims on time $t+1$ consumption issued by the consumer at time $t$ We can solve the time $t$ budget constraint forward to obtain @@ -220,7 +226,7 @@ $$ But in the complete markets version, it is tractable to assume a more general utility function that satisfies $u' > 0$ and $u'' < 0$. 
-The first-order conditions for the consumer's problem with complete +First-order conditions for the consumer's problem with complete markets and our assumption about Arrow securities prices are $$ @@ -615,7 +621,7 @@ consumption $\bar c$ and indicated how that level depends on the underlying spec ### Code -Here's some code that, among other things, contains a function called consumption_complete(). +Here's some code that, among other things, contains a function called `consumption_complete()`. This function computes $\{ b(i) \}_{i=1}^{N}, \bar c$ as outcomes given a set of parameters for the general case with $N$ Markov states under the assumption of complete markets @@ -896,7 +902,7 @@ markets model ### The Incomplete Markets Model -The code above also contains a function called consumption_incomplete() that uses {eq}`cs_12` and {eq}`cs_13` to +The code above also contains a function called `consumption_incomplete()` that uses {eq}`cs_12` and {eq}`cs_13` to * simulate paths of $y_t, c_t, b_{t+1}$ * plot these against values of $\bar c, b(s_1), b(s_2)$ found in a corresponding complete markets economy diff --git a/lectures/smoothing_tax.md b/lectures/smoothing_tax.md index 825bf2b5..c639d2c7 100644 --- a/lectures/smoothing_tax.md +++ b/lectures/smoothing_tax.md @@ -105,7 +105,7 @@ To exploit the isomorphism between consumption-smoothing and tax-smoothing model ### Code -Among other things, this code contains a function called consumption_complete(). +Among other things, this code contains a function called `consumption_complete()`. 
This function computes $\{ b(i) \}_{i=1}^{N}, \bar c$ as outcomes given a set of parameters for the general case with $N$ Markov states under the assumption of complete markets @@ -235,7 +235,7 @@ def consumption_incomplete(cp, s_path): ### Revisiting the consumption-smoothing model -The code above also contains a function called consumption_incomplete() that uses {eq}`cs_12` and {eq}`cs_13` to +The code above also contains a function called `consumption_incomplete()` that uses {eq}`cs_12` and {eq}`cs_13` to * simulate paths of $y_t, c_t, b_{t+1}$ * plot these against values of $\bar c, b(s_1), b(s_2)$ found in a corresponding complete markets economy diff --git a/lectures/von_neumann_model.md b/lectures/von_neumann_model.md index c7f13bd1..0aec38e4 100644 --- a/lectures/von_neumann_model.md +++ b/lectures/von_neumann_model.md @@ -160,8 +160,8 @@ class Neumann(object): f = lambda α: ((B - α * A) @ np.ones((n, 1))).max() g = lambda β: (np.ones((1, m)) @ (B - β * A)).min() - UB = np.asscalar(fsolve(f, 1)) # Upper bound for α, β - LB = np.asscalar(fsolve(g, 2)) # Lower bound for α, β + UB = fsolve(f, 1).item() # Upper bound for α, β + LB = fsolve(g, 2).item() # Lower bound for α, β return LB, UB @@ -220,7 +220,7 @@ class Neumann(object): b_eq = 1 res = linprog(c, A_ub=A_iq, b_ub=b_iq, A_eq=A_eq, b_eq=b_eq, - bounds=bounds, options=dict(bland=True, tol=1e-7)) + bounds=bounds) else: # Solve the dual LP (for details see the description) @@ -238,7 +238,7 @@ class Neumann(object): b_eq = 1 res = linprog(c, A_ub=A_iq, b_ub=b_iq, A_eq=A_eq, b_eq=b_eq, - bounds=bounds, options=dict(bland=True, tol=1e-7)) + bounds=bounds) if res.status != 0: print(res.message) @@ -567,7 +567,7 @@ The maximal value is called the *technological expansion rate* and is denoted by $\alpha_0$. The associated intensity vector $x_0$ is the *optimal intensity vector*. 
-**Definition:** The economic expansion problem* (EEP) for
+**Definition:** The economic expansion problem (EEP) for
 $(A,B)$ is to find a semi-positive $n$-vector $p>0$
 and a number $\beta\in\mathbb{R}$ that satisfy
 
@@ -641,7 +641,7 @@ $\gamma^{*}$ (i.e., if it is *oversupplied*), then its price
 must be zero; and that if any activity provides negative profit, it must
 be unused.
 
-Therefore, the conditions staed in Theorem I ex encode all equilibrium conditions.
+Therefore, the conditions stated in Theorem I encode all equilibrium conditions.
 
 So Theorem I essentially states that under Assumptions I and II there
 always exists an equilibrium $\left(\gamma^{*}, x_0, p_0\right)$
@@ -757,11 +757,13 @@ V(-A) < 0\quad\quad \text{and}\quad\quad V(B)>0
 $$
 
 ```{note}
-*Proof (Sketch)*: * $\Rightarrow$ $V(B)>0$ implies
+*Proof (Sketch)*:
+* $\Rightarrow$ $V(B)>0$ implies
 $x_0^T B \gg \mathbf{0}$, where $x_0$ is a maximizing vector. Since
 $B$ is non-negative, this requires that each column of $B$
 has at least one positive entry, which is
-Assumption I. * $\Leftarrow$ From Assumption I and the fact
+Assumption I.
+* $\Leftarrow$ From Assumption I and the fact
 that $p>\mathbf{0}$, it follows that $Bp > \mathbf{0}$. This implies
 that the maximizing player can always choose $x$ so that $x^TBp>0$
 so that it must be the case