diff --git a/examples/3dplot.py b/examples/3dplot.py index 12a6c5d09..753f721d0 100644 --- a/examples/3dplot.py +++ b/examples/3dplot.py @@ -3,14 +3,15 @@ import numpy as np from matplotlib import cm + def f(x, y): return np.cos(x**2 + y**2) / (1 + x**2 + y**2) xgrid = np.linspace(-3, 3, 50) ygrid = xgrid -x, y = np.meshgrid(xgrid, ygrid) +x, y = np.meshgrid(xgrid, ygrid) -fig = plt.figure(figsize=(8,6)) +fig = plt.figure(figsize=(8, 6)) ax = fig.add_subplot(111, projection='3d') ax.plot_surface(x, y, diff --git a/examples/3dvec.py b/examples/3dvec.py index 4669e4137..af9d7cfe7 100644 --- a/examples/3dvec.py +++ b/examples/3dvec.py @@ -59,7 +59,5 @@ def f(x, y): x2, y2 = np.meshgrid(xr2, yr2) z2 = f(x2, y2) ax.plot_surface(x2, y2, z2, rstride=1, cstride=1, cmap=cm.jet, - linewidth=0, antialiased=True, alpha=0.2) + linewidth=0, antialiased=True, alpha=0.2) plt.show() - - diff --git a/examples/ar1_sd.py b/examples/ar1_sd.py index 489a1f55e..2f5e49c9c 100644 --- a/examples/ar1_sd.py +++ b/examples/ar1_sd.py @@ -4,6 +4,7 @@ import numpy as np import matplotlib.pyplot as plt + def ar1_sd(phi, omega): return 1 / (1 - 2 * phi * np.cos(omega) + phi**2) diff --git a/examples/beta-binomial.py b/examples/beta-binomial.py index 05b7e756b..09c9bdc59 100644 --- a/examples/beta-binomial.py +++ b/examples/beta-binomial.py @@ -7,6 +7,7 @@ import matplotlib.pyplot as plt import numpy as np + def gen_probs(n, a, b): probs = np.zeros(n+1) for k in range(n+1): @@ -22,4 +23,3 @@ def gen_probs(n, a, b): ax.plot(list(range(0, n+1)), gen_probs(n, a, b), '-o', label=ab_label) ax.legend() plt.show() - diff --git a/examples/binom_df.py b/examples/binom_df.py index d092eb59d..c41e48ea9 100644 --- a/examples/binom_df.py +++ b/examples/binom_df.py @@ -1,4 +1,3 @@ -import numpy as np import matplotlib.pyplot as plt from scipy.stats import binom @@ -6,7 +5,7 @@ plt.subplots_adjust(hspace=0.4) axes = axes.flatten() ns = [1, 2, 4, 8] -dom = list(range(9)) +dom = list(range(9)) for ax, n in zip(axes, ns): b = binom(n, 0.5) @@ -18,4 +17,3 @@ ax.set_title(r'$n = {}$'.format(n)) fig.show() - diff --git a/examples/boxplot_example.py b/examples/boxplot_example.py index f3329bd68..d3c797986 100644 --- a/examples/boxplot_example.py +++ b/examples/boxplot_example.py @@ -11,7 +11,5 @@ ax.boxplot([x, y, z]) ax.set_xticks((1, 2, 3)) ax.set_ylim(-2, 14) -ax.set_xticklabels((r'$X$', r'$Y$', r'$Z$'), fontsize=16) +ax.set_xticklabels((r'$X$', r'$Y$', r'$Z$'), fontsize=16) plt.show() - - diff --git a/examples/career_vf_plot.py b/examples/career_vf_plot.py index c3466ddff..31c96b60b 100644 --- a/examples/career_vf_plot.py +++ b/examples/career_vf_plot.py @@ -19,7 +19,7 @@ v = qe.compute_fixed_point(wp.bellman_operator, v_init) # === plot value function === # -fig = plt.figure(figsize=(8,6)) +fig = plt.figure(figsize=(8, 6)) ax = fig.add_subplot(111, projection='3d') tg, eg = np.meshgrid(wp.theta, wp.epsilon) ax.plot_surface(tg, diff --git a/examples/cauchy_samples.py b/examples/cauchy_samples.py index a07e87206..c0a3e0d94 100644 --- a/examples/cauchy_samples.py +++ b/examples/cauchy_samples.py @@ -21,11 +21,9 @@ sample_mean[i] = np.mean(data[:i]) # == Plot == # - ax.plot(list(range(n)), sample_mean, 'r-', lw=3, alpha=0.6, label=r'$\bar X_n$') + ax.plot(list(range(n)), sample_mean, 'r-', lw=3, alpha=0.6, + label=r'$\bar X_n$') ax.plot(list(range(n)), [0] * n, 'k--', lw=0.5) ax.legend() fig.show() - - - diff --git a/examples/clt3d.py b/examples/clt3d.py index b0566fda1..0834c3787 100644 --- a/examples/clt3d.py +++ b/examples/clt3d.py @@ -8,24 
+8,25 @@ """ import numpy as np -from scipy.stats import beta, bernoulli, gaussian_kde +from scipy.stats import beta, gaussian_kde from mpl_toolkits.mplot3d import Axes3D from matplotlib.collections import PolyCollection import matplotlib.pyplot as plt -beta_dist = beta(2, 2) +beta_dist = beta(2, 2) + def gen_x_draws(k): """ - Returns a flat array containing k independent draws from the + Returns a flat array containing k independent draws from the distribution of X, the underlying random variable. This distribution is itself a convex combination of three beta distributions. """ bdraws = beta_dist.rvs((3, k)) # == Transform rows, so each represents a different distribution == # - bdraws[0,:] -= 0.5 - bdraws[1,:] += 0.6 - bdraws[2,:] -= 1.1 + bdraws[0, :] -= 0.5 + bdraws[1, :] += 0.6 + bdraws[2, :] -= 1.1 # == Set X[i] = bdraws[j, i], where j is a random draw from {0, 1, 2} == # js = np.random.random_integers(0, 2, size=k) X = bdraws[js, np.arange(k)] @@ -40,7 +41,7 @@ def gen_x_draws(k): # == Form a matrix Z such that each column is reps independent draws of X == # Z = np.empty((reps, nmax)) for i in range(nmax): - Z[:,i] = gen_x_draws(reps) + Z[:, i] = gen_x_draws(reps) # == Take cumulative sum across columns S = Z.cumsum(axis=1) # == Multiply j-th column by sqrt j == # @@ -52,23 +53,23 @@ def gen_x_draws(k): ax = fig.gca(projection='3d') a, b = -3, 3 -gs = 100 +gs = 100 xs = np.linspace(a, b, gs) # == Build verts == # greys = np.linspace(0.3, 0.7, nmax) verts = [] for n in ns: - density = gaussian_kde(Y[:,n-1]) + density = gaussian_kde(Y[:, n-1]) ys = density(xs) verts.append(list(zip(xs, ys))) -poly = PolyCollection(verts, facecolors = [str(g) for g in greys]) +poly = PolyCollection(verts, facecolors=[str(g) for g in greys]) poly.set_alpha(0.85) ax.add_collection3d(poly, zs=ns, zdir='x') -#ax.text(np.mean(rhos), a-1.4, -0.02, r'$\beta$', fontsize=16) -#ax.text(np.max(rhos)+0.016, (a+b)/2, -0.02, r'$\log(y)$', fontsize=16) +# ax.text(np.mean(rhos), a-1.4, -0.02, r'$\beta$', fontsize=16) +# ax.text(np.max(rhos)+0.016, (a+b)/2, -0.02, r'$\log(y)$', fontsize=16) ax.set_xlim3d(1, nmax) ax.set_xticks(ns) ax.set_xlabel("n") @@ -77,5 +78,3 @@ def gen_x_draws(k): ax.set_zlim3d(0, 0.4) ax.set_zticks((0.2, 0.4)) plt.show() - - diff --git a/examples/dice.py b/examples/dice.py index e19740fcb..115be4940 100644 --- a/examples/dice.py +++ b/examples/dice.py @@ -4,13 +4,13 @@ import random + class Dice: faces = (1, 2, 3, 4, 5, 6) def __init__(self): - self.current_face = 1 + self.current_face = 1 def roll(self): self.current_face = random.choice(Dice.faces) - diff --git a/examples/duopoly_lqnash.py b/examples/duopoly_lqnash.py index a6071d9d4..b2781bfd2 100644 --- a/examples/duopoly_lqnash.py +++ b/examples/duopoly_lqnash.py @@ -10,20 +10,15 @@ """ from __future__ import division -import sys -import numpy as np -import scipy.linalg as la -import matplotlib.pyplot as plt -from numpy import sqrt, max, eye, dot, zeros, cumsum, array -from numpy.random import randn +from numpy import array, eye from quantecon.lqnash import nnash -#---------------------------------------------------------------------# +# ---------------------------------------------------------------------# # Set up parameter values and LQ matrices # Remember state is x_t = [1, y_{1, t}, y_{2, t}] and # control is u_{i, t} = [y_{i, t+1} - y_{i, t}] -#---------------------------------------------------------------------# +# ---------------------------------------------------------------------# a0 = 10. a1 = 1. beta = 1. 
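# A minimal standalone sketch of the mixture-draw idea used in clt3d.py
# above: each draw picks one of three shifted beta(2, 2) components at
# random.  np.random.randint stands in for the deprecated
# np.random.random_integers that the file itself still calls; the helper
# name mixture_draws is illustrative, not part of the patch.
import numpy as np
from scipy.stats import beta


def mixture_draws(k, seed=0):
    "Return k draws from an equal-weight mixture of three shifted betas."
    rng = np.random.RandomState(seed)
    bdraws = beta(2, 2).rvs((3, k), random_state=rng)
    bdraws[0, :] -= 0.5      # component means become 0.0, 1.1 and -0.6
    bdraws[1, :] += 0.6
    bdraws[2, :] -= 1.1
    js = rng.randint(0, 3, size=k)       # which component each draw uses
    return bdraws[js, np.arange(k)]


# print(mixture_draws(5))  # e.g. five draws from the mixture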
@@ -45,9 +40,9 @@ q2 = array([[-.5*d]]) -#---------------------------------------------------------------------# +# ---------------------------------------------------------------------# # Solve using QE's nnash function -#---------------------------------------------------------------------# +# ---------------------------------------------------------------------# f1, f2, p1, p2 = nnash(a, b1, b2, r1, r2, q1, q2, 0., 0., 0., 0., 0., 0., tol=1e-8, max_iter=1000) diff --git a/examples/duopoly_mpe.py b/examples/duopoly_mpe.py index 54b65683f..618e1d19d 100644 --- a/examples/duopoly_mpe.py +++ b/examples/duopoly_mpe.py @@ -39,11 +39,10 @@ # == Solve using QE's nnash function == # F1, F2, P1, P2 = qe.nnash(A, B1, B2, R1, R2, Q1, Q2, S1, S2, W1, W2, M1, M2, - beta=beta) + beta=beta) # == Display policies == # print("Computed policies for firm 1 and firm 2:\n") print("F1 = {}".format(F1)) print("F2 = {}".format(F2)) print("\n") - diff --git a/examples/eigenvec.py b/examples/eigenvec.py index 813e821cc..3af24e498 100644 --- a/examples/eigenvec.py +++ b/examples/eigenvec.py @@ -13,7 +13,7 @@ (2, 1)) A = np.array(A) evals, evecs = eig(A) -evecs = evecs[:,0], evecs[:,1] +evecs = evecs[:, 0], evecs[:, 1] fig, ax = plt.subplots() # Set the axes through the origin @@ -22,30 +22,30 @@ for spine in ['right', 'top']: ax.spines[spine].set_color('none') ax.grid(alpha=0.4) - + xmin, xmax = -3, 3 ymin, ymax = -3, 3 ax.set_xlim(xmin, xmax) ax.set_ylim(ymin, ymax) -#ax.set_xticks(()) -#ax.set_yticks(()) +# ax.set_xticks(()) +# ax.set_yticks(()) # Plot each eigenvector for v in evecs: - ax.annotate('', xy=v, xytext=(0, 0), - arrowprops=dict(facecolor='blue', - shrink=0, - alpha=0.6, - width=0.5)) + ax.annotate('', xy=v, xytext=(0, 0), + arrowprops=dict(facecolor='blue', + shrink=0, + alpha=0.6, + width=0.5)) # Plot the image of each eigenvector for v in evecs: v = np.dot(A, v) - ax.annotate('', xy=v, xytext=(0, 0), - arrowprops=dict(facecolor='red', - shrink=0, - alpha=0.6, - width=0.5)) + ax.annotate('', xy=v, xytext=(0, 0), + arrowprops=dict(facecolor='red', + shrink=0, + alpha=0.6, + width=0.5)) # Plot the lines they run through x = np.linspace(xmin, xmax, 3) @@ -55,4 +55,3 @@ plt.show() - diff --git a/examples/evans_sargent.py b/examples/evans_sargent.py index 71b779744..27b28a4e1 100644 --- a/examples/evans_sargent.py +++ b/examples/evans_sargent.py @@ -22,6 +22,7 @@ from quantecon.matrix_eqn import solve_discrete_lyapunov from scipy.optimize import root + def computeG(A0, A1, d, Q0, tau0, beta, mu): """ Compute government income given mu and return tax revenues and @@ -107,6 +108,7 @@ def computeG(A0, A1, d, Q0, tau0, beta, mu): Q0 = 1000.0 tau0 = 0.0 + def gg(mu): """ Computes the tax revenues for the government given Lagrangian @@ -175,4 +177,3 @@ def gg(mu): print(y) print("-F") print(-F) - diff --git a/examples/evans_sargent_plot1.py b/examples/evans_sargent_plot1.py index fab6c5546..a9b34bc9d 100644 --- a/examples/evans_sargent_plot1.py +++ b/examples/evans_sargent_plot1.py @@ -7,7 +7,7 @@ """ import numpy as np import matplotlib.pyplot as plt -from evans_sargent import T, y +from evans_sargent import T, y tt = np.arange(T) # tt is used to make the plot time index correct. 
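# eigenvec.py above plots each eigenvector of a 2 x 2 matrix together with
# its image under A.  The underlying computation is just scipy.linalg.eig;
# a quick self-contained check (a sketch with an arbitrary symmetric
# matrix) that A v = lambda v holds for every eigenpair:
import numpy as np
from scipy.linalg import eig

A = np.array([[1, 2],
              [2, 1]])
evals, evecs = eig(A)
for lam, v in zip(evals, (evecs[:, 0], evecs[:, 1])):
    # columns of evecs are the (unit-length) eigenvectors
    assert np.allclose(np.dot(A, v), lam * v)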
@@ -20,8 +20,8 @@ ax.set_xlim(0, 15) bbox = (0., 1.02, 1., .102) -legend_args = {'bbox_to_anchor' : bbox, 'loc' : 3, 'mode' : 'expand'} -p_args = {'lw' : 2, 'alpha' : 0.7} +legend_args = {'bbox_to_anchor': bbox, 'loc': 3, 'mode': 'expand'} +p_args = {'lw': 2, 'alpha': 0.7} ax = axes[0] ax.plot(tt, y[1, :], 'b-', label="output", **p_args) @@ -42,4 +42,3 @@ ax.set_xlabel(r'time', fontsize=16) plt.show() - diff --git a/examples/evans_sargent_plot2.py b/examples/evans_sargent_plot2.py index cee4b7f98..dcf9923b0 100644 --- a/examples/evans_sargent_plot2.py +++ b/examples/evans_sargent_plot2.py @@ -7,7 +7,7 @@ """ import numpy as np import matplotlib.pyplot as plt -from evans_sargent import T, y, uhatdif, tauhatdif, mu, G, GPay +from evans_sargent import T, uhatdif, tauhatdif, mu, G tt = np.arange(T) # tt is used to make the plot time index correct. tt2 = np.arange(T-1) @@ -21,18 +21,20 @@ ax.set_xlim(-0.5, 15) bbox = (0., 1.02, 1., .102) -legend_args = {'bbox_to_anchor' : bbox, 'loc' : 3, 'mode' : 'expand'} -p_args = {'lw' : 2, 'alpha' : 0.7} +legend_args = {'bbox_to_anchor': bbox, 'loc': 3, 'mode': 'expand'} +p_args = {'lw': 2, 'alpha': 0.7} ax = axes[0] -ax.plot(tt2, tauhatdif, label=r'time inconsistency differential for tax rate', **p_args) +ax.plot(tt2, tauhatdif, label=r'time inconsistency differential for tax rate', + **p_args) ax.set_ylabel(r"$\Delta\tau$", fontsize=16) ax.set_ylim(-0.1, 1.4) ax.set_yticks((0.0, 0.4, 0.8, 1.2)) ax.legend(ncol=1, **legend_args) ax = axes[1] -ax.plot(tt, uhatdif, label=r'time inconsistency differential for $u$', **p_args) +ax.plot(tt, uhatdif, label=r'time inconsistency differential for $u$', + **p_args) ax.set_ylabel(r"$\Delta u$", fontsize=16) ax.set_ylim(-3, .1) ax.set_yticks((-3.0, -2.0, -1.0, 0.0)) @@ -55,4 +57,4 @@ ax.set_xlabel(r'time', fontsize=16) plt.show() -#lines = plt.plot(tt, GPay, "o") +# lines = plt.plot(tt, GPay, "o") diff --git a/examples/gaussian_contours.py b/examples/gaussian_contours.py index ee4191586..9b24abbb9 100644 --- a/examples/gaussian_contours.py +++ b/examples/gaussian_contours.py @@ -2,7 +2,7 @@ Filename: gaussian_contours.py Authors: John Stachurski and Thomas Sargent -Plots of bivariate Gaussians to illustrate the Kalman filter. +Plots of bivariate Gaussians to illustrate the Kalman filter. 
""" from scipy import linalg @@ -31,11 +31,12 @@ y_grid = np.linspace(-3.1, 1.7, 100) X, Y = np.meshgrid(x_grid, y_grid) + def gen_gaussian_plot_vals(mu, C): "Z values for plotting the bivariate Gaussian N(mu, C)" m_x, m_y = float(mu[0]), float(mu[1]) - s_x, s_y = np.sqrt(C[0,0]), np.sqrt(C[1,1]) - s_xy = C[0,1] + s_x, s_y = np.sqrt(C[0, 0]), np.sqrt(C[1, 1]) + s_xy = C[0, 1] return bivariate_normal(X, Y, s_x, s_y, m_x, m_y, s_xy) fig, ax = plt.subplots() @@ -44,12 +45,14 @@ def gen_gaussian_plot_vals(mu, C): # == Code for the 4 plots, choose one below == # + def plot1(): Z = gen_gaussian_plot_vals(x_hat, Sigma) ax.contourf(X, Y, Z, 6, alpha=0.6, cmap=cm.jet) cs = ax.contour(X, Y, Z, 6, colors="black") ax.clabel(cs, inline=1, fontsize=10) + def plot2(): Z = gen_gaussian_plot_vals(x_hat, Sigma) ax.contourf(X, Y, Z, 6, alpha=0.6, cmap=cm.jet) @@ -57,19 +60,21 @@ def plot2(): ax.clabel(cs, inline=1, fontsize=10) ax.text(float(y[0]), float(y[1]), r"$y$", fontsize=20, color="black") + def plot3(): Z = gen_gaussian_plot_vals(x_hat, Sigma) cs1 = ax.contour(X, Y, Z, 6, colors="black") ax.clabel(cs1, inline=1, fontsize=10) M = Sigma * G.T * linalg.inv(G * Sigma * G.T + R) - x_hat_F = x_hat + M * (y - G * x_hat) - Sigma_F = Sigma - M * G * Sigma + x_hat_F = x_hat + M * (y - G * x_hat) + Sigma_F = Sigma - M * G * Sigma new_Z = gen_gaussian_plot_vals(x_hat_F, Sigma_F) cs2 = ax.contour(X, Y, new_Z, 6, colors="black") ax.clabel(cs2, inline=1, fontsize=10) ax.contourf(X, Y, new_Z, 6, alpha=0.6, cmap=cm.jet) ax.text(float(y[0]), float(y[1]), r"$y$", fontsize=20, color="black") + def plot4(): # Density 1 Z = gen_gaussian_plot_vals(x_hat, Sigma) @@ -77,13 +82,13 @@ def plot4(): ax.clabel(cs1, inline=1, fontsize=10) # Density 2 M = Sigma * G.T * linalg.inv(G * Sigma * G.T + R) - x_hat_F = x_hat + M * (y - G * x_hat) - Sigma_F = Sigma - M * G * Sigma + x_hat_F = x_hat + M * (y - G * x_hat) + Sigma_F = Sigma - M * G * Sigma Z_F = gen_gaussian_plot_vals(x_hat_F, Sigma_F) cs2 = ax.contour(X, Y, Z_F, 6, colors="black") ax.clabel(cs2, inline=1, fontsize=10) # Density 3 - new_x_hat = A * x_hat_F + new_x_hat = A * x_hat_F new_Sigma = A * Sigma_F * A.T + Q new_Z = gen_gaussian_plot_vals(new_x_hat, new_Sigma) cs3 = ax.contour(X, Y, new_Z, 6, colors="black") diff --git a/examples/ifp_savings_plots.py b/examples/ifp_savings_plots.py index 3a211a9d3..3340bb38b 100644 --- a/examples/ifp_savings_plots.py +++ b/examples/ifp_savings_plots.py @@ -13,7 +13,9 @@ # === solve for optimal consumption === # m = ConsumerProblem(r=0.03, grid_max=4) v_init, c_init = m.initialize() -c = compute_fixed_point(m.coleman_operator, c_init) #Coleman Operator takes in (c)? + +# Coleman Operator takes in (c)? +c = compute_fixed_point(m.coleman_operator, c_init) a = m.asset_grid R, z_vals = m.R, m.z_vals diff --git a/examples/illustrates_clt.py b/examples/illustrates_clt.py index 8d2ef6740..53c427b87 100644 --- a/examples/illustrates_clt.py +++ b/examples/illustrates_clt.py @@ -2,33 +2,33 @@ Filename: illustrates_clt.py Authors: John Stachurski and Thomas J. Sargent -Visual illustration of the central limit theorem. Histograms draws of +Visual illustration of the central limit theorem. Histograms draws of Y_n := \sqrt{n} (\bar X_n - \mu) for a given distribution of X_i, and a given choice of n. 
""" import numpy as np -from scipy.stats import expon, norm, poisson +from scipy.stats import expon, norm import matplotlib.pyplot as plt from matplotlib import rc # == Specifying font, needs LaTeX integration == # -rc('font',**{'family':'serif','serif':['Palatino']}) +rc('font', **{'family': 'serif', 'serif': ['Palatino']}) rc('text', usetex=True) # == Set parameters == # n = 250 # Choice of n -k = 100000 # Number of draws of Y_n -distribution = expon(2) # Exponential distribution, lambda = 1/2 +k = 100000 # Number of draws of Y_n +distribution = expon(2) # Exponential distribution, lambda = 1/2 mu, s = distribution.mean(), distribution.std() # == Draw underlying RVs. Each row contains a draw of X_1,..,X_n == # -data = distribution.rvs((k, n)) +data = distribution.rvs((k, n)) # == Compute mean of each row, producing k draws of \bar X_n == # -sample_means = data.mean(axis=1) +sample_means = data.mean(axis=1) # == Generate observations of Y_n == # -Y = np.sqrt(n) * (sample_means - mu) +Y = np.sqrt(n) * (sample_means - mu) # == Plot == # fig, ax = plt.subplots() @@ -40,6 +40,3 @@ ax.legend() plt.show() - - - diff --git a/examples/illustrates_lln.py b/examples/illustrates_lln.py index 6d37660be..49ff2e880 100644 --- a/examples/illustrates_lln.py +++ b/examples/illustrates_lln.py @@ -13,12 +13,12 @@ n = 100 # == Arbitrary collection of distributions == # -distributions = {"student's t with 10 degrees of freedom" : t(10), - "beta(2, 2)" : beta(2, 2), - "lognormal LN(0, 1/2)" : lognorm(0.5), - "gamma(5, 1/2)" : gamma(5, scale=2), - "poisson(4)" : poisson(4), - "exponential with lambda = 1" : expon(1)} +distributions = {"student's t with 10 degrees of freedom": t(10), + "beta(2, 2)": beta(2, 2), + "lognormal LN(0, 1/2)": lognorm(0.5), + "gamma(5, 1/2)": gamma(5, scale=2), + "poisson(4)": poisson(4), + "exponential with lambda = 1": expon(1)} # == Create a figure and some axes == # num_plots = 3 @@ -26,10 +26,10 @@ # == Set some plotting parameters to improve layout == # bbox = (0., 1.02, 1., .102) -legend_args = {'ncol' : 2, - 'bbox_to_anchor' : bbox, - 'loc' : 3, - 'mode' : 'expand'} +legend_args = {'ncol': 2, + 'bbox_to_anchor': bbox, + 'loc': 3, + 'mode': 'expand'} plt.subplots_adjust(hspace=0.5) for ax in axes: diff --git a/examples/lin_interp_3d_plot.py b/examples/lin_interp_3d_plot.py index 5e1d37a94..50413e770 100644 --- a/examples/lin_interp_3d_plot.py +++ b/examples/lin_interp_3d_plot.py @@ -4,8 +4,7 @@ Authors: John Stachurski, Thomas J. 
Sargent LastModified: 21/08/2013 """ - -from scipy.interpolate import interp2d, LinearNDInterpolator +from scipy.interpolate import LinearNDInterpolator import matplotlib.pyplot as plt from mpl_toolkits.mplot3d.axes3d import Axes3D import numpy as np @@ -13,8 +12,9 @@ alpha = 0.7 phi_ext = 2 * 3.14 * 0.5 + def f(a, b): - #return 2 + alpha - 2 * np.cos(b)*np.cos(a) - alpha * np.cos(phi_ext - 2*b) + # return 2 + alpha - 2 * np.cos(b)*np.cos(a) - alpha*np.cos(phi_ext - 2*b) return a + np.sqrt(b) x_max = 3 @@ -30,7 +30,7 @@ def f(a, b): # === generate the function values on the grid === # Z0 = np.empty(Nx0 * Ny0) for i in range(len(Z0)): - a, b = points[i,:] + a, b = points[i, :] Z0[i] = f(a, b) g = LinearNDInterpolator(points, Z0) @@ -42,15 +42,15 @@ def f(a, b): X1, Y1 = np.meshgrid(x1, y1) # === the approximating function, as a matrix, for plotting === # -#ZA = np.empty((Ny1, Nx1)) -#for i in range(Ny1): +# ZA = np.empty((Ny1, Nx1)) +# for i in range(Ny1): # for j in range(Nx1): # ZA[i, j] = g(x1[j], y1[i]) ZA = g(X1, Y1) ZF = f(X1, Y1) # === plot === # -fig = plt.figure(figsize=(8,6)) +fig = plt.figure(figsize=(8, 6)) ax = fig.add_subplot(1, 1, 1, projection='3d') p = ax.plot_wireframe(X1, Y1, ZF, rstride=4, cstride=4) plt.show() diff --git a/examples/linapprox.py b/examples/linapprox.py index 089fc6bed..5fd147702 100644 --- a/examples/linapprox.py +++ b/examples/linapprox.py @@ -2,12 +2,14 @@ import scipy as sp import matplotlib.pyplot as plt + def f(x): - y1 = 2 * np.cos(6 * x) + np.sin(14 * x) + y1 = 2 * np.cos(6 * x) + np.sin(14 * x) return y1 + 2.5 c_grid = np.linspace(0, 1, 6) + def Af(x): return sp.interp(x, c_grid, f(c_grid)) @@ -17,7 +19,8 @@ def Af(x): ax.set_xlim(0, 1) ax.plot(f_grid, f(f_grid), 'b-', lw=2, alpha=0.8, label='true function') -ax.plot(f_grid, Af(f_grid), 'g-', lw=2, alpha=0.8, label='linear approximation') +ax.plot(f_grid, Af(f_grid), 'g-', lw=2, alpha=0.8, + label='linear approximation') ax.vlines(c_grid, c_grid * 0, f(c_grid), linestyle='dashed', alpha=0.5) ax.legend(loc='upper center') diff --git a/examples/lq_permanent_1.py b/examples/lq_permanent_1.py index d41448de4..9fe72be4e 100644 --- a/examples/lq_permanent_1.py +++ b/examples/lq_permanent_1.py @@ -20,10 +20,10 @@ # == Formulate as an LQ problem == # Q = 1 -R = np.zeros((2, 2)) +R = np.zeros((2, 2)) Rf = np.zeros((2, 2)) Rf[0, 0] = q -A = [[1 + r, -c_bar + mu], +A = [[1 + r, -c_bar + mu], [0, 1]] B = [[-1], [0]] @@ -36,7 +36,7 @@ xp, up, wp = lq.compute_sequence(x0) # == Convert back to assets, consumption and income == # -assets = xp[0, :] # a_t +assets = xp[0, :] # a_t c = up.flatten() + c_bar # c_t income = wp[0, 1:] + mu # y_t @@ -49,17 +49,18 @@ axes[i].grid() axes[i].set_xlabel(r'Time') bbox = (0., 1.02, 1., .102) -legend_args = {'bbox_to_anchor' : bbox, 'loc' : 3, 'mode' : 'expand'} -p_args = {'lw' : 2, 'alpha' : 0.7} +legend_args = {'bbox_to_anchor': bbox, 'loc': 3, 'mode': 'expand'} +p_args = {'lw': 2, 'alpha': 0.7} -axes[0].plot(list(range(1, T+1)), income, 'g-', label="non-financial income", **p_args) +axes[0].plot(list(range(1, T+1)), income, 'g-', label="non-financial income", + **p_args) axes[0].plot(list(range(T)), c, 'k-', label="consumption", **p_args) axes[0].legend(ncol=2, **legend_args) -axes[1].plot(list(range(1, T+1)), np.cumsum(income - mu), 'r-', label="cumulative unanticipated income", **p_args) +axes[1].plot(list(range(1, T+1)), np.cumsum(income - mu), 'r-', + label="cumulative unanticipated income", **p_args) axes[1].plot(list(range(T+1)), assets, 'b-', label="assets", 
**p_args) axes[1].plot(list(range(T)), np.zeros(T), 'k-') axes[1].legend(ncol=2, **legend_args) plt.show() - diff --git a/examples/lqramsey.py b/examples/lqramsey.py index 768447d41..900d47c94 100644 --- a/examples/lqramsey.py +++ b/examples/lqramsey.py @@ -16,7 +16,7 @@ import sys import numpy as np -from numpy import sqrt, max, eye, dot, zeros, cumsum, array +from numpy import sqrt, eye, dot, zeros, cumsum from numpy.random import randn import scipy.linalg import matplotlib.pyplot as plt @@ -103,7 +103,7 @@ def compute_paths(T, econ): # == Simulate the exogenous process x == # if econ.discrete: state = mc_sample_path(P, init=0, sample_size=T) - x = x_vals[:,state] + x = x_vals[:, state] else: # == Generate an initial condition x0 satisfying x0 = A x0 == # nx, nx = A.shape @@ -116,16 +116,16 @@ def compute_paths(T, econ): x = zeros((nx, T)) w = randn(nw, T) x[:, 0] = x0.T - for t in range(1,T): + for t in range(1, T): x[:, t] = dot(A, x[:, t-1]) + dot(C, w[:, t]) # == Compute exogenous variable sequences == # g, d, b, s = (dot(S, x).flatten() for S in (Sg, Sd, Sb, Ss)) # == Solve for Lagrange multiplier in the govt budget constraint == # - ## In fact we solve for nu = lambda / (1 + 2*lambda). Here nu is the - ## solution to a quadratic equation a(nu**2 - nu) + b = 0 where - ## a and b are expected discounted sums of quadratic forms of the state. + # In fact we solve for nu = lambda / (1 + 2*lambda). Here nu is the + # solution to a quadratic equation a(nu**2 - nu) + b = 0 where + # a and b are expected discounted sums of quadratic forms of the state. Sm = Sb - Sd - Ss # == Compute a and b == # if econ.discrete: @@ -176,7 +176,7 @@ def compute_paths(T, econ): B = temp[state] / p H = dot(P[state, :], dot(Sb - Sc, x_vals).T).flatten() R = p / (beta * H) - temp = dot(P[state,:], dot(Sb - Sc, x_vals).T).flatten() + temp = dot(P[state, :], dot(Sb - Sc, x_vals).T).flatten() xi = p[1:] / temp[:T-1] else: H = dot(Sl.T, Sl) - dot((Sb - Sc).T, Sl - Sg) @@ -188,7 +188,7 @@ def compute_paths(T, econ): R = 1 / Rinv AF1 = dot(Sb - Sc, x[:, 1:]) AF2 = dot(dot(Sb - Sc, A), x[:, :T-1]) - xi = AF1 / AF2 + xi = AF1 / AF2 xi = xi.flatten() pi = B[1:] - R[:T-1] * B[:T-1] - rvn[:T-1] + g[:T-1] @@ -230,8 +230,8 @@ def gen_fig_1(path): axes[i, j].grid() axes[i, j].set_xlabel(r'Time') bbox = (0., 1.02, 1., .102) - legend_args = {'bbox_to_anchor' : bbox, 'loc' : 3, 'mode' : 'expand'} - p_args = {'lw' : 2, 'alpha' : 0.7} + legend_args = {'bbox_to_anchor': bbox, 'loc': 3, 'mode': 'expand'} + p_args = {'lw': 2, 'alpha': 0.7} # == Plot consumption, govt expenditure and revenue == # ax = axes[0, 0] @@ -242,21 +242,21 @@ def gen_fig_1(path): # == Plot govt expenditure and debt == # ax = axes[0, 1] - ax.plot(list(range(1,T+1)), path.rvn, label=r'$\tau_t \ell_t$', **p_args) - ax.plot(list(range(1,T+1)), path.g, label=r'$g_t$', **p_args) - ax.plot(list(range(1,T)), path.B[1:T], label=r'$B_{t+1}$', **p_args) + ax.plot(list(range(1, T+1)), path.rvn, label=r'$\tau_t \ell_t$', **p_args) + ax.plot(list(range(1, T+1)), path.g, label=r'$g_t$', **p_args) + ax.plot(list(range(1, T)), path.B[1:T], label=r'$B_{t+1}$', **p_args) ax.legend(ncol=3, **legend_args) # == Plot risk free return == # ax = axes[1, 0] - ax.plot(list(range(1,T+1)), path.R - 1, label=r'$R_t - 1$', **p_args) + ax.plot(list(range(1, T+1)), path.R - 1, label=r'$R_t - 1$', **p_args) ax.legend(ncol=1, **legend_args) # == Plot revenue, expenditure and risk free rate == # ax = axes[1, 1] - ax.plot(list(range(1,T+1)), path.rvn, label=r'$\tau_t \ell_t$', **p_args) - 
ax.plot(list(range(1,T+1)), path.g, label=r'$g_t$', **p_args) - axes[1, 1].plot(list(range(1,T)), path.pi, label=r'$\pi_{t+1}$', **p_args) + ax.plot(list(range(1, T+1)), path.rvn, label=r'$\tau_t \ell_t$', **p_args) + ax.plot(list(range(1, T+1)), path.g, label=r'$g_t$', **p_args) + axes[1, 1].plot(list(range(1, T)), path.pi, label=r'$\pi_{t+1}$', **p_args) ax.legend(ncol=3, **legend_args) plt.show() @@ -276,22 +276,21 @@ def gen_fig_2(path): plt.subplots_adjust(hspace=0.5) bbox = (0., 1.02, 1., .102) bbox = (0., 1.02, 1., .102) - legend_args = {'bbox_to_anchor' : bbox, 'loc' : 3, 'mode' : 'expand'} - p_args = {'lw' : 2, 'alpha' : 0.7} + legend_args = {'bbox_to_anchor': bbox, 'loc': 3, 'mode': 'expand'} + p_args = {'lw': 2, 'alpha': 0.7} # == Plot adjustment factor == # ax = axes[0] - ax.plot(list(range(2,T+1)), path.xi, label=r'$\xi_t$', **p_args) + ax.plot(list(range(2, T+1)), path.xi, label=r'$\xi_t$', **p_args) ax.grid() ax.set_xlabel(r'Time') ax.legend(ncol=1, **legend_args) # == Plot adjusted cumulative return == # ax = axes[1] - ax.plot(list(range(2,T+1)), path.Pi, label=r'$\Pi_t$', **p_args) + ax.plot(list(range(2, T+1)), path.Pi, label=r'$\Pi_t$', **p_args) ax.grid() ax.set_xlabel(r'Time') ax.legend(ncol=1, **legend_args) plt.show() - diff --git a/examples/lqramsey_ar1.py b/examples/lqramsey_ar1.py index 778c15142..0eea50061 100644 --- a/examples/lqramsey_ar1.py +++ b/examples/lqramsey_ar1.py @@ -14,7 +14,7 @@ beta = 1 / 1.05 rho, mg = .7, .35 A = np.identity(2) -A[0,:] = rho, mg * (1-rho) +A[0, :] = rho, mg * (1-rho) C = np.zeros((2, 1)) C[0, 0] = np.sqrt(1 - rho**2) * mg / 10 Sg = array((1, 0)).reshape(1, 2) @@ -23,15 +23,13 @@ Ss = array((0, 0)).reshape(1, 2) economy = lqramsey.Economy(beta=beta, - Sg=Sg, - Sd=Sd, - Sb=Sb, - Ss=Ss, - discrete=False, - proc=(A, C)) + Sg=Sg, + Sd=Sd, + Sb=Sb, + Ss=Ss, + discrete=False, + proc=(A, C)) T = 50 path = lqramsey.compute_paths(T, economy) lqramsey.gen_fig_1(path) - - diff --git a/examples/lqramsey_discrete.py b/examples/lqramsey_discrete.py index 2a252287b..5db8c157d 100644 --- a/examples/lqramsey_discrete.py +++ b/examples/lqramsey_discrete.py @@ -2,23 +2,21 @@ Filename: lqramsey_discrete.py Authors: Thomas Sargent, Doc-Jin Jang, Jeong-hun Choi, John Stachurski -LQ Ramsey model with discrete exogenous process. +LQ Ramsey model with discrete exogenous process. """ - -import numpy as np from numpy import array import lqramsey # == Parameters == # -beta = 1 / 1.05 -P = array([[0.8, 0.2, 0.0], - [0.0, 0.5, 0.5], - [0.0, 0.0, 1.0]]) +beta = 1 / 1.05 +P = array([[0.8, 0.2, 0.0], + [0.0, 0.5, 0.5], + [0.0, 0.0, 1.0]]) # == Possible states of the world == # # Each column is a state of the world. 
The rows are [g d b s 1] -x_vals = array([[0.5, 0.5, 0.25], - [0.0, 0.0, 0.0], +x_vals = array([[0.5, 0.5, 0.25], + [0.0, 0.0, 0.0], [2.2, 2.2, 2.2], [0.0, 0.0, 0.0], [1.0, 1.0, 1.0]]) @@ -27,13 +25,13 @@ Sb = array((0, 0, 1, 0, 0)).reshape(1, 5) Ss = array((0, 0, 0, 1, 0)).reshape(1, 5) -economy = lqramsey.Economy(beta=beta, - Sg=Sg, - Sd=Sd, - Sb=Sb, - Ss=Ss, - discrete=True, - proc=(P, x_vals)) +economy = lqramsey.Economy(beta=beta, + Sg=Sg, + Sd=Sd, + Sb=Sb, + Ss=Ss, + discrete=True, + proc=(P, x_vals)) T = 15 path = lqramsey.compute_paths(T, economy) diff --git a/examples/lucas_tree_price1.py b/examples/lucas_tree_price1.py index 89f5ea89d..99ce92269 100644 --- a/examples/lucas_tree_price1.py +++ b/examples/lucas_tree_price1.py @@ -1,6 +1,5 @@ from __future__ import division # Omit for Python 3.x -import numpy as np import matplotlib.pyplot as plt from quantecon.models import LucasTree @@ -21,4 +20,3 @@ ax.legend(loc='upper left') plt.show() - diff --git a/examples/mc_convergence_plot.py b/examples/mc_convergence_plot.py index 27d84b070..2bc034075 100644 --- a/examples/mc_convergence_plot.py +++ b/examples/mc_convergence_plot.py @@ -8,8 +8,8 @@ import matplotlib.pyplot as plt from quantecon import mc_compute_stationary -P = ((0.971, 0.029, 0.000), - (0.145, 0.778, 0.077), +P = ((0.971, 0.029, 0.000), + (0.145, 0.778, 0.077), (0.000, 0.508, 0.492)) P = np.array(P) diff --git a/examples/nds.py b/examples/nds.py index 05da9718b..58adaa30f 100644 --- a/examples/nds.py +++ b/examples/nds.py @@ -1,4 +1,4 @@ -import matplotlib.pyplot as plt +import matplotlib.pyplot as plt import numpy as np from scipy.stats import norm from random import uniform @@ -12,5 +12,3 @@ ax.plot(x, y, linewidth=2, alpha=0.6, label=current_label) ax.legend() plt.show() - - diff --git a/examples/nx_demo.py b/examples/nx_demo.py index 8b0e1bf8f..df40a0645 100644 --- a/examples/nx_demo.py +++ b/examples/nx_demo.py @@ -5,7 +5,6 @@ import networkx as nx import matplotlib.pyplot as plt -from matplotlib import cm import numpy as np G = nx.random_geometric_graph(200, 0.12) # Generate random graph @@ -13,7 +12,7 @@ # find node nearest the center point (0.5,0.5) dists = [(x - 0.5)**2 + (y - 0.5)**2 for x, y in list(pos.values())] ncenter = np.argmin(dists) -# Plot graph, coloring by path length from central node +# Plot graph, coloring by path length from central node p = nx.single_source_shortest_path_length(G, ncenter) plt.figure() nx.draw_networkx_edges(G, pos, alpha=0.4) diff --git a/examples/odu_vfi_plots.py b/examples/odu_vfi_plots.py index 49d01f889..e4fc74430 100644 --- a/examples/odu_vfi_plots.py +++ b/examples/odu_vfi_plots.py @@ -25,7 +25,7 @@ pi_plot_grid = np.linspace(0.001, 0.99, pi_plot_grid_size) w_plot_grid = np.linspace(0, sp.w_max, w_plot_grid_size) -#plot_choice = 'value_function' +# plot_choice = 'value_function' plot_choice = 'policy_function' if plot_choice == 'value_function': diff --git a/examples/optgrowth_v0.py b/examples/optgrowth_v0.py index 8ddec3257..39b52dc13 100644 --- a/examples/optgrowth_v0.py +++ b/examples/optgrowth_v0.py @@ -13,19 +13,22 @@ from scipy.optimize import fminbound from scipy import interp -## Primitives and grid +# Primitives and grid alpha = 0.65 -beta=0.95 -grid_max=2 -grid_size=150 +beta = 0.95 +grid_max = 2 +grid_size = 150 grid = np.linspace(1e-6, grid_max, grid_size) -## Exact solution +# Exact solution ab = alpha * beta c1 = (log(1 - ab) + log(ab) * ab / (1 - ab)) / (1 - beta) c2 = alpha / (1 - ab) + + def v_star(k): return c1 + c2 * log(k) + def bellman_operator(w): 
""" The approximate Bellman operator, which computes and returns the updated @@ -37,19 +40,19 @@ def bellman_operator(w): points. """ # === Apply linear interpolation to w === # - Aw = lambda x: interp(x, grid, w) + Aw = lambda x: interp(x, grid, w) # === set Tw[i] equal to max_c { log(c) + beta w(f(k_i) - c)} === # Tw = np.empty(grid_size) for i, k in enumerate(grid): - objective = lambda c: - log(c) - beta * Aw(k**alpha - c) + objective = lambda c: - log(c) - beta * Aw(k**alpha - c) c_star = fminbound(objective, 1e-6, k**alpha) Tw[i] = - objective(c_star) return Tw # === If file is run directly, not imported, produce figure === # -if __name__ == '__main__': +if __name__ == '__main__': w = 5 * log(grid) - 25 # An initial condition -- fairly arbitrary n = 35 @@ -66,4 +69,3 @@ def bellman_operator(w): ax.legend(loc='upper left') plt.show() - diff --git a/examples/perm_inc_figs.py b/examples/perm_inc_figs.py index 885c77de8..7786f9bf5 100644 --- a/examples/perm_inc_figs.py +++ b/examples/perm_inc_figs.py @@ -14,8 +14,9 @@ sigma = 0.15 mu = 1 + def time_path(): - w = np.random.randn(T+1) # w_0, w_1, ..., w_T + w = np.random.randn(T+1) # w_0, w_1, ..., w_T w[0] = 0 b = np.zeros(T+1) for t in range(1, T+1): @@ -30,14 +31,16 @@ def time_path(): if 1: fig, ax = plt.subplots() - p_args = {'lw' : 2, 'alpha' : 0.7} + p_args = {'lw': 2, 'alpha': 0.7} ax.grid() ax.set_xlabel(r'Time') bbox = (0., 1.02, 1., .102) - legend_args = {'bbox_to_anchor' : bbox, 'loc' : 'upper left', 'mode' : 'expand'} + legend_args = {'bbox_to_anchor': bbox, 'loc': 'upper left', + 'mode': 'expand'} w, b, c = time_path() - ax.plot(list(range(T+1)), mu + sigma * w, 'g-', label="non-financial income", **p_args) + ax.plot(list(range(T+1)), mu + sigma * w, 'g-', + label="non-financial income", **p_args) ax.plot(list(range(T+1)), c, 'k-', label="consumption", **p_args) ax.plot(list(range(T+1)), b, 'b-', label="debt", **p_args) ax.legend(ncol=3, **legend_args) @@ -49,7 +52,7 @@ def time_path(): if 0: fig, ax = plt.subplots() - p_args = {'lw' : 0.8, 'alpha' : 0.7} + p_args = {'lw': 0.8, 'alpha': 0.7} ax.grid() ax.set_xlabel(r'Time') ax.set_ylabel(r'Consumption') @@ -60,5 +63,3 @@ def time_path(): ax.plot(list(range(T+1)), c, color=rcolor, **p_args) plt.show() - - diff --git a/examples/perm_inc_ir.py b/examples/perm_inc_ir.py index 3e2655117..a63cd1b8e 100644 --- a/examples/perm_inc_ir.py +++ b/examples/perm_inc_ir.py @@ -13,10 +13,11 @@ S = 5 # Impulse date sigma1 = sigma2 = 0.15 + def time_path(permanent=False): "Time path of consumption and debt given shock sequence" - w1 = np.zeros(T+1) - w2 = np.zeros(T+1) + w1 = np.zeros(T+1) + w2 = np.zeros(T+1) b = np.zeros(T+1) c = np.zeros(T+1) if permanent: @@ -31,7 +32,7 @@ def time_path(permanent=False): fig, axes = plt.subplots(2, 1) plt.subplots_adjust(hspace=0.5) -p_args = {'lw' : 2, 'alpha' : 0.7} +p_args = {'lw': 2, 'alpha': 0.7} L = 0.175 @@ -55,4 +56,3 @@ def time_path(permanent=False): ax.plot(list(range(T+1)), b, 'b-', label="debt", **p_args) ax.legend(loc='lower right') plt.show() - diff --git a/examples/plot_example_1.py b/examples/plot_example_1.py index d653340ed..441bfb4d6 100644 --- a/examples/plot_example_1.py +++ b/examples/plot_example_1.py @@ -1,4 +1,4 @@ -import matplotlib.pyplot as plt +import matplotlib.pyplot as plt import numpy as np fig, ax = plt.subplots() x = np.linspace(0, 10, 200) diff --git a/examples/plot_example_2.py b/examples/plot_example_2.py index e510ddf80..e7c4915c0 100644 --- a/examples/plot_example_2.py +++ b/examples/plot_example_2.py @@ -1,4 +1,4 
@@ -import matplotlib.pyplot as plt +import matplotlib.pyplot as plt import numpy as np fig, ax = plt.subplots() x = np.linspace(0, 10, 200) diff --git a/examples/plot_example_3.py b/examples/plot_example_3.py index b9c868ea6..2b8a8ab16 100644 --- a/examples/plot_example_3.py +++ b/examples/plot_example_3.py @@ -1,4 +1,4 @@ -import matplotlib.pyplot as plt +import matplotlib.pyplot as plt import numpy as np fig, ax = plt.subplots() x = np.linspace(0, 10, 200) @@ -6,4 +6,3 @@ ax.plot(x, y, 'r-', lw=2, label=r'$y=\sin(x)$', alpha=0.6) ax.legend(loc='upper center') plt.show() - diff --git a/examples/plot_example_4.py b/examples/plot_example_4.py index bf7ea167b..4bb052f5b 100644 --- a/examples/plot_example_4.py +++ b/examples/plot_example_4.py @@ -1,4 +1,4 @@ -import matplotlib.pyplot as plt +import matplotlib.pyplot as plt import numpy as np from scipy.stats import norm from random import uniform diff --git a/examples/plot_example_5.py b/examples/plot_example_5.py index c4fea2144..b924d9731 100644 --- a/examples/plot_example_5.py +++ b/examples/plot_example_5.py @@ -1,5 +1,4 @@ -import matplotlib.pyplot as plt -import numpy as np +import matplotlib.pyplot as plt from scipy.stats import norm from random import uniform num_rows, num_cols = 2, 3 @@ -11,7 +10,6 @@ axes[i, j].hist(x, alpha=0.6, bins=20) t = r'$\mu = {0:.1f},\; \sigma = {1:.1f}$'.format(m, s) axes[i, j].set_title(t) - axes[i, j].set_xticks([-4, 0, 4]) + axes[i, j].set_xticks([-4, 0, 4]) axes[i, j].set_yticks([]) plt.show() - diff --git a/examples/preim1.py b/examples/preim1.py index 3440f5ecc..aed9b6144 100644 --- a/examples/preim1.py +++ b/examples/preim1.py @@ -5,6 +5,7 @@ import matplotlib.pyplot as plt import numpy as np + def f(x): return 0.6 * np.cos(4 * x) + 1.4 @@ -17,7 +18,7 @@ def f(x): fig, axes = plt.subplots(2, 1, figsize=(8, 8)) for ax in axes: -# Set the axes through the origin + # Set the axes through the origin for spine in ['left', 'bottom']: ax.spines[spine].set_position('zero') for spine in ['right', 'top']: @@ -28,7 +29,7 @@ def f(x): ax.set_yticks(()) ax.set_xticks(()) - ax.plot(x, y, 'k-', lw=2, label=r'$f$') + ax.plot(x, y, 'k-', lw=2, label=r'$f$') ax.fill_between(x, ya, yb, facecolor='blue', alpha=0.05) ax.vlines([0], ya, yb, lw=3, color='blue', label=r'range of $f$') ax.text(0.04, -0.3, '$0$', fontsize=16) diff --git a/examples/pylab_eg2.py b/examples/pylab_eg2.py index f28448850..202142ba1 100644 --- a/examples/pylab_eg2.py +++ b/examples/pylab_eg2.py @@ -4,4 +4,3 @@ y = np.sin(x) plt.plot(x, y, 'b-', linewidth=2) plt.show() - diff --git a/examples/qm_plot.py b/examples/qm_plot.py index 6ba163c2f..7ee08f850 100644 --- a/examples/qm_plot.py +++ b/examples/qm_plot.py @@ -1,6 +1,7 @@ import matplotlib.pyplot as plt import numpy as np + def qm(x0, n): x = np.empty(n+1) x[0] = x0 diff --git a/examples/qs.py b/examples/qs.py index 2845c3ec8..d7d93c44c 100644 --- a/examples/qs.py +++ b/examples/qs.py @@ -18,7 +18,7 @@ ax.spines['top'].set_color('none') ax.spines['left'].set_color('none') ax.xaxis.set_ticks_position('bottom') -ax.spines['bottom'].set_position(('data',0)) +ax.spines['bottom'].set_position(('data', 0)) ax.set_ylim(-0.05, 0.5) ax.set_xticks((x,)) @@ -36,12 +36,12 @@ ax.annotate(r'$Q(x,\cdot)$', xy=(6.6, 0.2), xycoords='data', - xytext=(20, 90), textcoords='offset points', fontsize=16, - arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=-0.2")) + xytext=(20, 90), textcoords='offset points', fontsize=16, + arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=-0.2")) 
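# A short usage sketch for the QuadMap class defined in quadmap_class.py
# above; the update rule is the chaotic logistic map x -> 4 x (1 - x).
# The driver below is illustrative and not part of the file.
from quadmap_class import QuadMap   # assumes examples/ is importable

qm = QuadMap(initial_state=0.3)
trajectory = qm.generate_series(10)  # 10 successive states, starting at 0.3
print(trajectory)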
ax.annotate(r'$Q^2(x,\cdot)$', xy=(3.6, 0.24), xycoords='data', - xytext=(20, 90), textcoords='offset points', fontsize=16, - arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=-0.2")) + xytext=(20, 90), textcoords='offset points', fontsize=16, + arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=-0.2")) ax.annotate(r'$Q^3(x,\cdot)$', xy=(-0.2, 0.28), xycoords='data', - xytext=(-90, 90), textcoords='offset points', fontsize=16, - arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=0.2")) + xytext=(-90, 90), textcoords='offset points', fontsize=16, + arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=0.2")) fig.show() diff --git a/examples/quadmap_class.py b/examples/quadmap_class.py index 04e29dba3..ba32d8059 100644 --- a/examples/quadmap_class.py +++ b/examples/quadmap_class.py @@ -5,7 +5,7 @@ """ -class QuadMap: +class QuadMap(object): def __init__(self, initial_state): self.x = initial_state @@ -14,9 +14,9 @@ def update(self): "Apply the quadratic map to update the state." self.x = 4 * self.x * (1 - self.x) - def generate_series(self, n): + def generate_series(self, n): """ - Generate and return a trajectory of length n, starting at the + Generate and return a trajectory of length n, starting at the current state. """ trajectory = [] @@ -24,4 +24,3 @@ def generate_series(self, n): trajectory.append(self.x) self.update() return trajectory - diff --git a/examples/robust_monopolist.py b/examples/robust_monopolist.py index e8aeb563f..44f260db3 100644 --- a/examples/robust_monopolist.py +++ b/examples/robust_monopolist.py @@ -1,22 +1,22 @@ """ Filename: robust_monopolist.py -Authors: Chase Coleman, Spencer Lyon, Thomas Sargent, John Stachurski +Authors: Chase Coleman, Spencer Lyon, Thomas Sargent, John Stachurski The robust control problem for a monopolist with adjustment costs. The inverse demand curve is: p_t = a_0 - a_1 y_t + d_t -where d_{t+1} = \rho d_t + \sigma_d w_{t+1} for w_t ~ N(0,1) and iid. +where d_{t+1} = \rho d_t + \sigma_d w_{t+1} for w_t ~ N(0, 1) and iid. 
The period return function for the monopolist is - r_t = p_t y_t - gamma (y_{t+1} - y_t)^2 / 2 - c y_t + r_t = p_t y_t - gamma (y_{t+1} - y_t)^2 / 2 - c y_t The objective of the firm is E_t \sum_{t=0}^\infty \beta^t r_t For the linear regulator, we take the state and control to be - x_t = (1, y_t, d_t) and u_t = y_{t+1} - y_t + x_t = (1, y_t, d_t) and u_t = y_{t+1} - y_t """ @@ -44,26 +44,26 @@ # == Define LQ matrices == # -R = np.array([[0., ac, 0.], - [ac, -a_1, 0.5], +R = np.array([[0., ac, 0.], + [ac, -a_1, 0.5], [0., 0.5, 0.]]) R = -R # For minimization Q = gamma / 2 -A = np.array([[1., 0., 0.], - [0., 1., 0.], +A = np.array([[1., 0., 0.], + [0., 1., 0.], [0., 0., rho]]) -B = np.array([[0.], - [1.], +B = np.array([[0.], + [1.], [0.]]) -C = np.array([[0.], - [0.], +C = np.array([[0.], + [0.], [sigma_d]]) -#-----------------------------------------------------------------------------# -# Functions -#-----------------------------------------------------------------------------# +# -------------------------------------------------------------------------- # +# Functions +# -------------------------------------------------------------------------- # def evaluate_policy(theta, F): @@ -87,20 +87,20 @@ def value_and_entropy(emax, F, bw, grid_size=1000): Parameters ========== - emax : scalar + emax: scalar The target entropy value - F : array_like + F: array_like The policy function to be evaluated - bw : str + bw: str A string specifying whether the implied shock path follows best or worst assumptions. The only acceptable values are 'best' and 'worst'. Returns ======= - df : pd.DataFrame + df: pd.DataFrame A pandas DataFrame containing the value function and entropy values up to the emax parameter. The columns are 'value' and 'entropy'. @@ -122,9 +122,9 @@ def value_and_entropy(emax, F, bw, grid_size=1000): return df -#-----------------------------------------------------------------------------# +# -------------------------------------------------------------------------- # # Main -#-----------------------------------------------------------------------------# +# -------------------------------------------------------------------------- # # == Compute the optimal rule == # @@ -157,16 +157,17 @@ def value_and_entropy(emax, F, bw, grid_size=1000): ax.grid() for axis in 'x', 'y': - plt.ticklabel_format(style='sci', axis=axis, scilimits=(0,0)) + plt.ticklabel_format(style='sci', axis=axis, scilimits=(0, 0)) -plot_args = {'lw' : 2, 'alpha' : 0.7} +plot_args = {'lw': 2, 'alpha': 0.7} colors = 'r', 'b' df_pairs = ((optimal_best_case, optimal_worst_case), (robust_best_case, robust_worst_case)) -class Curve: + +class Curve(object): def __init__(self, x, y): self.x, self.y = x, y @@ -186,10 +187,9 @@ def __call__(self, z): print(ax.plot(egrid, curve(egrid), color=c, **plot_args)) curves.append(curve) # == Color fill between curves == # - ax.fill_between(egrid, - curves[0](egrid), - curves[1](egrid), - color=c, alpha=0.1) + ax.fill_between(egrid, + curves[0](egrid), + curves[1](egrid), + color=c, alpha=0.1) plt.show() - diff --git a/examples/sine2.py b/examples/sine2.py index 7b80001bb..e4be028a2 100644 --- a/examples/sine2.py +++ b/examples/sine2.py @@ -1,4 +1,4 @@ -import matplotlib.pyplot as plt +import matplotlib.pyplot as plt import numpy as np fig, ax = plt.subplots() x = np.linspace(0, 10, 200) @@ -6,5 +6,3 @@ ax.plot(x, y, 'r-', linewidth=2, label='sine function', alpha=0.6) ax.legend() plt.show() - - diff --git a/examples/sine3.py b/examples/sine3.py index cd19cd4d9..de2b8a466 100644 --- 
a/examples/sine3.py +++ b/examples/sine3.py @@ -1,4 +1,4 @@ -import matplotlib.pyplot as plt +import matplotlib.pyplot as plt import numpy as np fig, ax = plt.subplots() x = np.linspace(0, 10, 200) @@ -6,5 +6,3 @@ ax.plot(x, y, 'r-', linewidth=2, label='sine function', alpha=0.6) ax.legend(loc='upper center') plt.show() - - diff --git a/examples/sine4.py b/examples/sine4.py index 708bb3939..4e5cac4c6 100644 --- a/examples/sine4.py +++ b/examples/sine4.py @@ -1,4 +1,4 @@ -import matplotlib.pyplot as plt +import matplotlib.pyplot as plt import numpy as np fig, ax = plt.subplots() x = np.linspace(0, 10, 200) @@ -6,4 +6,3 @@ ax.plot(x, y, 'r-', linewidth=2, label=r'$y=\sin(x)$', alpha=0.6) ax.legend(loc='upper center') plt.show() - diff --git a/examples/sine5.py b/examples/sine5.py index edcc31f5a..9cb6e2100 100644 --- a/examples/sine5.py +++ b/examples/sine5.py @@ -1,12 +1,10 @@ -import matplotlib.pyplot as plt +import matplotlib.pyplot as plt import numpy as np fig, ax = plt.subplots() x = np.linspace(0, 10, 200) y = np.sin(x) ax.plot(x, y, 'r-', linewidth=2, label=r'$y=\sin(x)$', alpha=0.6) ax.legend(loc='upper center') -ax.set_yticks([-1, 0, 1]) -ax.set_title('Test plot') +ax.set_yticks([-1, 0, 1]) +ax.set_title('Test plot') plt.show() - - diff --git a/examples/six_hists.py b/examples/six_hists.py index 1e723ba56..21386e33a 100644 --- a/examples/six_hists.py +++ b/examples/six_hists.py @@ -1,5 +1,4 @@ -import matplotlib.pyplot as plt -import numpy as np +import matplotlib.pyplot as plt from scipy.stats import norm from random import uniform num_rows, num_cols = 3, 2 @@ -11,8 +10,6 @@ axes[i, j].hist(x, alpha=0.6, bins=20) t = r'$\mu = {0:.1f}, \quad \sigma = {1:.1f}$'.format(m, s) axes[i, j].set_title(t) - axes[i, j].set_xticks([-4, 0, 4]) + axes[i, j].set_xticks([-4, 0, 4]) axes[i, j].set_yticks([]) plt.show() - - diff --git a/examples/subplots.py b/examples/subplots.py index 0307804a7..d955a4936 100644 --- a/examples/subplots.py +++ b/examples/subplots.py @@ -2,6 +2,7 @@ import matplotlib.pyplot as plt import numpy as np + def subplots(): "Custom subplots with axes throught the origin" fig, ax = plt.subplots() @@ -11,7 +12,7 @@ def subplots(): ax.spines[spine].set_position('zero') for spine in ['right', 'top']: ax.spines[spine].set_color('none') - + ax.grid() return fig, ax @@ -22,4 +23,3 @@ def subplots(): ax.plot(x, y, 'r-', linewidth=2, label='sine function', alpha=0.6) ax.legend(loc='lower right') plt.show() - diff --git a/examples/test_program_2.py b/examples/test_program_2.py index e658ad981..0282bb875 100644 --- a/examples/test_program_2.py +++ b/examples/test_program_2.py @@ -1,7 +1,7 @@ from random import normalvariate import matplotlib.pyplot as plt ts_length = 100 -epsilon_values = [] +epsilon_values = [] i = 0 while i < ts_length: e = normalvariate(0, 1) diff --git a/examples/test_program_3.py b/examples/test_program_3.py index df4df1bd0..80bf134ec 100644 --- a/examples/test_program_3.py +++ b/examples/test_program_3.py @@ -1,8 +1,9 @@ from random import normalvariate import matplotlib.pyplot as plt + def generate_data(n): - epsilon_values = [] + epsilon_values = [] for i in range(n): e = normalvariate(0, 1) epsilon_values.append(e) diff --git a/examples/test_program_4.py b/examples/test_program_4.py index 354fc1697..0b68e7d35 100644 --- a/examples/test_program_4.py +++ b/examples/test_program_4.py @@ -1,8 +1,9 @@ from random import normalvariate, uniform import matplotlib.pyplot as plt + def generate_data(n, generator_type): - epsilon_values = [] + epsilon_values = [] for i 
in range(n): if generator_type == 'U': e = uniform(0, 1) diff --git a/examples/test_program_5.py b/examples/test_program_5.py index 8c645183a..516714670 100644 --- a/examples/test_program_5.py +++ b/examples/test_program_5.py @@ -1,8 +1,9 @@ from random import normalvariate, uniform import matplotlib.pyplot as plt + def generate_data(n, generator_type): - epsilon_values = [] + epsilon_values = [] for i in range(n): e = uniform(0, 1) if generator_type == 'U' else normalvariate(0, 1) epsilon_values.append(e) diff --git a/examples/test_program_5_short.py b/examples/test_program_5_short.py index d91dfd7ee..6744c2c39 100644 --- a/examples/test_program_5_short.py +++ b/examples/test_program_5_short.py @@ -1,11 +1,15 @@ import pylab from random import normalvariate, uniform + def generate_data(n, generator_type): - epsilon_values = [] + epsilon_values = [] for i in range(n): - e = uniform(0, 1) if generator_type == 'U' \ - else normalvariate(0, 1) + if generator_type == "U": + e = uniform(0, 1) + else: + e = normalvariate(0, 1) + epsilon_values.append(e) return epsilon_values diff --git a/examples/test_program_6.py b/examples/test_program_6.py index 1b9950292..ed20f96e9 100644 --- a/examples/test_program_6.py +++ b/examples/test_program_6.py @@ -1,8 +1,9 @@ -from random import normalvariate, uniform +from random import uniform import matplotlib.pyplot as plt + def generate_data(n, generator_type): - epsilon_values = [] + epsilon_values = [] for i in range(n): e = generator_type(0, 1) epsilon_values.append(e) @@ -11,4 +12,3 @@ def generate_data(n, generator_type): data = generate_data(100, uniform) plt.plot(data, 'b-') plt.show() - diff --git a/examples/tsh_hg.py b/examples/tsh_hg.py index 928972097..624668283 100644 --- a/examples/tsh_hg.py +++ b/examples/tsh_hg.py @@ -3,7 +3,6 @@ import matplotlib.pyplot as plt from scipy.stats import norm from quantecon import LSS -import random phi_1, phi_2, phi_3, phi_4 = 0.5, -0.2, 0, 0.5 sigma = 0.1 @@ -20,7 +19,7 @@ ymin, ymax = -0.8, 1.25 -fig, ax = plt.subplots(figsize=(8,4)) +fig, ax = plt.subplots(figsize=(8, 4)) ax.set_xlim(ymin, ymax) ax.set_xlabel(r'$y_t$', fontsize=16) diff --git a/examples/us_cities.txt b/examples/us_cities.txt index 58b4bccc8..9be9260fc 100644 --- a/examples/us_cities.txt +++ b/examples/us_cities.txt @@ -1,6 +1,6 @@ new york: 8244910 los angeles: 3819702 -chicago: 2707120 +chicago: 2707120 houston: 2145146 philadelphia: 1536471 phoenix: 1469471 diff --git a/examples/vecs.py b/examples/vecs.py index e4ee386dc..326f30c2e 100644 --- a/examples/vecs.py +++ b/examples/vecs.py @@ -10,17 +10,17 @@ ax.spines[spine].set_position('zero') for spine in ['right', 'top']: ax.spines[spine].set_color('none') - + ax.set_xlim(-5, 5) ax.set_ylim(-5, 5) ax.grid() vecs = ((2, 4), (-3, 3), (-4, -3.5)) for v in vecs: - ax.annotate('', xy=v, xytext=(0, 0), - arrowprops=dict(facecolor='blue', - shrink=0, - alpha=0.7, - width=0.5)) + ax.annotate('', xy=v, xytext=(0, 0), + arrowprops=dict(facecolor='blue', + shrink=0, + alpha=0.7, + width=0.5)) ax.text(1.1 * v[0], 1.1 * v[1], str(v)) plt.show() diff --git a/examples/vecs2.py b/examples/vecs2.py index 08e58a464..fe3c85d42 100644 --- a/examples/vecs2.py +++ b/examples/vecs2.py @@ -11,16 +11,16 @@ ax.spines[spine].set_position('zero') for spine in ['right', 'top']: ax.spines[spine].set_color('none') - + ax.set_xlim(-5, 5) ax.set_ylim(-5, 5) x = (2, 2) -ax.annotate('', xy=x, xytext=(0, 0), - arrowprops=dict(facecolor='blue', - shrink=0, - alpha=1, - width=0.5)) +ax.annotate('', xy=x, xytext=(0, 0), + 
arrowprops=dict(facecolor='blue', + shrink=0, + alpha=1, + width=0.5)) ax.text(x[0] + 0.4, x[1] - 0.2, r'$x$', fontsize='16') @@ -29,11 +29,10 @@ for s in scalars: v = s * x - ax.annotate('', xy=v, xytext=(0, 0), - arrowprops=dict(facecolor='red', - shrink=0, - alpha=0.5, - width=0.5)) + ax.annotate('', xy=v, xytext=(0, 0), + arrowprops=dict(facecolor='red', + shrink=0, + alpha=0.5, + width=0.5)) ax.text(v[0] + 0.4, v[1] - 0.2, r'${} x$'.format(s), fontsize='16') plt.show() - diff --git a/examples/web_network.py b/examples/web_network.py index 69ca636a7..01e6ba724 100644 --- a/examples/web_network.py +++ b/examples/web_network.py @@ -1,20 +1,20 @@ - import numpy as np -import matplotlib.pyplot as plt import re alphabet = 'abcdefghijklmnopqrstuvwxyz' + def gen_rw_mat(n): "Generate an n x n matrix of zeros and ones." Q = np.random.randn(n, n) - 0.8 Q = np.where(Q > 0, 1, 0) # Make sure that no row contains only zeros for i in range(n): - if Q[i,:].sum() == 0: - Q[i,np.random.randint(0, n, 1)] = 1 + if Q[i, :].sum() == 0: + Q[i, np.random.randint(0, n, 1)] = 1 return Q + def adj_matrix_to_dot(Q, outfile='/tmp/foo_out.dot'): """ Convert an adjacency matrix to a dot file. @@ -29,6 +29,7 @@ def adj_matrix_to_dot(Q, outfile='/tmp/foo_out.dot'): f.write('}\n') f.close() + def dot_to_adj_matrix(node_num, infile='/tmp/foo_out.dot'): Q = np.zeros((node_num, node_num), dtype=int) f = open(infile, 'r') @@ -41,11 +42,10 @@ def dot_to_adj_matrix(node_num, infile='/tmp/foo_out.dot'): Q[i, j] = 1 return Q + def adj_matrix_to_markov(Q): n = Q.shape[0] P = np.empty((n, n)) for i in range(n): - P[i,:] = Q[i,:] / float(Q[i,:].sum()) + P[i, :] = Q[i, :] / float(Q[i, :].sum()) return P - - diff --git a/quantecon/arma.py b/quantecon/arma.py index d5635c3df..feff9e495 100644 --- a/quantecon/arma.py +++ b/quantecon/arma.py @@ -15,6 +15,7 @@ import warnings warnings.filterwarnings('ignore') + class ARMA(object): r""" This class represents scalar ARMA(p, q) processes. @@ -67,11 +68,11 @@ class ARMA(object): """ - def __init__(self, phi, theta=0, sigma=1) : + def __init__(self, phi, theta=0, sigma=1): self._phi, self._theta = phi, theta self.sigma = sigma self.set_params() - + @property def phi(self): return self._phi @@ -84,7 +85,7 @@ def phi(self, new_value): @property def theta(self): return self._theta - + @theta.setter def theta(self, new_value): self._theta = new_value @@ -182,7 +183,7 @@ def spectral_density(self, two_pi=True, res=1200): return w, spect - def autocovariance(self, num_autocov=16) : + def autocovariance(self, num_autocov=16): """ Compute the autocovariance function from the ARMA parameters over the integers range(num_autocov) using the spectral density and the inverse @@ -200,7 +201,7 @@ def autocovariance(self, num_autocov=16) : # num_autocov should be <= len(acov) / 2 return acov[:num_autocov] - def simulation(self, ts_length=90) : + def simulation(self, ts_length=90): """ Compute a simulated sample path assuming Gaussian shocks. @@ -228,7 +229,7 @@ def plot_impulse_response(self, ax=None, show=True): yi = self.impulse_response() ax.stem(list(range(len(yi))), yi) ax.set_xlim(xmin=(-0.5)) - ax.set_ylim(min(yi)-0.1,max(yi)+0.1) + ax.set_ylim(min(yi)-0.1, max(yi)+0.1) ax.set_xlabel('time') ax.set_ylabel('response') if show: @@ -270,7 +271,7 @@ def plot_simulation(self, ax=None, show=True): if show: plt.show() - def quad_plot(self) : + def quad_plot(self): """ Plots the impulse response, spectral_density, autocovariance, and one realization of the process. 
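# The quantecon/arma.py edits above are purely cosmetic (PEP8 spacing and
# indentation), so the public interface is unchanged.  A minimal usage
# sketch of that interface, assuming ARMA is imported from this module and
# using the keyword names visible in the signatures above:
from quantecon.arma import ARMA

lp = ARMA(phi=0.5, theta=0.2, sigma=1)      # a scalar ARMA(1, 1) process
w, spect = lp.spectral_density(res=1200)    # frequencies, spectral density
acov = lp.autocovariance(num_autocov=16)    # gamma(0), ..., gamma(15)
path = lp.simulation(ts_length=90)          # one simulated sample path
# lp.quad_plot()   # the four-panel summary figure touched by this patch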
@@ -280,11 +281,9 @@ def quad_plot(self) : fig, axes = plt.subplots(num_rows, num_cols, figsize=(12, 8)) plt.subplots_adjust(hspace=0.4) plot_functions = [self.plot_impulse_response, - self.plot_spectral_density, - self.plot_autocovariance, - self.plot_simulation] + self.plot_spectral_density, + self.plot_autocovariance, + self.plot_simulation] for plot_func, ax in zip(plot_functions, axes.flatten()): plot_func(ax, show=False) plt.show() - - diff --git a/quantecon/cartesian.py b/quantecon/cartesian.py index a13bf0dfe..c1bfba45d 100644 --- a/quantecon/cartesian.py +++ b/quantecon/cartesian.py @@ -9,6 +9,7 @@ import numpy from numba import njit + def cartesian(nodes, order='C'): '''Cartesian product of a list of arrays @@ -31,7 +32,6 @@ def cartesian(nodes, order='C'): l = numpy.prod(shapes) out = numpy.zeros((l, n), dtype=dtype) - if order == 'C': repetitions = numpy.cumprod([1] + shapes[:-1]) else: @@ -42,10 +42,11 @@ def cartesian(nodes, order='C'): repetitions.reverse() for i in range(n): - _repeat_1d(nodes[i], repetitions[i], out[:,i]) + _repeat_1d(nodes[i], repetitions[i], out[:, i]) return out + def mlinspace(a, b, nums, order='C'): '''Constructs a regular cartesian grid @@ -71,14 +72,20 @@ def mlinspace(a, b, nums, order='C'): @njit def _repeat_1d(x, K, out): - '''Repeats each element of a vector many times and repeats the whole result many times + ''' + Repeats each element of a vector many times and repeats the + whole result many times Parameters ---------- - x: (1d array) vector to be repeated - K: (int) number of times each element of x is repeated (inner iterations) - out: (1d array) placeholder for the result + x : array_like(Any, ndim=1) + The vector to be repeated + K : scalar(int) + The number of times each element of x + is repeated (inner iterations) + out : array_like(Any, ndim=1) + placeholder for the result Returns ------- @@ -86,8 +93,8 @@ def _repeat_1d(x, K, out): ''' N = x.shape[0] - L = out.shape[0] // (K*N) # number of outer iterations - # K # number of inner iterations + L = out.shape[0] // (K*N) # number of outer iterations + # K # number of inner iterations # the result out should enumerate in C-order the elements # of a 3-dimensional array T of dimensions (K,N,L) diff --git a/quantecon/ecdf.py b/quantecon/ecdf.py index 64c5fac30..ff1baf0f6 100644 --- a/quantecon/ecdf.py +++ b/quantecon/ecdf.py @@ -10,6 +10,7 @@ import numpy as np + class ECDF(object): """ One-dimensional empirical distribution function given a vector of @@ -45,4 +46,3 @@ def __call__(self, x): """ return np.mean(self.observations <= x) - diff --git a/quantecon/ivp.py b/quantecon/ivp.py index 551448fe8..d28fe8c35 100644 --- a/quantecon/ivp.py +++ b/quantecon/ivp.py @@ -6,12 +6,12 @@ \frac{dy}{dt} = f(t,y),\ y(t_0) = y_0 using finite difference methods. The `quantecon.ivp` class uses various -integrators from the `scipy.integrate.ode` module to perform the integration -(i.e., solve the ODE) and parametric B-spline interpolation from -`scipy.interpolate` to approximate the value of the solution between grid -points. The `quantecon.ivp` module also provides a method for computing the -residual of the solution which can be used for assessing the overall accuracy -of the approximated solution. +integrators from the `scipy.integrate.ode` module to perform the +integration (i.e., solve the ODE) and parametric B-spline interpolation +from `scipy.interpolate` to approximate the value of the solution +between grid points. 
The `quantecon.ivp` module also provides a method +for computing the residual of the solution which can be used for +assessing the overall accuracy of the approximated solution. @author : David R. Pugh @date : 2014-09-09 @@ -32,14 +32,14 @@ def __init__(self, f, jac=None): Parameters ---------- f : callable ``f(t, y, *f_args)`` - Right hand side of the system of equations defining the ODE. The - independent variable, ``t``, is a ``scalar``; ``y`` is an ``ndarray`` - of dependent variables with ``y.shape == (n,)``. The function `f` - should return a ``scalar``, ``ndarray`` or ``list`` (but not a - ``tuple``). + Right hand side of the system of equations defining the ODE. + The independent variable, ``t``, is a ``scalar``; ``y`` is + an ``ndarray`` of dependent variables with ``y.shape == + (n,)``. The function `f` should return a ``scalar``, + ``ndarray`` or ``list`` (but not a ``tuple``). jac : callable ``jac(t, y, *jac_args)``, optional(default=None) - Jacobian of the right hand side of the system of equations defining - the ODE. + Jacobian of the right hand side of the system of equations + defining the ODE. .. :math: @@ -130,7 +130,8 @@ def compute_residual(self, traj, ti, k=3, ext=2): # rhs of ode evaluated along approximate solution T = ti.size - rhs_ode = np.vstack(self.f(ti[i], soln[i, 1:], *self.f_params) for i in range(T)) + rhs_ode = np.vstack(self.f(ti[i], soln[i, 1:], *self.f_params) + for i in range(T)) rhs_ode = np.hstack((ti[:, np.newaxis], rhs_ode)) # should be roughly zero everywhere (if approximation is any good!) diff --git a/quantecon/kalman.py b/quantecon/kalman.py index 3f70d82e4..6cc0d85ac 100644 --- a/quantecon/kalman.py +++ b/quantecon/kalman.py @@ -12,6 +12,7 @@ from scipy.linalg import inv from .matrix_eqn import solve_discrete_riccati + class Kalman: r""" Implements the Kalman filter for the Gaussian state space model @@ -127,7 +128,7 @@ def prior_to_filtered(self, y): B = dot(dot(G, Sigma), G.T) + R M = dot(A, inv(B)) self.current_x_hat = x_hat + dot(M, (y - dot(G, x_hat))) - self.current_Sigma = Sigma - dot(M, dot(G, Sigma)) + self.current_Sigma = Sigma - dot(M, dot(G, Sigma)) def filtered_to_forecast(self): """ diff --git a/quantecon/lqcontrol.py b/quantecon/lqcontrol.py index 2755bb393..f2664362c 100644 --- a/quantecon/lqcontrol.py +++ b/quantecon/lqcontrol.py @@ -13,6 +13,7 @@ from scipy.linalg import solve from .matrix_eqn import solve_discrete_riccati + class LQ: r""" This class is for analyzing linear quadratic optimal control @@ -70,13 +71,13 @@ class LQ: state variable x and is `n x n`. Should be symetric and non-negative definite N : array_like(float) - N is the cross product term in the payoff, as above. It should + N is the cross product term in the payoff, as above. It should be `k x n`. A : array_like(float) - A is part of the state transition as described above. It should + A is part of the state transition as described above. It should be `n x n` B : array_like(float) - B is part of the state transition as described above. It should + B is part of the state transition as described above. 
It should be `n x k` C : array_like(float), optional(default=None) C is part of the state transition as described above and @@ -110,14 +111,14 @@ class LQ: def __init__(self, Q, R, A, B, C=None, N=None, beta=1, T=None, Rf=None): # == Make sure all matrices can be treated as 2D arrays == # converter = lambda X: np.atleast_2d(np.asarray(X, dtype='float32')) - self.A, self.B, self.Q, self.R, self.N = \ - list(map(converter, (A, B, Q, R, N))) + self.A, self.B, self.Q, self.R, self.N = list(map(converter, + (A, B, Q, R, N))) # == Record dimensions == # self.k, self.n = self.Q.shape[0], self.R.shape[0] self.beta = beta - if C == None: + if C is None: # == If C not given, then model is deterministic. Set C=0. == # self.j = 1 self.C = np.zeros((self.n, self.j)) @@ -125,7 +126,7 @@ def __init__(self, Q, R, A, B, C=None, N=None, beta=1, T=None, Rf=None): self.C = converter(C) self.j = self.C.shape[1] - if N == None: + if N is None: # == No cross product term in payoff. Set N=0. == # self.N = np.zeros((self.k, self.n)) @@ -258,7 +259,6 @@ def compute_sequence(self, x0, ts_length=None): T = ts_length if ts_length else 100 self.stationary_values() - # == Set up initial condition and arrays to store paths == # x0 = np.asarray(x0) x0 = x0.reshape(self.n, 1) # Make sure x0 is a column vector @@ -280,9 +280,9 @@ def compute_sequence(self, x0, ts_length=None): for t in range(1, T): F = policies.pop() Ax, Bu = dot(A, x_path[:, t-1]), dot(B, u_path[:, t-1]) - x_path[:, t] = Ax + Bu + w_path[:, t] + x_path[:, t] = Ax + Bu + w_path[:, t] u_path[:, t] = - dot(F, x_path[:, t]) Ax, Bu = dot(A, x_path[:, T-1]), dot(B, u_path[:, T-1]) - x_path[:, T] = Ax + Bu + w_path[:, T] + x_path[:, T] = Ax + Bu + w_path[:, T] return x_path, u_path, w_path diff --git a/quantecon/lqnash.py b/quantecon/lqnash.py index f5516dc9b..e78f8f82f 100644 --- a/quantecon/lqnash.py +++ b/quantecon/lqnash.py @@ -12,11 +12,11 @@ def nnash(A, B1, B2, R1, R2, Q1, Q2, S1, S2, W1, W2, M1, M2, problem, player i minimizes .. 
math:: - \sum_{t=0}^{\infty} + \sum_{t=0}^{\infty} \left\{ x_t' r_i x_t + 2 x_t' w_i u_{it} +u_{it}' q_i u_{it} + u_{jt}' s_i u_{jt} + 2 u_{jt}' - m_i u_{it} + m_i u_{it} \right\} subject to the law of motion @@ -81,7 +81,7 @@ def nnash(A, B1, B2, R1, R2, Q1, Q2, S1, S2, W1, W2, M1, M2, """ # == Unload parameters and make sure everything is an array == # - params = A, B1, B2, R1, R2, Q1, Q2, S1, S2, W1, W2, M1, M2 + params = A, B1, B2, R1, R2, Q1, Q2, S1, S2, W1, W2, M1, M2 params = map(np.asarray, params) A, B1, B2, R1, R2, Q1, Q2, S1, S2, W1, W2, M1, M2 = params @@ -121,9 +121,9 @@ def nnash(A, B1, B2, R1, R2, Q1, Q2, S1, S2, W1, W2, M1, M2, # break up the computation of F1, F2 F1_left = v1 - dot(H1.dot(B2)+G1.dot(M1.T), - H2.dot(B1)+G2.dot(M2.T)) + H2.dot(B1)+G2.dot(M2.T)) F1_right = H1.dot(A)+G1.dot(W1.T) - dot(H1.dot(B2)+G1.dot(M1.T), - H2.dot(A)+G2.dot(W2.T)) + H2.dot(A)+G2.dot(W2.T)) F1 = solve(F1_left, F1_right) F2 = H2.dot(A)+G2.dot(W2.T) - dot(H2.dot(B1)+G2.dot(M2.T), F1) @@ -133,9 +133,9 @@ def nnash(A, B1, B2, R1, R2, Q1, Q2, S1, S2, W1, W2, M1, M2, Pi2 = R2 + dot(F1.T, S2.dot(F1)) P1 = dot(Lambda1.T, P1.dot(Lambda1)) + Pi1 - \ - dot(dot(Lambda1.T, P1.dot(B1)) + W1 - F2.T.dot(M1), F1) + dot(dot(Lambda1.T, P1.dot(B1)) + W1 - F2.T.dot(M1), F1) P2 = dot(Lambda2.T, P2.dot(Lambda2)) + Pi2 - \ - dot(dot(Lambda2.T, P2.dot(B2)) + W2 - F1.T.dot(M2), F2) + dot(dot(Lambda2.T, P2.dot(B2)) + W2 - F1.T.dot(M2), F2) dd = np.max(np.abs(F10 - F1)) + np.max(np.abs(F20 - F2)) @@ -147,4 +147,3 @@ def nnash(A, B1, B2, R1, R2, Q1, Q2, S1, S2, W1, W2, M1, M2, raise ValueError(msg.format(max_iter)) return F1, F2, P1, P2 - diff --git a/quantecon/lss.py b/quantecon/lss.py index 1035df9d2..2845075cc 100644 --- a/quantecon/lss.py +++ b/quantecon/lss.py @@ -16,8 +16,8 @@ import numpy as np from numpy import dot from numpy.random import multivariate_normal -from scipy.linalg import eig, solve -from .matrix_eqn import solve_discrete_lyapunov +from scipy.linalg import solve + class LSS: """ @@ -67,11 +67,11 @@ def __init__(self, A, C, G, mu_0=None, Sigma_0=None): self.k, self.n = self.G.shape self.m = self.C.shape[1] # == Default initial conditions == # - if mu_0 == None: + if mu_0 is None: self.mu_0 = np.zeros((self.n, 1)) else: self.mu_0 = np.asarray(mu_0) - if Sigma_0 == None: + if Sigma_0 is None: self.Sigma_0 = np.zeros((self.n, self.n)) else: self.Sigma_0 = Sigma_0 @@ -105,7 +105,7 @@ def simulate(self, ts_length=100): """ x = np.empty((self.n, ts_length)) - x[:,0] = multivariate_normal(self.mu_0.flatten(), self.Sigma_0) + x[:, 0] = multivariate_normal(self.mu_0.flatten(), self.Sigma_0) w = np.random.randn(self.m, ts_length-1) for t in range(ts_length-1): x[:, t+1] = self.A.dot(x[:, t]) + self.C.dot(w[:, t]) @@ -230,7 +230,6 @@ def stationary_distributions(self, max_iter=200, tol=1e-5): return mu_x_star, mu_y_star, Sigma_x_star, Sigma_y_star - def geometric_sums(self, beta, x_t): """ Forecast the geometric sums @@ -263,4 +262,3 @@ def geometric_sums(self, beta, x_t): S_y = self.G.dot(S_x) return S_x, S_y - diff --git a/quantecon/matrix_eqn.py b/quantecon/matrix_eqn.py index 2ec5627cc..a7c5ce39d 100644 --- a/quantecon/matrix_eqn.py +++ b/quantecon/matrix_eqn.py @@ -64,7 +64,7 @@ def solve_discrete_lyapunov(A, B, max_it=50, method="doubling"): Represents the value V """ - if method=="doubling": + if method == "doubling": A, B = list(map(np.atleast_2d, [A, B])) alpha0 = A gamma0 = B @@ -87,7 +87,7 @@ def solve_discrete_lyapunov(A, B, max_it=50, method="doubling"): msg = "Exceeded maximum iterations {}, 
check input matrics" raise ValueError(msg.format(n_its)) - elif method=="bartels-stewart": + elif method == "bartels-stewart": gamma1 = sp_solve_discrete_lyapunov(A, B) else: @@ -149,7 +149,7 @@ def solve_discrete_riccati(A, B, Q, R, N=None, tolerance=1e-10, max_iter=500): A, B, Q, R = np.atleast_2d(A, B, Q, R) n, k = R.shape[0], Q.shape[0] I = np.identity(k) - if N == None: + if N is None: N = np.zeros((n, k)) else: N = np.atleast_2d(N) @@ -171,7 +171,7 @@ def solve_discrete_riccati(A, B, Q, R, N=None, tolerance=1e-10, max_iter=500): f2 = gamma * f1 f3 = np.linalg.cond(I + dot(G0, H0)) f_gamma = max(f1, f2, f3) - if f_gamma < current_min: + if f_gamma < current_min: best_gamma = gamma current_min = f_gamma @@ -183,8 +183,6 @@ def solve_discrete_riccati(A, B, Q, R, N=None, tolerance=1e-10, max_iter=500): gamma = best_gamma R_hat = R + gamma * BB - - # == Initial conditions == # Q_tilde = - Q + dot(N.T, solve(R_hat, N + gamma * BTA)) + gamma * I G0 = dot(B, solve(R_hat, B.T)) @@ -210,4 +208,3 @@ def solve_discrete_riccati(A, B, Q, R, N=None, tolerance=1e-10, max_iter=500): i += 1 return H1 + gamma * I # Return X - diff --git a/quantecon/mc_tools.py b/quantecon/mc_tools.py index 578615c3b..6e383ec5e 100644 --- a/quantecon/mc_tools.py +++ b/quantecon/mc_tools.py @@ -297,9 +297,9 @@ def mc_sample_path(P, init=0, sample_size=1000): return X -#---------------------------------------------------------------------# +# ------------------------------------------------------------------- # # Set up the docstrings for the functions -#---------------------------------------------------------------------# +# ------------------------------------------------------------------- # # For drawing a sample path _sample_path_docstr = \ @@ -325,11 +325,10 @@ def mc_sample_path(P, init=0, sample_size=1000): """ # set docstring for functions -mc_sample_path.__doc__ = _sample_path_docstr.format(p_arg= -"""P : array_like(float, ndim=2) +mc_sample_path.__doc__ = _sample_path_docstr.format(p_arg=""" + P : array_like(float, ndim=2) A Markov transition matrix. - -""") + """) # set docstring for methods diff --git a/quantecon/models/optgrowth.py b/quantecon/models/optgrowth.py index e5c3162a7..dde6f79e1 100644 --- a/quantecon/models/optgrowth.py +++ b/quantecon/models/optgrowth.py @@ -12,6 +12,7 @@ from scipy.optimize import fminbound from scipy import interp + class GrowthModel(object): """ @@ -39,12 +40,11 @@ class GrowthModel(object): """ def __init__(self, f=lambda k: k**0.65, beta=0.95, u=np.log, - grid_max=2, grid_size=150): + grid_max=2, grid_size=150): self.u, self.f, self.beta = u, f, beta self.grid = np.linspace(1e-6, grid_max, grid_size) - def bellman_operator(self, w, compute_policy=False): """ The approximate Bellman operator, which computes and returns the @@ -67,7 +67,7 @@ def bellman_operator(self, w, compute_policy=False): # == set Tw[i] equal to max_c { u(c) + beta w(f(k_i) - c)} == # Tw = np.empty(len(w)) for i, k in enumerate(self.grid): - objective = lambda c: - self.u(c) - self.beta * Aw(self.f(k) - c) + objective = lambda c: - self.u(c) - self.beta * Aw(self.f(k) - c) c_star = fminbound(objective, 1e-6, self.f(k)) if compute_policy: # sigma[i] = argmax_c { u(c) + beta w(f(k_i) - c)} @@ -79,7 +79,6 @@ def bellman_operator(self, w, compute_policy=False): else: return Tw - def compute_greedy(self, w): """ Compute the w-greedy policy on the grid points. 
diff --git a/quantecon/quad.py b/quantecon/quad.py index 8a1f07ec2..6ea94d6f8 100644 --- a/quantecon/quad.py +++ b/quantecon/quad.py @@ -27,9 +27,9 @@ 'qnwsimp', 'qnwtrap', 'qnwunif', 'quadrect', 'qnwbeta', 'qnwgamma'] -## ------------------ ## -#- Exported Functions -# -## ------------------ ## +# ------------------ # +# Exported Functions # +# ------------------ # def qnwcheb(n, a=1, b=1): @@ -603,9 +603,9 @@ def qnwgamma(n, a=None): """ return _make_multidim_func(_qnwgamma1, n, a) -## ------------------ ## -#- Internal Functions -# -## ------------------ ## +# ------------------ # +# Internal Functions # +# ------------------ # def _make_multidim_func(one_d_func, n, *args): diff --git a/quantecon/rank_nullspace.py b/quantecon/rank_nullspace.py index ad39d16a6..6eb553c41 100644 --- a/quantecon/rank_nullspace.py +++ b/quantecon/rank_nullspace.py @@ -1,6 +1,7 @@ import numpy as np from numpy.linalg import svd + def rank_est(A, atol=1e-13, rtol=0): """ Estimate the rank (i.e. the dimension of the nullspace) of a matrix. @@ -92,4 +93,3 @@ def nullspace(A, atol=1e-13, rtol=0): ns = vh[nnz:].conj().T return ns - diff --git a/quantecon/robustlq.py b/quantecon/robustlq.py index f316ac637..784cc3135 100644 --- a/quantecon/robustlq.py +++ b/quantecon/robustlq.py @@ -15,6 +15,7 @@ from scipy.linalg import solve, inv, det from .matrix_eqn import solve_discrete_lyapunov + class RBLQ: r""" Provides methods for analysing infinite horizon robust LQ control @@ -354,4 +355,3 @@ def evaluate_F(self, F): o_F = (ho + beta * tr) / (1 - beta) return K_F, P_F, d_F, O_F, o_F - diff --git a/quantecon/tauchen.py b/quantecon/tauchen.py index 3c9445707..94a442f0a 100644 --- a/quantecon/tauchen.py +++ b/quantecon/tauchen.py @@ -10,6 +10,7 @@ import numpy as np from scipy.stats import norm + def approx_markov(rho, sigma_u, m=3, n=7): """ Computes the Markov matrix associated with a discretized version of diff --git a/quantecon/timing.py b/quantecon/timing.py index 1105e3ca2..d59471e3c 100644 --- a/quantecon/timing.py +++ b/quantecon/timing.py @@ -5,6 +5,7 @@ Provides Matlab-like tic, tac and toc functions. """ + class __Timer__: '''Computes elapsed time, between tic, tac, and toc. 
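The remaining timing.py hunks tidy this Matlab-style timer; a quick usage sketch, with arbitrary workloads standing in for real computations:

import numpy as np
from quantecon.timing import tic, tac, toc

tic()                 # start (or reset) the timer
a = np.random.randn(1000, 1000)
b = a.dot(a)
tac()                 # prints and returns seconds since the previous tic/tac
c = np.linalg.inv(a)
toc()                 # prints and returns seconds since the original tic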
@@ -25,14 +26,12 @@ class __Timer__: def tic(self): """Resets timer.""" - import time t = time.time() self.start = t self.last = t - def tac(self): """Returns and prints time elapsed since last tic()""" @@ -48,7 +47,6 @@ def tac(self): print("TAC: Elapsed: {} seconds.".format(elapsed)) return elapsed - def toc(self): """Returns and prints time elapsed since last tic() or tac() whichever occured last""" @@ -62,20 +60,22 @@ def toc(self): self.last = t elapsed = t-self.start - print("TOC: Elapsed: {} seconds.".format(elapsed)) return elapsed __timer__ = __Timer__() + def tic(): """Saves time for future use with tac or toc.""" return __timer__.tic() + def tac(): """Prints and returns elapsed time since last tic, tac or toc.""" return __timer__.tac() + def toc(): """Prints and returns elapsed time since last tic, tac or toc.""" return __timer__.toc() diff --git a/setup.py b/setup.py index e63f3a135..6197e343a 100644 --- a/setup.py +++ b/setup.py @@ -1,14 +1,23 @@ from distutils.core import setup import os -#-Write a versions.py file for class attribute-# +# Write a versions.py file for class attribute VERSION = '0.1.6' + def write_version_py(filename=None): - doc = "\"\"\"\nThis is a VERSION file and should NOT be manually altered\n\"\"\"" - doc += "\nversion = '%s'" % VERSION - + doc = ''' + """ + This is a VERSION file and should NOT be manually altered + + """ + + version = "%s" + ''' % VERSION + + return doc + if not filename: filename = os.path.join( os.path.dirname(__file__), 'quantecon', 'version.py') @@ -21,7 +30,7 @@ def write_version_py(filename=None): write_version_py() -#-Setup-# +# Setup setup(name='quantecon', packages=['quantecon', 'quantecon.models', "quantecon.tests"], @@ -29,6 +38,6 @@ def write_version_py(filename=None): description='Core package of the QuantEcon library', author='Thomas J. Sargent and John Stachurski (Project coordinators)', author_email='john.stachurski@gmail.com', - url='https://github.com/QuantEcon/QuantEcon.py', # URL to the github repo + url='https://github.com/QuantEcon/QuantEcon.py', # URL to the repo keywords=['quantitative', 'economics'] )
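A closing note on the setup.py hunk: the new write_version_py returns doc before the block that resolves filename, so the code that actually writes quantecon/version.py becomes unreachable, and the indented triple-quoted template would leave leading whitespace in the generated module. Below is a dedented sketch that keeps the same VERSION constant and target path but completes the write; the with-block is my own choice, not something taken from the diff:

import os

VERSION = '0.1.6'


def write_version_py(filename=None):
    # Build an unindented version.py: a short docstring plus the version string
    doc = '"""\nThis is a VERSION file and should NOT be manually altered\n"""\n'
    doc += "version = '%s'\n" % VERSION
    if not filename:
        filename = os.path.join(
            os.path.dirname(__file__), 'quantecon', 'version.py')
    with open(filename, 'w') as f:
        f.write(doc)


write_version_py()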