%%javascript
// Notebook hook: override the output-area auto-scroll heuristic so long
// outputs render in full instead of inside a small scrolling subwindow.
IPython.OutputArea.prototype._should_scroll = function(lines) {
return false;}
# keep output cells from shifting to autoscroll: little scrolling
# subwindows within the notebook are an annoyance...
# set up the environment by reading in every library we might need:
# os... graphics... data manipulation... time... math... statistics...
import sys
import os
from urllib.request import urlretrieve
import matplotlib as mpl
import matplotlib.pyplot as plt
from IPython.display import Image
import pandas as pd
from pandas import DataFrame, Series
from datetime import datetime
import scipy as sp
import numpy as np
import math
import random
import seaborn as sns
import statsmodels
import statsmodels.api as sm
import statsmodels.formula.api as smf
# report library versions...
/Users/delong/anaconda3/lib/python3.6/site-packages/statsmodels/compat/pandas.py:56: FutureWarning: The pandas.core.datetools module is deprecated and will be removed in a future version. Please use the pandas.tseries module instead. from pandas.core import datetools
%matplotlib inline
# put graphs into the notebook itself...
# graphics setup: seaborn-whitegrid and figure size...
# NOTE(review): 'seaborn-whitegrid' was renamed 'seaborn-v0_8-whitegrid' in
# matplotlib >= 3.6 — confirm the installed matplotlib version if this raises
plt.style.use('seaborn-whitegrid')
# widen the notebook's default figure size to 12 x 10 inches by mutating
# the rcParams list in place and writing it back
figure_size = plt.rcParams["figure.figsize"]
figure_size[0] = 12
figure_size[1] = 10
plt.rcParams["figure.figsize"] = figure_size
# Read in the previously-downloaded long-run quarterly real GDP and
# 15-64 unemployment rate series, parse the date index, and bundle the
# dataframe with its source notes and links in a single dict object.
Source_URL = 'http://delong.typepad.com/2018-03-31_q_unemp15-64_realgdp-1.csv'

GDPUN1564_df = pd.read_csv(Source_URL, parse_dates=True, index_col=0)

# one dict literal instead of key-by-key assignment: data plus provenance
GDPUN1564_dict = {
    "Source": "FRED: http://fred.stlouisfed.org",
    "SourceURL": Source_URL,
    "SourceNotes": "Quarterly: Real GDP (2009) and 15-64 Unemployment Rate",
    "SourceDescription": "https://fred.stlouisfed.org/graph/?id=LRUN64TTUSQ156S, https://fred.stlouisfed.org/graph/?id=GDPC1,",
    "df": GDPUN1564_df,
}

GDPUN1564_df.head()
# Add three derived columns to GDPUN1564_df: a 0-based observation
# counter, log real GDP, and the unemployment rate lagged four
# quarters (one year).
GDPUN1564_df.insert(0, 'Observation', range(len(GDPUN1564_df)))
GDPUN1564_df["LN_RGDP"] = np.log(GDPUN1564_df["RGDP"])
GDPUN1564_df["UN_lag"] = GDPUN1564_df["UNEMP1564"].shift(4)
import statsmodels.api as sm
import statsmodels.formula.api as smf
# OLS of the 15-64 unemployment rate on its own one-year (four-quarter)
# lag, over the full sample; the first four rows are dropped because the
# lag is NaN there.
#
# BUG FIX: OLS is exported by statsmodels.api (sm), not the formula
# interface; smf.OLS was a deprecated alias removed in statsmodels >= 0.11.
Unemployment = GDPUN1564_df.UNEMP1564[4:]
X = GDPUN1564_df.UN_lag[4:]
Unemployment_Lagged_1_Year = sm.add_constant(X)
results = sm.OLS(Unemployment, Unemployment_Lagged_1_Year).fit()
print(results.summary())
OLS Regression Results ============================================================================== Dep. Variable: UNEMP1564 R-squared: 0.564 Model: OLS Adj. R-squared: 0.562 Method: Least Squares F-statistic: 241.0 Date: Fri, 06 Apr 2018 Prob (F-statistic): 2.12e-35 Time: 15:13:55 Log-Likelihood: -273.57 No. Observations: 188 AIC: 551.1 Df Residuals: 186 BIC: 557.6 Df Model: 1 Covariance Type: nonrobust ============================================================================== coef std err t P>|t| [0.025 0.975] ------------------------------------------------------------------------------ const 1.5478 0.323 4.796 0.000 0.911 2.185 UN_lag 0.7572 0.049 15.523 0.000 0.661 0.853 ============================================================================== Omnibus: 60.884 Durbin-Watson: 0.157 Prob(Omnibus): 0.000 Jarque-Bera (JB): 119.856 Skew: 1.577 Prob(JB): 9.41e-27 Kurtosis: 5.315 Cond. No. 28.7 ============================================================================== Warnings: [1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
import statsmodels.api as sm
import statsmodels.formula.api as smf
# Same lagged-unemployment regression restricted to rows 4..149 of the
# sample (rows 0..3 have a NaN lag).
#
# BUG FIX: OLS is exported by statsmodels.api (sm), not the formula
# interface; smf.OLS was a deprecated alias removed in statsmodels >= 0.11.
Unemployment = GDPUN1564_df.UNEMP1564[4:150]
X = GDPUN1564_df.UN_lag[4:150]
Unemployment_Lagged_1_Year = sm.add_constant(X)
results = sm.OLS(Unemployment, Unemployment_Lagged_1_Year).fit()
print(results.summary())
OLS Regression Results ============================================================================== Dep. Variable: UNEMP1564 R-squared: 0.552 Model: OLS Adj. R-squared: 0.549 Method: Least Squares F-statistic: 177.3 Date: Fri, 06 Apr 2018 Prob (F-statistic): 7.08e-27 Time: 15:14:52 Log-Likelihood: -199.57 No. Observations: 146 AIC: 403.1 Df Residuals: 144 BIC: 409.1 Df Model: 1 Covariance Type: nonrobust ============================================================================== coef std err t P>|t| [0.025 0.975] ------------------------------------------------------------------------------ const 1.5707 0.361 4.352 0.000 0.857 2.284 UN_lag 0.7476 0.056 13.317 0.000 0.637 0.859 ============================================================================== Omnibus: 43.139 Durbin-Watson: 0.186 Prob(Omnibus): 0.000 Jarque-Bera (JB): 73.594 Skew: 1.445 Prob(JB): 1.05e-16 Kurtosis: 4.934 Cond. No. 30.0 ============================================================================== Warnings: [1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
import statsmodels.api as sm
import statsmodels.formula.api as smf
# Same lagged-unemployment regression on the tail of the sample.
# NOTE(review): the previous cell's subsample ended at row 150
# (exclusive); starting here at 151 silently skips row 150 — confirm
# that gap is intended.
#
# BUG FIX: OLS is exported by statsmodels.api (sm), not the formula
# interface; smf.OLS was a deprecated alias removed in statsmodels >= 0.11.
Unemployment = GDPUN1564_df.UNEMP1564[151:]
X = GDPUN1564_df.UN_lag[151:]
Unemployment_Lagged_1_Year = sm.add_constant(X)
results = sm.OLS(Unemployment, Unemployment_Lagged_1_Year).fit()
print(results.summary())
OLS Regression Results ============================================================================== Dep. Variable: UNEMP1564 R-squared: 0.535 Model: OLS Adj. R-squared: 0.523 Method: Least Squares F-statistic: 44.86 Date: Fri, 06 Apr 2018 Prob (F-statistic): 5.58e-08 Time: 15:14:46 Log-Likelihood: -68.941 No. Observations: 41 AIC: 141.9 Df Residuals: 39 BIC: 145.3 Df Model: 1 Covariance Type: nonrobust ============================================================================== coef std err t P>|t| [0.025 0.975] ------------------------------------------------------------------------------ const 1.8016 0.806 2.235 0.031 0.171 3.432 UN_lag 0.7413 0.111 6.698 0.000 0.517 0.965 ============================================================================== Omnibus: 15.916 Durbin-Watson: 0.097 Prob(Omnibus): 0.000 Jarque-Bera (JB): 17.727 Skew: 1.478 Prob(JB): 0.000141 Kurtosis: 4.278 Cond. No. 28.7 ============================================================================== Warnings: [1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
import statsmodels.api as sm
import statsmodels.formula.api as smf
# Repeat of the tail-sample regression, this time leaving the summary as
# the cell's last expression so the notebook renders it as a rich table.
#
# BUG FIX: OLS is exported by statsmodels.api (sm), not the formula
# interface; smf.OLS was a deprecated alias removed in statsmodels >= 0.11.
Unemp = GDPUN1564_df.UNEMP1564[151:]
Un_lag = GDPUN1564_df.UN_lag[151:]
Un_lag = sm.add_constant(Un_lag)
results = sm.OLS(Unemp, Un_lag).fit()
results.summary()
Dep. Variable: | UNEMP1564 | R-squared: | 0.535 |
---|---|---|---|
Model: | OLS | Adj. R-squared: | 0.523 |
Method: | Least Squares | F-statistic: | 44.86 |
Date: | Fri, 06 Apr 2018 | Prob (F-statistic): | 5.58e-08 |
Time: | 13:48:29 | Log-Likelihood: | -68.941 |
No. Observations: | 41 | AIC: | 141.9 |
Df Residuals: | 39 | BIC: | 145.3 |
Df Model: | 1 | ||
Covariance Type: | nonrobust |
coef | std err | t | P>|t| | [0.025 | 0.975] | |
---|---|---|---|---|---|---|
const | 1.8016 | 0.806 | 2.235 | 0.031 | 0.171 | 3.432 |
UN_lag | 0.7413 | 0.111 | 6.698 | 0.000 | 0.517 | 0.965 |
Omnibus: | 15.916 | Durbin-Watson: | 0.097 |
---|---|---|---|
Prob(Omnibus): | 0.000 | Jarque-Bera (JB): | 17.727 |
Skew: | 1.478 | Prob(JB): | 0.000141 |
Kurtosis: | 4.278 | Cond. No. | 28.7 |
# convergence to the balanced growth path
#
# we are going to want to see what happens for lots of different model
# parameter values and initial conditions, so the whole simulation is
# wrapped in a function we can invoke with a single line...
def sgm_bgp_100yr_run(L0, E0, K0, n=0.01,
                      g=0.02, s=0.15, alpha=0.5, delta=0.03,
                      T=100):
    """Simulate the Solow growth model for T years and plot the run.

    Parameters
    ----------
    L0, E0, K0 : initial labor force, labor efficiency, capital stock
    n, g : labor-force and efficiency-of-labor growth rates
    s : savings rate
    alpha : output elasticity of capital (decreasing-returns parameter)
    delta : depreciation rate
    T : number of simulated years (rows in the dataframe)

    Plots six panels comparing the simulated path with the
    balanced-growth-path (BGP) values implied by the parameters, then
    prints the parameter values.  Returns the simulation dataframe
    (previously returned None; callers that ignore the return value
    are unaffected).
    """
    sg_df = pd.DataFrame(index=range(T),
                         columns=['Labor',
                                  'Efficiency',
                                  'Capital',
                                  'Output',
                                  'Output_per_Worker',
                                  'Capital_Output_Ratio',
                                  'BGP_Output',
                                  'BGP_Output_per_Worker',
                                  'BGP_Capital_Output_Ratio',
                                  'BGP_Capital'],
                         dtype='float')

    # balanced-growth-path capital-output ratio implied by the parameters
    kappa = s / (n + g + delta)

    # initial conditions — written with .loc because chained item
    # assignment (sg_df.Labor[0] = ...) does not reliably write through
    # in modern pandas
    sg_df.loc[0, 'Labor'] = L0
    sg_df.loc[0, 'Efficiency'] = E0
    sg_df.loc[0, 'Capital'] = K0
    sg_df.loc[0, 'Output'] = K0**alpha * (L0 * E0)**(1 - alpha)
    sg_df.loc[0, 'Output_per_Worker'] = sg_df.loc[0, 'Output'] / L0
    sg_df.loc[0, 'Capital_Output_Ratio'] = K0 / sg_df.loc[0, 'Output']
    sg_df.loc[0, 'BGP_Capital_Output_Ratio'] = kappa
    # BUG FIX: the original multiplied by (alpha/(1-alpha)) and
    # (1/(1-alpha)) here instead of exponentiating, so the year-0 BGP
    # values disagreed with the formulas used inside the loop below
    sg_df.loc[0, 'BGP_Output_per_Worker'] = E0 * kappa**(alpha / (1 - alpha))
    sg_df.loc[0, 'BGP_Output'] = sg_df.loc[0, 'BGP_Output_per_Worker'] * L0
    sg_df.loc[0, 'BGP_Capital'] = kappa**(1 / (1 - alpha)) * E0 * L0

    # BUG FIX: the original iterated range(T) and so wrote row T of a
    # T-row frame; range(T-1) fills exactly rows 1 .. T-1
    for i in range(T - 1):
        sg_df.loc[i + 1, 'Labor'] = sg_df.loc[i, 'Labor'] * (1 + n)
        sg_df.loc[i + 1, 'Efficiency'] = sg_df.loc[i, 'Efficiency'] * (1 + g)
        sg_df.loc[i + 1, 'Capital'] = (sg_df.loc[i, 'Capital'] * (1 - delta) +
                                       sg_df.loc[i, 'Output'] * s)
        sg_df.loc[i + 1, 'Output'] = (
            sg_df.loc[i + 1, 'Capital']**alpha *
            (sg_df.loc[i + 1, 'Labor'] *
             sg_df.loc[i + 1, 'Efficiency'])**(1 - alpha))
        sg_df.loc[i + 1, 'Output_per_Worker'] = (sg_df.loc[i + 1, 'Output'] /
                                                 sg_df.loc[i + 1, 'Labor'])
        sg_df.loc[i + 1, 'Capital_Output_Ratio'] = (sg_df.loc[i + 1, 'Capital'] /
                                                    sg_df.loc[i + 1, 'Output'])
        sg_df.loc[i + 1, 'BGP_Capital_Output_Ratio'] = kappa
        sg_df.loc[i + 1, 'BGP_Output_per_Worker'] = (
            sg_df.loc[i + 1, 'Efficiency'] * kappa**(alpha / (1 - alpha)))
        sg_df.loc[i + 1, 'BGP_Output'] = (
            sg_df.loc[i + 1, 'BGP_Output_per_Worker'] *
            sg_df.loc[i + 1, 'Labor'])
        sg_df.loc[i + 1, 'BGP_Capital'] = (kappa**(1 / (1 - alpha)) *
                                           sg_df.loc[i + 1, 'Efficiency'] *
                                           sg_df.loc[i + 1, 'Labor'])

    # six-panel plot: simulated path vs. balanced growth path
    fig = plt.figure(figsize=(12, 12))

    ax1 = plt.subplot(3, 2, 1)
    sg_df.Labor.plot(ax=ax1, title="Labor Force")
    plt.ylabel("Parameters")
    plt.ylim(0, )

    ax2 = plt.subplot(3, 2, 2)
    sg_df.Efficiency.plot(ax=ax2, title="Efficiency of Labor")
    plt.ylim(0, )

    ax3 = plt.subplot(3, 2, 3)
    sg_df.BGP_Capital.plot(ax=ax3, title="BGP Capital Stock")
    sg_df.Capital.plot(ax=ax3, title="Capital Stock")
    plt.ylabel("Values")
    plt.ylim(0, )

    ax4 = plt.subplot(3, 2, 4)
    sg_df.BGP_Output.plot(ax=ax4, title="BGP Output")
    sg_df.Output.plot(ax=ax4, title="Output")
    plt.ylim(0, )

    ax5 = plt.subplot(3, 2, 5)
    sg_df.BGP_Output_per_Worker.plot(ax=ax5,
                                     title="BGP Output per Worker")
    sg_df.Output_per_Worker.plot(ax=ax5, title="Output per Worker")
    plt.xlabel("Years")
    plt.ylabel("Ratios")
    plt.ylim(0, )

    ax6 = plt.subplot(3, 2, 6)
    sg_df.BGP_Capital_Output_Ratio.plot(ax=ax6,
                                        title="BGP Capital-Output Ratio")
    sg_df.Capital_Output_Ratio.plot(ax=ax6,
                                    title="Capital-Output Ratio")
    plt.xlabel("Years")
    plt.ylim(0, )

    plt.suptitle('Solow Growth Model: Simulation Run', size=20)
    plt.show()

    print(n, "is the labor force growth rate")
    print(g, "is the efficiency of labor growth rate")
    print(delta, "is the depreciation rate")
    print(s, "is the savings rate")
    print(alpha, "is the decreasing-returns-to-scale parameter")
    return sg_df
# kick off a 100-year run from L=1000, E=1, K=100 with slow population
# and efficiency growth and a high savings rate
sgm_bgp_100yr_run(
    1000, 1, 100,
    n=0.005, g=0.01, s=0.225, alpha=0.5, delta=0.03)
# convergence to the balanced growth path—log graphs
#
# the same simulation, but with every panel plotted in logs so constant
# growth rates show up as straight lines...
def log_sgm_bgp_100yr_run(L0, E0, K0, n=0.01, g=0.02,
                          s=0.15, alpha=0.5, delta=0.03, T=100):
    """Simulate the Solow growth model for T years; plot it on a log scale.

    Parameters
    ----------
    L0, E0, K0 : initial labor force, labor efficiency, capital stock
    n, g : labor-force and efficiency-of-labor growth rates
    s : savings rate
    alpha : output elasticity of capital (decreasing-returns parameter)
    delta : depreciation rate
    T : number of simulated years (rows in the dataframe)

    Plots six log-scale panels comparing the simulated path with the
    balanced-growth-path (BGP) values implied by the parameters, then
    prints the parameter values.  Returns the simulation dataframe
    (previously returned None; callers that ignore the return value
    are unaffected).
    """
    sg_df = pd.DataFrame(index=range(T),
                         columns=['Labor',
                                  'Efficiency',
                                  'Capital',
                                  'Output',
                                  'Output_per_Worker',
                                  'Capital_Output_Ratio',
                                  'BGP_Output',
                                  'BGP_Output_per_Worker',
                                  'BGP_Capital_Output_Ratio',
                                  'BGP_Capital'],
                         dtype='float')

    # balanced-growth-path capital-output ratio implied by the parameters
    kappa = s / (n + g + delta)

    # initial conditions — written with .loc because chained item
    # assignment (sg_df.Labor[0] = ...) does not reliably write through
    # in modern pandas
    sg_df.loc[0, 'Labor'] = L0
    sg_df.loc[0, 'Efficiency'] = E0
    sg_df.loc[0, 'Capital'] = K0
    sg_df.loc[0, 'Output'] = K0**alpha * (L0 * E0)**(1 - alpha)
    sg_df.loc[0, 'Output_per_Worker'] = sg_df.loc[0, 'Output'] / L0
    sg_df.loc[0, 'Capital_Output_Ratio'] = K0 / sg_df.loc[0, 'Output']
    sg_df.loc[0, 'BGP_Capital_Output_Ratio'] = kappa
    # BUG FIX: the original multiplied by (alpha/(1-alpha)) here instead
    # of exponentiating, so the year-0 BGP output values disagreed with
    # the formula used inside the loop below
    sg_df.loc[0, 'BGP_Output_per_Worker'] = E0 * kappa**(alpha / (1 - alpha))
    sg_df.loc[0, 'BGP_Output'] = sg_df.loc[0, 'BGP_Output_per_Worker'] * L0
    sg_df.loc[0, 'BGP_Capital'] = kappa**(1 / (1 - alpha)) * E0 * L0

    # BUG FIX: the original iterated range(T) and so wrote row T of a
    # T-row frame; range(T-1) fills exactly rows 1 .. T-1
    for i in range(T - 1):
        sg_df.loc[i + 1, 'Labor'] = sg_df.loc[i, 'Labor'] * (1 + n)
        sg_df.loc[i + 1, 'Efficiency'] = sg_df.loc[i, 'Efficiency'] * (1 + g)
        sg_df.loc[i + 1, 'Capital'] = (sg_df.loc[i, 'Capital'] * (1 - delta) +
                                       sg_df.loc[i, 'Output'] * s)
        sg_df.loc[i + 1, 'Output'] = (
            sg_df.loc[i + 1, 'Capital']**alpha *
            (sg_df.loc[i + 1, 'Labor'] *
             sg_df.loc[i + 1, 'Efficiency'])**(1 - alpha))
        sg_df.loc[i + 1, 'Output_per_Worker'] = (sg_df.loc[i + 1, 'Output'] /
                                                 sg_df.loc[i + 1, 'Labor'])
        sg_df.loc[i + 1, 'Capital_Output_Ratio'] = (sg_df.loc[i + 1, 'Capital'] /
                                                    sg_df.loc[i + 1, 'Output'])
        sg_df.loc[i + 1, 'BGP_Capital_Output_Ratio'] = kappa
        sg_df.loc[i + 1, 'BGP_Output_per_Worker'] = (
            sg_df.loc[i + 1, 'Efficiency'] * kappa**(alpha / (1 - alpha)))
        sg_df.loc[i + 1, 'BGP_Output'] = (
            sg_df.loc[i + 1, 'BGP_Output_per_Worker'] *
            sg_df.loc[i + 1, 'Labor'])
        sg_df.loc[i + 1, 'BGP_Capital'] = (kappa**(1 / (1 - alpha)) *
                                           sg_df.loc[i + 1, 'Efficiency'] *
                                           sg_df.loc[i + 1, 'Labor'])

    # six-panel plot, everything in logs
    fig = plt.figure(figsize=(12, 12))

    ax1 = plt.subplot(3, 2, 1)
    np.log(sg_df.Labor).plot(ax=ax1, title="Labor Force")
    plt.ylabel("Log Values")
    plt.ylim(0, )

    ax2 = plt.subplot(3, 2, 2)
    np.log(sg_df.Efficiency).plot(ax=ax2, title="Efficiency of Labor")
    plt.ylim(0, )

    ax3 = plt.subplot(3, 2, 3)
    np.log(sg_df.BGP_Capital).plot(ax=ax3, title="BGP Capital Stock")
    np.log(sg_df.Capital).plot(ax=ax3, title="Capital Stock")
    plt.ylabel("Log Values")
    plt.ylim(0, )

    ax4 = plt.subplot(3, 2, 4)
    np.log(sg_df.BGP_Output).plot(ax=ax4, title="BGP Output")
    np.log(sg_df.Output).plot(ax=ax4, title="Output")
    plt.ylim(0, )

    ax5 = plt.subplot(3, 2, 5)
    np.log(sg_df.BGP_Output_per_Worker).plot(ax=ax5,
                                             title="BGP Output per Worker")
    np.log(sg_df.Output_per_Worker).plot(ax=ax5, title="Output per Worker")
    plt.xlabel("Years")
    plt.ylabel("Log Ratios")
    plt.ylim(0, )

    ax6 = plt.subplot(3, 2, 6)
    np.log(sg_df.BGP_Capital_Output_Ratio).plot(ax=ax6,
                                                title="BGP Capital-Output Ratio")
    np.log(sg_df.Capital_Output_Ratio).plot(ax=ax6,
                                            title="Capital-Output Ratio")
    plt.xlabel("Years")
    plt.ylim(0, )

    plt.suptitle('Solow Growth Model: Simulation Run: Log Scale', size=20)
    plt.show()

    print(n, "is the labor force growth rate")
    print(g, "is the efficiency of labor growth rate")
    print(delta, "is the depreciation rate")
    print(s, "is the savings rate")
    print(alpha, "is the decreasing-returns-to-scale parameter")
    return sg_df
# same kind of run, plotted in logs, with a slightly higher savings rate
log_sgm_bgp_100yr_run(
    1000, 1, 100,
    n=0.005, g=0.01, s=0.24, alpha=0.5, delta=0.03)
# Experiment: start the economy exactly on the s = 0.20 balanced growth
# path, then ask what happens if s suddenly jumps to 0.25 and stays there.
#
# With n=0.01, g=0.01, delta=0.03, s=0.20, alpha=0.5 the steady-state
# K/Y is 4, so Y/L = 4 x E and K/L = 16 x E — hence K0 = 16000 for
# L0 = 1000, E0 = 1 puts us on the BGP from year zero.
#
# first confirm we are on the BGP, in logs...
log_sgm_bgp_100yr_run(1000, 1, 16000, n=0.01, g=0.01, s=0.20,
                      alpha=0.5, delta=0.03)
# ...and again in levels:
sgm_bgp_100yr_run(1000, 1, 16000, n=0.01, g=0.01,
                  s=0.20, alpha=0.5, delta=0.03)
# now the jump: same initial conditions, but s = 0.25 from year zero on.
# In levels first, then in logs:
sgm_bgp_100yr_run(1000, 1, 16000, n=0.01, g=0.01,
                  s=0.25, alpha=0.5, delta=0.03)
log_sgm_bgp_100yr_run(1000, 1, 16000, n=0.01, g=0.01, s=0.25,
                      alpha=0.5, delta=0.03)
# Calibrate the model to the U.S. today: the balanced-growth-path
# capital-output ratio is s/(n+g+delta), and BGP output per worker is
# that ratio raised to alpha/(1-alpha), scaled by labor efficiency E.
n, g, delta = 0.01, 0.015, 0.03
sinitial = 0.22
alpha = 0.333
E = 65068
KoverYstarinitial = sinitial / (n + g + delta)
YoverLstarinitial = KoverYstarinitial**(alpha / (1 - alpha)) * E
print(KoverYstarinitial, "= KoverYstarinitial")
print(YoverLstarinitial, "= YoverLstarinitial")
# A "what if": suppose the tax "reform" boosted the savings rate by 1.4
# percentage points, to 0.234 — recompute the BGP and take the log
# difference from the initial calibration as the long-run growth effect.
import numpy as np
n, g, delta = 0.01, 0.015, 0.03
sfinal = 0.234
alpha = 0.333
E = 65068
KoverYstarfinal = sfinal / (n + g + delta)
YoverLstarfinal = KoverYstarfinal**(alpha / (1 - alpha)) * E
long_run_growth_effect = np.log(YoverLstarfinal / YoverLstarinitial)
print(KoverYstarfinal, "= KoverYstarfinal")
print(YoverLstarfinal, "= YoverLstarfinal")
print(np.log(YoverLstarfinal/YoverLstarinitial), "= long-run growth effect")
# The gap to the BGP closes at rate (1-alpha)(n+g+delta) per year, so
# the first year delivers that fraction of the long-run effect.
speed_of_convergence = (1 - alpha) * (n + g + delta)
print(speed_of_convergence,
      "= the speed of convergence")
initial_year_growth_boost = long_run_growth_effect * speed_of_convergence
print(initial_year_growth_boost,
      "= initial year growth boost")
Quoting the four Stanford economists (plus their five co-signers) in their Wall Street Journal op-ed, "How Tax Reform Will Lift the Economy":
https://www.wsj.com/articles/how-tax-reform-will-lift-the-economy-1511729894?mg=prod/accounts-wsj
# Evaluate the convergence rate (1 - alpha)(n + g + delta) with
# (1 - alpha) = 2/3 and (n + g + delta) = 0.045, then show how a 0.03
# long-run boost phases in over years 0..10 as 0.03(1 - e^(-rate*t)).
convergence_speed = 2/3 * .045
print(convergence_speed, "= convergence speed")
for year in range(11):
    print(.03 - .03 * np.exp(-convergence_speed * year), "= growth over", year, "years")
##### what if we raise alpha to the "DeLong Summers" value?
#
# Re-run the savings-rate "what if" with a much higher capital share,
# alpha = 0.55, and efficiency E rescaled to 22000.
#
# note: Brad DeLong and Larry Summers do **not** believe the tax
# "reform" will raise the savings rate by 1.34%: this is a hypothetical
# exercise that, as far as I know, is nobody's analytical position:
n, g, delta = 0.01, 0.015, 0.03
sinitial, sfinal = 0.22, 0.234
alpha = 0.55
E = 22000
KoverYstarinitial = sinitial / (n + g + delta)
KoverYstarfinal = sfinal / (n + g + delta)
exponent = alpha / (1 - alpha)
YoverLstarfinal = KoverYstarfinal**exponent * E
YoverLstarinitial = KoverYstarinitial**exponent * E
print(KoverYstarfinal, "= KoverYstarfinal")
print(YoverLstarfinal, "= YoverLstarfinal")
print(np.log(YoverLstarfinal/YoverLstarinitial),
      "= long-run growth effect")
print((1 - alpha)*(n+g+delta), "= speed of convergence")
print((1 - alpha)*(n+g+delta)*
      np.log(YoverLstarfinal/YoverLstarinitial),
      "= first year growth effect")
# Push the capital share still higher, to alpha = 0.667, with E again
# rescaled (to 7151), and recompute the long-run growth effect of the
# same savings-rate jump.
n, g, delta = 0.01, 0.015, 0.03
sinitial, sfinal = 0.22, 0.234
alpha = 0.667
E = 7151
KoverYstarinitial = sinitial / (n + g + delta)
KoverYstarfinal = sfinal / (n + g + delta)
exponent = alpha / (1 - alpha)
YoverLstarfinal = KoverYstarfinal**exponent * E
YoverLstarinitial = KoverYstarinitial**exponent * E
print(KoverYstarfinal, "= KoverYstarfinal")
print(YoverLstarfinal, "= YoverLstarfinal")
print(np.log(YoverLstarfinal/YoverLstarinitial), "= long-run growth effect")