This notebook conducts a quick and dirty structural estimation based on Table 9 of "MPC Heterogeneity and Household Balance Sheets" by Fagereng, Holm, and Natvik, who use Norwegian administrative data on income, household assets, and lottery winnings to examine the MPC from transitory income shocks (lottery prizes). Their Table 9 reports an estimated MPC broken down by quartiles of bank deposits and prize size; this table is reproduced here as $\texttt{MPC\_target\_base}$. In this demo, we use the Table 9 estimates as targets in a simple structural estimation, seeking to minimize the sum of squared differences between simulated and estimated MPCs by changing the (uniform) distribution of discount factors. The essential question is how well their results can be rationalized by a simple one-asset consumption-saving model. (Note that the paper was later published in a revised version that unfortunately excluded Table 9.)
The function that estimates discount factors includes several options for estimating different specifications:
# Import python tools
import numpy as np
from copy import deepcopy
# Import needed tools from HARK
from HARK.distribution import Uniform
from HARK.utilities import get_percentiles
from HARK.estimation import minimize_nelder_mead
from HARK.ConsumptionSaving.ConsIndShockModel import IndShockConsumerType
# Baseline parameter dictionary for an infinite-horizon ("perpetual youth")
# consumer type; several entries are overwritten below when building base_params.
init_infinite = {
    "CRRA": 1.0,  # Coefficient of relative risk aversion (log utility)
    # Quarterly interest factor, divided by the survival probability (1 - 1/160)
    # to represent annuitized returns in a perpetual-youth setting
    "Rfree": 1.01 / (1.0 - 1.0 / 160.0),
    # Permanent income growth factor (no permanent growth)
    "PermGroFac": [1.000**0.25],
    "PermGroFacAgg": 1.0,  # Aggregate permanent income growth factor
    "BoroCnstArt": 0.0,  # Artificial borrowing constraint: no borrowing allowed
    "CubicBool": False,  # Use linear, not cubic spline, interpolation
    "vFuncBool": False,  # Do not compute the value function
    "PermShkStd": [
        (0.01 * 4 / 11) ** 0.5
    ],  # Standard deviation of permanent shocks to income
    "PermShkCount": 5,  # Number of points in permanent income shock grid
    "TranShkStd": [
        (0.01 * 4) ** 0.5
    ],  # Standard deviation of transitory shocks to income
    "TranShkCount": 5,  # Number of points in transitory income shock grid
    "UnempPrb": 0.07,  # Probability of unemployment while working
    "IncUnemp": 0.15,  # Unemployment benefit replacement rate
    "UnempPrbRet": 0.07,  # Probability of "unemployment" when retired
    "IncUnempRet": 0.15,  # Benefit replacement rate when retired
    "aXtraMin": 0.00001,  # Minimum end-of-period assets in grid
    "aXtraMax": 40,  # Maximum end-of-period assets in grid
    "aXtraCount": 32,  # Number of points in assets grid
    "aXtraExtra": [None],  # Extra asset gridpoints to include (none)
    "aXtraNestFac": 3,  # Number of times to 'exponentially nest' when constructing assets grid
    "LivPrb": [1.0 - 1.0 / 160.0],  # Survival probability
    "DiscFac": 0.97,  # Default intertemporal discount factor; dummy value, will be overwritten
    "cycles": 0,  # Infinite-horizon problem
    "T_cycle": 1,  # One period per cycle
    "T_retire": 0,  # No retirement period
    # Number of periods to simulate (idiosyncratic shocks model, perpetual youth)
    "T_sim": 1200,
    "T_age": 400,  # Maximum age before simulated agents are replaced
    "IndL": 10.0 / 9.0,  # Labor supply per individual (constant)
    "aNrmInitMean": np.log(0.00001),  # Log of mean initial assets: essentially zero
    "aNrmInitStd": 0.0,  # No dispersion in initial assets
    "pLvlInitMean": 0.0,  # Mean of log initial permanent income (level of 1)
    "pLvlInitStd": 0.0,  # No dispersion in initial permanent income
    "AgentCount": 10000,  # Number of agents to simulate
}
# Key problem-specific estimation settings
TypeCount = 8  # Number of consumer types with heterogeneous discount factors
AdjFactor = 1.0  # Factor by which to scale all of the MPCs in Table 9
T_kill = 100  # Don't let agents live past this age
Splurge = 0.0  # Consumers automatically spend this amount of any lottery prize
do_secant = True  # If True, calculate MPC by secant, else point MPC
drop_corner = False  # If True, ignore upper left corner when calculating distance

# Build the parameter set for this exercise from the standard HARK values
base_params = deepcopy(init_infinite)
base_params["LivPrb"] = [0.975]
base_params["Rfree"] = 1.04 / base_params["LivPrb"][0]
base_params["PermShkStd"] = [0.1]
base_params["TranShkStd"] = [0.1]
# Kill off agents if they manage to achieve T_kill working years
base_params["T_age"] = T_kill
base_params["AgentCount"] = 10000
base_params["pLvlInitMean"] = np.log(23.72)  # From Table 1, in thousands of USD
# No point simulating past when agents would be killed off
base_params["T_sim"] = T_kill
# MPC targets from Fagereng et al's Table 9; element [i, j] is the estimated
# MPC for lottery-size quartile i and bank-deposit quartile j
MPC_target_base = np.array(
    [
        [1.047, 0.745, 0.720, 0.490],
        [0.762, 0.640, 0.559, 0.437],
        [0.663, 0.546, 0.390, 0.386],
        [0.354, 0.325, 0.242, 0.216],
    ]
)
MPC_target = MPC_target_base * AdjFactor  # optionally rescaled targets

# The four lottery prize sizes, in thousands of USD; eyeballed centers/averages
lottery_size = np.array([1.625, 3.3741, 7.129, 40.0])
# Construct the consumer types used during estimation, each with its own RNG
# seed; their discount factors are assigned inside the objective function.
BaseType = IndShockConsumerType(**base_params)
EstTypeList = []
for seed in range(TypeCount):
    ThisType = deepcopy(BaseType)
    ThisType.seed = seed
    EstTypeList.append(ThisType)
# Define the objective function
def FagerengObjFunc(center, spread, verbose=False):
    """
    Objective function for the quick and dirty structural estimation to fit
    Fagereng, Holm, and Natvik's Table 9 results with a basic infinite horizon
    consumption-saving model (with permanent and transitory income shocks).

    Parameters
    ----------
    center : float
        Center of the uniform distribution of discount factors.
    spread : float
        Half-width of the uniform distribution of discount factors.
    verbose : bool
        When True, print to screen MPC table for these parameters. When False,
        print (center, spread, distance).

    Returns
    -------
    distance : float
        Euclidean distance between simulated MPCs and (adjusted) Table 9 MPCs.
    """
    # Give our consumer types the requested discount factor distribution
    beta_set = (
        Uniform(bot=center - spread, top=center + spread)
        .discretize(N=TypeCount)
        .atoms.flatten()
    )
    for EstType, beta in zip(EstTypeList, beta_set):
        EstType.DiscFac = beta

    # Solve and simulate all consumer types, then gather their wealth levels
    for EstType in EstTypeList:
        EstType.solve()
        EstType.initialize_sim()
        EstType.simulate()
        EstType.unpack("cFunc")
    WealthNow = np.concatenate([ThisType.state_now["aLvl"] for ThisType in EstTypeList])

    # Get wealth quartile cutoffs and assign a quartile index (0-3) to each agent
    quartile_cuts = get_percentiles(WealthNow, percentiles=[0.25, 0.50, 0.75])
    for ThisType in EstTypeList:
        WealthQ = np.zeros(ThisType.AgentCount, dtype=int)
        for cut in quartile_cuts:
            WealthQ[ThisType.state_now["aLvl"] > cut] += 1
        ThisType.WealthQ = WealthQ

    # MPC_set_list[k][q] collects MPC arrays for lottery size k, wealth quartile q
    MPC_set_list = [[[] for _ in range(4)] for _ in range(4)]

    # Calculate the MPC for each of the four lottery sizes for all agents
    for ThisType in EstTypeList:
        ThisType.simulate(1)  # advance one period to get current consumption
        c_base = ThisType.controls["cNrm"]
        MPC_this_type = np.zeros((ThisType.AgentCount, 4))
        pLvl = ThisType.state_now["pLvl"]  # hoisted: invariant across lottery sizes
        for k in range(4):  # Get MPC for all agents of this type
            Lnrm = lottery_size[k] / pLvl  # prize normalized by permanent income
            if do_secant:
                # Secant MPC: change in consumption per unit of prize, with the
                # "splurge" amount spent automatically before re-optimizing
                SplurgeNrm = Splurge / pLvl
                mAdj = ThisType.state_now["mNrm"] + Lnrm - SplurgeNrm
                cAdj = ThisType.cFunc[0](mAdj) + SplurgeNrm
                MPC_this_type[:, k] = (cAdj - c_base) / Lnrm
            else:
                # Point MPC: slope of the consumption function at adjusted resources
                # (original code had a stray chained assignment to cAdj here)
                mAdj = ThisType.state_now["mNrm"] + Lnrm
                MPC_this_type[:, k] = ThisType.cFunc[0].derivative(mAdj)

        # Sort the MPCs into the proper (lottery size, wealth quartile) sets
        for q in range(4):
            these = ThisType.WealthQ == q
            for k in range(4):
                MPC_set_list[k][q].append(MPC_this_type[these, k])

    # Calculate average within each MPC set
    simulated_MPC_means = np.zeros((4, 4))
    for k in range(4):
        for q in range(4):
            simulated_MPC_means[k, q] = np.mean(np.concatenate(MPC_set_list[k][q]))

    # Calculate Euclidean distance between simulated MPC averages and Table 9 targets
    diff = simulated_MPC_means - MPC_target
    if drop_corner:
        diff[0, 0] = 0.0  # ignore the smallest-prize / lowest-deposit cell
    distance = np.sqrt(np.sum((diff) ** 2))
    if verbose:
        print(simulated_MPC_means)
    else:
        print(center, spread, distance)
    return distance
# Conduct the estimation: search over (center, spread) of the discount factor
# distribution, starting from an initial guess
guess = [0.92, 0.03]


def f_temp(x):
    """Wrap FagerengObjFunc so the optimizer can pass a single parameter vector."""
    return FagerengObjFunc(x[0], x[1])


opt_params = minimize_nelder_mead(f_temp, guess, verbose=False)
print(
    f"Finished estimating for scaling factor of {AdjFactor}"
    f' and "splurge amount" of ${1000 * Splurge}'
)
print(f"Optimal (beta,nabla) is {opt_params}, simulated MPCs are:")
# Re-evaluate at the optimum with verbose=True to print the simulated MPC table
dist = FagerengObjFunc(opt_params[0], opt_params[1], True)
print(f"Distance from Fagereng et al Table 9 is {dist}")
0.92 0.03 1.1196898999208702 0.9660000000000001 0.03 1.8557370146227723 0.92 0.0315 1.1205388789300472 0.874 0.0315 0.7762009314641924 0.8280000000000001 0.03225 0.6794629566371587 0.8280000000000002 0.03075 0.6812764349781586 0.7360000000000001 0.033 0.7910782500357642 0.782 0.03225 0.701480126254866 0.8740000000000001 0.03075 0.776917731194195 0.805 0.031875 0.6784586313768969 0.8049999999999998 0.033375 0.6766778040100674 0.7934999999999997 0.03468750000000001 0.6842925413106543 0.7819999999999998 0.033 0.7005977272821906 0.8165 0.0324375 0.6747927160969063 0.8164999999999997 0.033937499999999995 0.6730511351615501 0.8222499999999997 0.03496874999999999 0.6729548684813017 0.8337499999999999 0.03403125 0.6818984152434335 0.8121874999999998 0.0335390625 0.6738051373790596 0.8179374999999995 0.036070312499999986 0.6705629741922412 0.8186562499999992 0.03788671874999998 0.6681951147478101 0.8287187499999991 0.03931640624999997 0.6708742365404755 0.8251249999999986 0.04223437499999996 0.664296531186165 0.8265624999999979 0.04586718749999995 0.6598362485225537 0.816499999999998 0.044437499999999956 0.6588191494676393 0.8103906249999975 0.04699804687499996 0.6560435981007816 0.8182968749999961 0.05497851562499993 0.6429622106958304 0.8181171874999946 0.0635244140624999 0.6283719404390292 0.8019453124999942 0.06465527343749991 0.6317158378919989 0.8096718749999914 0.08118164062499984 0.5987059095800152 0.8093124999999883 0.09827343749999978 0.5681565783197481 0.8254843749999887 0.09714257812499977 0.5764414055133064 0.8166796874999824 0.13189160156249966 0.5323676419241254 0.8159609374999763 0.16607519531249954 0.5904223685318514 0.800507812499982 0.13302246093749967 0.5163117765181638 0.7880195312499785 0.15096240234374964 0.5047124560283467 0.7953867187499726 0.18458056640624948 0.5617132527925662 0.7988681640624766 0.16300378417968706 0.5108506441527563 0.7702080078124727 0.182074584960937 0.5287581823368499 0.78182592773435 0.16952883911132766 0.5070351369307667 
0.7709772949218519 0.15748745727539024 0.5280528475238496 0.7918954467773205 0.16162470245361285 0.5017035822390945 0.798089050292949 0.14305826568603486 0.5061860549357586 0.7940232696532993 0.14967590904235806 0.5028256236143316 0.7978991851806412 0.16033820915222124 0.5063050345595199 0.7904894447326442 0.15330635404586754 0.5021984920087686 0.7883616218566655 0.16525514745712233 0.5024660695309078 0.7897770338058239 0.16136033785343126 0.5012012963060823 0.7911830358505002 0.16967868626117658 0.5077537001985432 0.7906628425121082 0.15739943709969478 0.5010198321545949 0.7885444295406117 0.15713507249951317 0.501600543157581 0.7893821838497889 0.15825747998803807 0.5011177990197709 0.7902679925560732 0.1542965792343016 0.5019956034989637 0.7898997734933861 0.15959439819864885 0.500990160961901 0.7911804321557053 0.15873635531030555 0.5009410672976718 0.7920795563086636 0.15897579297143927 0.5010530968741752 0.7904173631369834 0.16093131640925962 0.5011231702147277 0.790601472668327 0.158282406927086 0.5009608820931558 0.7918821313306463 0.15742436403874271 0.5010716288897102 0.7903953629527012 0.1590518896586723 0.5009804079463754 0.7913865418713313 0.15796687257871925 0.5010066892811852 0.7906431576823587 0.15878063538868403 0.500971752393584 0.7911387471416738 0.15823812684870753 0.5009681043577296 0.7910148497768451 0.15837375398370165 0.5009669832514088 0.7907670550471874 0.1586450082536899 0.5009566726168533 0.7913460145345657 0.15909895663690948 0.5009794669723293 0.7907876081348867 0.15848654435454188 0.5009538516244135 0.7912009852434047 0.1585778914111575 0.5009527296683954 0.7915938092642233 0.15882770236692115 0.5009772796966896 0.7909891584172208 0.15857183385763668 0.5009391718629084 0.7909686053295214 0.15873029775678474 0.5009633296184927 0.7911428902649338 0.1586159929975643 0.5009474487098768 0.7910267003079923 0.15869219617037794 0.5009522371916169 0.7911138427756984 0.1586350437907677 0.5009511323341336 0.791084795286463 0.15865409458397112 
0.5009431227606922 0.7910660243410773 0.15859391342760049 0.5009656806073184 Finished estimating for scaling factor of 1.0 and "splurge amount" of $0.0 Optimal (beta,nabla) is [0.79098916 0.15857183], simulated MPCs are: [[0.77304235 0.68123007 0.56397879 0.40685965] [0.74299194 0.66281567 0.55224436 0.39833641] [0.70298354 0.63332607 0.529786 0.38134206] [0.56111798 0.50265972 0.41152609 0.294467 ]] Distance from Fagereng et al Table 9 is 0.5009391718629084