import sys
import respy as rp
import numpy as np
sys.path.insert(0, "python")
from auxiliary import plot_observed_choices # noqa: E402
from auxiliary import plot_time_preference # noqa: E402
from auxiliary import plot_policy_forecast # noqa: E402
Computational modeling in economics
Eckstein-Keane-Wolpin (EKW) models
Components
Decision problem $$ \begin{array}{ll} t = 1, .., T& \text{decision period} \\ s_t\in S & \text{state} \\ a_t\in A & \text{action} \\ a_t(s_t) & \text{decision rule} \\ r_t(s_t, a_t) & \text{immediate reward}\\ \end{array} $$
Individual's objective $$ \max_{\pi \in\Pi} E_{s_1}^\pi\left[\left.\sum^{T}_{t = 1} \delta^{t - 1} r_t(s_t, a^\pi_t(s_t))\,\right\vert\,\mathcal{I}_1\,\right] $$
Policy evaluation $$ v^\pi_t(s_t) \equiv E_{s_t}^\pi\left[\left.\sum^{T - t}_{j = 0} \delta^j\, r_{t + j}(s_{t + j}, a^\pi_{t + j}(s_{t + j})) \,\right\vert\,\mathcal{I}_t\,\right]\\ $$
Inductive scheme $$ v^\pi_t(s_t) = r_t(s_t, a^\pi_t(s_t)) + \delta\,E^\pi_{s_t} \left[\left.v^\pi_{t + 1}(s_{t + 1}) \,\right\vert\,\mathcal{I}_t\,\right] $$
Optimality equations $$ v^{\pi^*}_t(s_t) = \max_{a_t \in A}\bigg\{ r_t(s_t, a_t) + \delta\, E^{\pi^*}_{s_t} \left[\left.v^{\pi^*}_{t + 1}(s_{t + 1})\,\right\vert\,\mathcal{I}_t\,\right] \bigg\} $$
Data $$\mathcal{D} = \{a_{it}, x_{it}, r_{it}: i = 1, \dots, N; t = 1, \dots, T_i\} $$ State variables
Procedures
Seminal paper
Model of occupational choice
Labor market $$ r_t(s_t, 1) = w_{1t} = \exp\Big\{ \underbrace{\alpha_{10}}_{\text{endowment}} + \underbrace{\alpha_{11} g_t}_{\text{schooling}} + \underbrace{\alpha_{12}e_{1t} + \alpha_{13}e^2_{1t}}_{\text{own experience}} + \underbrace{\alpha_{14}e_{2t} + \alpha_{15}e^2_{2t}}_{\text{other experience}} + \underbrace{\epsilon_{1t}}_{\text{shock}} \Big\}
$$
The same setup applies to the second occupation.
Schooling
$$ r_t(s_t, 3) = \underbrace{\beta_0}_{\text{taste}} - \underbrace{\beta_1 I[\,g_t \geq 12\,]}_{\text{direct cost}} - \underbrace{\beta_2 I[\,a_{t - 1} \neq 3\,]}_{\text{reenrollment effort}} + \underbrace{\epsilon_{3t}}_{\text{shock}} \\ $$Home $$ r_t(s_t, 4) = \underbrace{\gamma_0}_{\text{taste}} + \underbrace{\epsilon_{4t}}_{\text{shock}} $$
State space $$s_t = \{g_t,e_{1t},e_{2t},a_{t - 1},\epsilon_{1t},\epsilon_{2t},\epsilon_{3t},\epsilon_{4t}\} $$
Transitions
# Load the Keane-Wolpin (1994) example parametrization (the "two" variant),
# without an attached empirical dataset: `params` is the parameter table
# (pandas DataFrame indexed by category/name), `options` the model options.
params, options = rp.get_example_model("kw_94_two", with_data=False)
How is the economy parametrized?
# Inspect the first few rows of the parameter table.
params.head()
value | comment | ||
---|---|---|---|
category | name | ||
delta | delta | 0.9500 | discount factor |
wage_a | constant | 9.2100 | log of rental price |
exp_edu | 0.0400 | return to an additional year of schooling | |
exp_a | 0.0330 | return to same sector experience | |
exp_a_square | -0.0005 | return to same sector, quadratic experience |
How are the options set?
# Display the full options dictionary (solution, simulation, estimation settings).
options
{'estimation_draws': 200, 'estimation_seed': 500, 'estimation_tau': 500, 'interpolation_points': -1, 'n_periods': 40, 'simulation_agents': 1000, 'simulation_seed': 132, 'solution_draws': 500, 'solution_seed': 1, 'monte_carlo_sequence': 'random', 'core_state_space_filters': ["period > 0 and exp_{choices_w_exp} == period and lagged_choice_1 != '{choices_w_exp}'", "period > 0 and exp_a + exp_b + exp_edu == period and lagged_choice_1 == '{choices_wo_exp}'", "period > 0 and lagged_choice_1 == 'edu' and exp_edu == 0", "lagged_choice_1 == '{choices_w_wage}' and exp_{choices_w_wage} == 0", "period == 0 and lagged_choice_1 == '{choices_w_wage}'"], 'covariates': {'constant': '1', 'exp_a_square': 'exp_a ** 2', 'exp_b_square': 'exp_b ** 2', 'at_least_twelve_exp_edu': 'exp_edu >= 12', 'not_edu_last_period': "lagged_choice_1 != 'edu'"}}
We can now simulate a dataset and look at the individual decisions.
# Construct the simulation function once for the given options, then simulate
# a dataset under the baseline parameters and plot the observed choices.
simulate_func = rp.get_simulate_func(params, options)
plot_observed_choices(simulate_func(params))
def time_preference_wrapper_kw_94(simulate_func, params, value):
    """Return average completed schooling for a given discount factor.

    A copy of *params* is taken so the caller's parameter table is left
    untouched; only the ("delta", "delta") entry is set to *value*. The
    model is then simulated and the statistic returned is the mean across
    individuals of each individual's maximum observed schooling experience.
    """
    modified_params = params.copy()
    modified_params.loc[("delta", "delta"), "value"] = value
    simulated_df = simulate_func(modified_params)
    max_schooling = simulated_df.groupby("Identifier")["Experience_Edu"].max()
    return max_schooling.mean()
Now we can iterate over a grid of discount factors.
# Average completed schooling for each discount factor on a fine grid
# around the baseline value of 0.95.
deltas = np.linspace(0.945, 0.955, 10)
# The original loop used `enumerate` but never used the index; a list
# comprehension expresses the pure mapping directly.
edu_level = [
    time_preference_wrapper_kw_94(simulate_func, params, delta) for delta in deltas
]
plot_time_preference(deltas, edu_level)
def tuition_policy_wrapper_kw_94(simulate_func, params, tuition_subsidy):
    """Return average completed schooling under a tuition subsidy.

    The subsidy is added to the ("nonpec_edu", "at_least_twelve_exp_edu")
    entry of a copy of *params* (the caller's table is unchanged). After
    simulating, the mean over individuals of the per-individual maximum
    schooling experience is returned.
    """
    adjusted_params = params.copy()
    row = ("nonpec_edu", "at_least_twelve_exp_edu")
    adjusted_params.loc[row, "value"] += tuition_subsidy
    simulated_df = simulate_func(adjusted_params)
    return simulated_df.groupby("Identifier")["Experience_Edu"].max().mean()
Now we can iterate over a grid of tuition subsidies.
# Average completed schooling for each tuition subsidy on a grid from 0 to 1500.
subsidies = np.linspace(0, 1500, num=10, dtype=int, endpoint=True)
# The original loop used `enumerate` but never used the index; a list
# comprehension expresses the pure mapping directly.
edu_level = [
    tuition_policy_wrapper_kw_94(simulate_func, params, subsidy) for subsidy in subsidies
]
plot_policy_forecast(subsidies, edu_level)