%matplotlib inline
import pymc3 as pm
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from collections import OrderedDict
from ipywidgets import interactive, fixed
plt.style.use('seaborn-darkgrid')
print('Running on PyMC3 v{}'.format(pm.__version__))
rndst = np.random.RandomState(0)
Running on PyMC3 v3.3
A fairly minimal, reproducible example of model selection using WAIC and LOO, as currently implemented in PyMC3.
This example creates two toy datasets under linear and quadratic models, and then tests the fit of a range of polynomial linear models on those datasets using the Widely Applicable Information Criterion (WAIC) and leave-one-out (LOO) cross-validation with Pareto-smoothed importance sampling (PSIS).
The example was inspired by Jake Vanderplas' blog post on model selection, although Cross-Validation and Bayes Factor comparison are not implemented. The datasets are tiny and generated within this Notebook. They contain errors in the measured value (y) only.
def generate_data(n=20, p=0, a=1, b=1, c=0, latent_sigma_y=20):
    '''
    Create a toy dataset based on a very simple model that we might
    imagine is a noisy physical process:
        1. random x values within a range
        2. latent error aka inherent noise in y
        3. optionally create labelled outliers with larger noise

    Model form: y ~ a + bx + cx^2 + e

    NOTE: latent_sigma_y is used to create a normally distributed,
    'latent error' aka 'inherent noise' in the 'physical' generating
    process, rather than experimental measurement error.
    Please don't use the returned `latent_error` values in inferential
    models, it's returned in the dataframe for interest only.
    '''
    df = pd.DataFrame({'x': rndst.choice(np.arange(100), n, replace=False)})

    ## create linear or quadratic model
    df['y'] = a + b*(df['x']) + c*(df['x'])**2

    ## create latent noise and marked outliers
    df['latent_error'] = rndst.normal(0, latent_sigma_y, n)
    df['outlier_error'] = rndst.normal(0, latent_sigma_y*10, n)
    df['outlier'] = rndst.binomial(1, p, n)

    ## add noise, with extreme noise for marked outliers
    df['y'] += ((1-df['outlier']) * df['latent_error'])
    df['y'] += (df['outlier'] * df['outlier_error'])

    ## round
    for col in ['y', 'latent_error', 'outlier_error', 'x']:
        df[col] = np.round(df[col], 3)

    ## add label
    df['source'] = 'linear' if c == 0 else 'quadratic'

    ## create simple linspace for plotting true model
    plotx = np.linspace(df['x'].min() - np.ptp(df['x'])*.1,
                        df['x'].max() + np.ptp(df['x'])*.1, 100)
    ploty = a + b*plotx + c*plotx**2
    dfp = pd.DataFrame({'x': plotx, 'y': ploty})

    return df, dfp
def interact_dataset(n=20, p=0, a=-30, b=5, c=0, latent_sigma_y=20):
    '''
    Convenience function:
    Interactively generate dataset and plot
    '''
    df, dfp = generate_data(n, p, a, b, c, latent_sigma_y)

    g = sns.FacetGrid(df, size=8, hue='outlier', hue_order=[True, False],
                      palette=sns.color_palette('Set1'), legend_out=False)
    g.map(plt.errorbar, 'x', 'y', 'latent_error', marker="o",
          ms=10, mec='w', mew=2, ls='', elinewidth=0.7).add_legend()
    plt.plot(dfp['x'], dfp['y'], '--', alpha=0.8)
    plt.subplots_adjust(top=0.92)
    g.fig.suptitle('Sketch of Data Generation ({})'.format(df['source'][0]),
                   fontsize=16)
def plot_datasets(df_lin, df_quad, dfp_lin, dfp_quad):
    '''
    Convenience function:
    Plot the two generated datasets in facets with generative model
    '''
    df = pd.concat((df_lin, df_quad), axis=0)

    g = sns.FacetGrid(col='source', hue='source', data=df, size=6,
                      sharey=False, legend_out=False)
    g.map(plt.scatter, 'x', 'y', alpha=0.7, s=100, lw=2, edgecolor='w')
    g.axes[0][0].plot(dfp_lin['x'], dfp_lin['y'], '--', alpha=0.6)
    g.axes[0][1].plot(dfp_quad['x'], dfp_quad['y'], '--', alpha=0.6)
def plot_traces(traces, retain=1000):
    '''
    Convenience function:
    Plot traces with overlaid means and values
    '''
    ax = pm.traceplot(traces[-retain:],
                      figsize=(12, len(traces.varnames)*1.5),
                      lines={k: v['mean'] for k, v in
                             pm.summary(traces[-retain:]).iterrows()})

    for i, mn in enumerate(pm.summary(traces[-retain:])['mean']):
        ax[i, 0].annotate('{:.2f}'.format(mn), xy=(mn, 0), xycoords='data',
                          xytext=(5, 10), textcoords='offset points',
                          rotation=90, va='bottom', fontsize='large',
                          color='#AA0022')
def create_poly_modelspec(k=1):
    '''
    Convenience function:
    Create a polynomial modelspec string for patsy
    '''
    return ('y ~ 1 + x ' + ' '.join(['+ np.power(x,{})'.format(j)
                                     for j in range(2, k+1)])).strip()
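For example (a quick illustration, not a cell from the original Notebook), the spec for a 3rd-order model is:
create_poly_modelspec(3)
## returns: 'y ~ 1 + x + np.power(x,2) + np.power(x,3)'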
def run_models(df, upper_order=5):
    '''
    Convenience function:
    Fit a range of pymc3 models of increasing polynomial complexity.
    Suggest limit to max order 5 since calculation time is exponential.
    '''
    models, traces = OrderedDict(), OrderedDict()

    for k in range(1, upper_order+1):
        nm = 'k{}'.format(k)
        fml = create_poly_modelspec(k)

        with pm.Model() as models[nm]:
            print('\nRunning: {}'.format(nm))
            pm.glm.GLM.from_formula(fml, df,
                                    priors={'Intercept': pm.Normal.dist(mu=0, sd=100)},
                                    family=pm.glm.families.Normal())
            traces[nm] = pm.sample(2000)

    return models, traces
def plot_posterior_cr(models, traces, rawdata, xlims,
                      datamodelnm='linear', modelnm='k1'):
    '''
    Convenience function:
    Plot posterior predictions with credible regions shown as filled areas.
    '''
    ## Get traces and calc posterior prediction for npoints in x
    npoints = 100
    mdl = models[modelnm]
    trc = pm.trace_to_dataframe(traces[modelnm][-1000:])
    trc = trc[[str(v) for v in mdl.cont_vars[:-1]]]

    ordr = int(modelnm[-1:])
    x = np.linspace(xlims[0], xlims[1], npoints).reshape((npoints, 1))
    pwrs = np.ones((npoints, ordr+1)) * np.arange(ordr+1)
    X = x ** pwrs
    cr = np.dot(X, trc.T)

    ## Calculate credible regions and plot over the datapoints
    dfp = pd.DataFrame(np.percentile(cr, [2.5, 25, 50, 75, 97.5], axis=1).T,
                       columns=['025', '250', '500', '750', '975'])
    dfp['x'] = x

    pal = sns.color_palette('Greens')
    f, ax1d = plt.subplots(1, 1, figsize=(7, 7))
    f.suptitle('Posterior Predictive Fit -- Data: {} -- Model: {}'.format(
        datamodelnm, modelnm), fontsize=16)
    plt.subplots_adjust(top=0.95)

    ax1d.fill_between(dfp['x'], dfp['025'], dfp['975'], alpha=0.5,
                      color=pal[1], label='CR 95%')
    ax1d.fill_between(dfp['x'], dfp['250'], dfp['750'], alpha=0.5,
                      color=pal[4], label='CR 50%')
    ax1d.plot(dfp['x'], dfp['500'], alpha=0.6, color=pal[5], label='Median')

    plt.legend()
    ax1d.set_xlim(xlims)
    sns.regplot(x='x', y='y', data=rawdata, fit_reg=False,
                scatter_kws={'alpha': 0.7, 's': 100, 'lw': 2, 'edgecolor': 'w'},
                ax=ax1d)
Throughout the rest of the Notebook, we'll use two toy datasets, created by a linear model and a quadratic model respectively, so that we can better evaluate how well the model selection performs.
Right now, let's use an interactive session to play around with the data generation function in this Notebook, and get a feel for the possibilities of data we could generate.
$$y_{i} = a + bx_{i} + cx_{i}^{2} + \epsilon_{i}$$

where:
$i = 1, \dots, n$ datapoints
$\epsilon_{i} \sim \mathcal{N}(0, \text{latent\_sigma\_y})$
NOTE on outliers:
+ We use the value p to set the (approximate) proportion of 'outliers' under a bernoulli distribution.
+ These outliers have a 10x larger latent_sigma_y.
+ The outliers are labelled in the returned dataset, which may be useful for other modelling - see another example Notebook: GLM-robust-with-outlier-detection.ipynb
interactive(interact_dataset, n=[5,50,5], p=[0,.5,.05], a=[-50,50],
b=[-10,10], c=[-3,3], latent_sigma_y=[0,1000,50])
A Jupyter Widget
Observe:
+ The latent_error is shown in the errorbars, but this is for interest only, since it shows the inherent noise in whatever 'physical process' we imagine created the data.

We can use the above interactive plot to get a feel for the effect of the params. Now we'll create 2 fixed datasets to use for the remainder of the Notebook.
n = 12
df_lin, dfp_lin = generate_data(n=n, p=0, a=-30, b=5, c=0, latent_sigma_y=40)
df_quad, dfp_quad = generate_data(n=n, p=0, a=-200, b=2, c=3, latent_sigma_y=500)
Scatterplot against model line
plot_datasets(df_lin, df_quad, dfp_lin, dfp_quad)
Observe:
+ We now have two datasets, df_lin and df_quad, created by a linear model and a quadratic model respectively.

Create standardized versions of the x values (dfs_lin, dfs_quad), which keeps the higher-order polynomial terms on a more manageable scale for the sampler:

dfs_lin = df_lin.copy()
dfs_lin['x'] = (df_lin['x'] - df_lin['x'].mean()) / df_lin['x'].std()
dfs_quad = df_quad.copy()
dfs_quad['x'] = (df_quad['x'] - df_quad['x'].mean()) / df_quad['x'].std()
Create ranges for later use in xlim and ylim
dfs_lin_xlims = (dfs_lin['x'].min() - np.ptp(dfs_lin['x'])/10,
dfs_lin['x'].max() + np.ptp(dfs_lin['x'])/10)
dfs_lin_ylims = (dfs_lin['y'].min() - np.ptp(dfs_lin['y'])/10,
dfs_lin['y'].max() + np.ptp(dfs_lin['y'])/10)
dfs_quad_ylims = (dfs_quad['y'].min() - np.ptp(dfs_quad['y'])/10,
dfs_quad['y'].max() + np.ptp(dfs_quad['y'])/10)
This linear model is really simple and conventional, an OLS with L2 constraints (Ridge Regression):
$$y = a + bx + \epsilon$$

with pm.Model() as mdl_ols:
    ## define Normal priors to give Ridge regression
    b0 = pm.Normal('b0', mu=0, sd=100)
    b1 = pm.Normal('b1', mu=0, sd=100)

    ## define Linear model
    yest = b0 + b1 * df_lin['x']

    ## define Normal likelihood with HalfCauchy noise (fat tails, equiv to HalfT 1DoF)
    sigma_y = pm.HalfCauchy('sigma_y', beta=10)
    likelihood = pm.Normal('likelihood', mu=yest, sd=sigma_y, observed=df_lin['y'])

    traces_ols = pm.sample(2000)
Auto-assigning NUTS sampler... Initializing NUTS using jitter+adapt_diag... Multiprocess sampling (2 chains in 2 jobs) NUTS: [sigma_y_log__, b1, b0] 100%|██████████| 2500/2500 [00:03<00:00, 826.32it/s]
plot_traces(traces_ols, retain=1000)
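As a brief aside (background reasoning, not part of the original Notebook): the 'ridge' interpretation comes from the Normal(mu=0, sd=100) priors on b0 and b1. For a fixed sigma_y, maximizing the log-posterior of the model above is equivalent to minimizing an L2-penalized least-squares objective

$$\sum_{i}\left(y_{i} - b_{0} - b_{1}x_{i}\right)^{2} + \lambda\left(b_{0}^{2} + b_{1}^{2}\right), \qquad \lambda = \frac{\sigma_{y}^{2}}{100^{2}}$$

so the weakly informative sd=100 priors correspond to only a very mild ridge penalty here.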
Observe:
PyMC3 has a module - glm - for defining models using a patsy-style formula syntax. This seems really useful, especially for defining simple regression models in fewer lines of code.

Here's the same OLS model as above, defined using glm.
with pm.Model() as mdl_ols_glm:
    # setup model with Normal likelihood (which uses HalfCauchy for error prior)
    pm.glm.GLM.from_formula('y ~ 1 + x', df_lin, family=pm.glm.families.Normal())

    traces_ols_glm = pm.sample(2000)
Auto-assigning NUTS sampler... Initializing NUTS using jitter+adapt_diag... Multiprocess sampling (2 chains in 2 jobs) NUTS: [sd_log__, x, Intercept] 100%|██████████| 2500/2500 [00:03<00:00, 788.53it/s] There were 1 divergences after tuning. Increase `target_accept` or reparameterize.
plot_traces(traces_ols_glm, retain=1000)
Observe:
The output parameters are of course named differently to the custom naming before. Now we have:
b0 == Intercept
b1 == x
sigma_y == sd
However, naming aside, this glm-defined model appears to behave in a very similar way, and finds the same parameter values as the conventionally defined model - any differences are due to the random nature of the sampling.
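As a quick sanity check (a sketch, not a cell from the original Notebook), we could put the posterior summaries side by side; the mean and sd columns should agree closely apart from sampling noise:

## hypothetical check: compare posterior summaries from the two parameterizations
print(pm.summary(traces_ols)[['mean', 'sd']])
print(pm.summary(traces_ols_glm)[['mean', 'sd']])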
We can quite happily use the glm syntax for further models below, since it allows us to create a small model factory very easily.
Back to the real purpose of this Notebook: to demonstrate model selection.
First, let's create and run a set of polynomial models on each of our toy datasets. By default this is for models of order 1 to 5.
Please see run_models() above for details. In short, we create five polynomial models and fit each one to the chosen dataset.
models_lin, traces_lin = run_models(dfs_lin, 5)
Auto-assigning NUTS sampler... Initializing NUTS using jitter+adapt_diag...
Running: k1
Multiprocess sampling (2 chains in 2 jobs) NUTS: [sd_log__, x, Intercept] 100%|██████████| 2500/2500 [00:01<00:00, 1251.87it/s]
Running: k2
Auto-assigning NUTS sampler... Initializing NUTS using jitter+adapt_diag... Multiprocess sampling (2 chains in 2 jobs) NUTS: [sd_log__, np.power(x, 2), x, Intercept] 100%|██████████| 2500/2500 [00:02<00:00, 1036.36it/s]
Running: k3
Auto-assigning NUTS sampler... Initializing NUTS using jitter+adapt_diag... Multiprocess sampling (2 chains in 2 jobs) NUTS: [sd_log__, np.power(x, 3), np.power(x, 2), x, Intercept] 100%|██████████| 2500/2500 [00:04<00:00, 530.45it/s]
Running: k4
Auto-assigning NUTS sampler... Initializing NUTS using jitter+adapt_diag... Multiprocess sampling (2 chains in 2 jobs) NUTS: [sd_log__, np.power(x, 4), np.power(x, 3), np.power(x, 2), x, Intercept] 100%|██████████| 2500/2500 [00:07<00:00, 347.64it/s] There were 2 divergences after tuning. Increase `target_accept` or reparameterize. There were 3 divergences after tuning. Increase `target_accept` or reparameterize. The number of effective samples is smaller than 25% for some parameters.
Running: k5
Auto-assigning NUTS sampler... Initializing NUTS using jitter+adapt_diag... Multiprocess sampling (2 chains in 2 jobs) NUTS: [sd_log__, np.power(x, 5), np.power(x, 4), np.power(x, 3), np.power(x, 2), x, Intercept] 100%|██████████| 2500/2500 [00:24<00:00, 101.41it/s] There were 33 divergences after tuning. Increase `target_accept` or reparameterize. There were 28 divergences after tuning. Increase `target_accept` or reparameterize. The number of effective samples is smaller than 25% for some parameters.
models_quad, traces_quad = run_models(dfs_quad, 5)
Running: k1
Auto-assigning NUTS sampler... Initializing NUTS using jitter+adapt_diag... Multiprocess sampling (2 chains in 2 jobs) NUTS: [sd_log__, x, Intercept] 100%|██████████| 2500/2500 [00:02<00:00, 875.37it/s] The acceptance probability does not match the target. It is 0.993112884126, but should be close to 0.8. Try to increase the number of tuning steps. Auto-assigning NUTS sampler... Initializing NUTS using jitter+adapt_diag...
Running: k2
Multiprocess sampling (2 chains in 2 jobs) NUTS: [sd_log__, np.power(x, 2), x, Intercept] 100%|██████████| 2500/2500 [00:04<00:00, 578.24it/s] The acceptance probability does not match the target. It is 0.883398371777, but should be close to 0.8. Try to increase the number of tuning steps. The acceptance probability does not match the target. It is 0.999997671953, but should be close to 0.8. Try to increase the number of tuning steps. The chain reached the maximum tree depth. Increase max_treedepth, increase target_accept or reparameterize. The gelman-rubin statistic is larger than 1.2 for some parameters. The estimated number of effective samples is smaller than 200 for some parameters.
Running: k3
Auto-assigning NUTS sampler... Initializing NUTS using jitter+adapt_diag... Multiprocess sampling (2 chains in 2 jobs) NUTS: [sd_log__, np.power(x, 3), np.power(x, 2), x, Intercept] 100%|██████████| 2500/2500 [03:07<00:00, 13.32it/s] The acceptance probability does not match the target. It is 0.999381198882, but should be close to 0.8. Try to increase the number of tuning steps. The acceptance probability does not match the target. It is 0.899455530613, but should be close to 0.8. Try to increase the number of tuning steps.
Running: k4
Auto-assigning NUTS sampler... Initializing NUTS using jitter+adapt_diag... Multiprocess sampling (2 chains in 2 jobs) NUTS: [sd_log__, np.power(x, 4), np.power(x, 3), np.power(x, 2), x, Intercept] 100%|██████████| 2500/2500 [00:05<00:00, 430.11it/s] The acceptance probability does not match the target. It is 0.920287453662, but should be close to 0.8. Try to increase the number of tuning steps. The acceptance probability does not match the target. It is 0.993538143406, but should be close to 0.8. Try to increase the number of tuning steps.
Running: k5
Auto-assigning NUTS sampler... Initializing NUTS using jitter+adapt_diag... Multiprocess sampling (2 chains in 2 jobs) NUTS: [sd_log__, np.power(x, 5), np.power(x, 4), np.power(x, 3), np.power(x, 2), x, Intercept] 100%|██████████| 2500/2500 [00:06<00:00, 407.53it/s] The acceptance probability does not match the target. It is 0.924822538129, but should be close to 0.8. Try to increase the number of tuning steps. The acceptance probability does not match the target. It is 0.986333585677, but should be close to 0.8. Try to increase the number of tuning steps.
Evaluate log probabilities straight from model.logp (the model joint log-probability, evaluated at the posterior means of the parameters)
dfll = pd.DataFrame(index=['k1','k2','k3','k4','k5'], columns=['lin','quad'])
dfll.index.name = 'model'

for nm in dfll.index:
    dfll.loc[nm, 'lin'] = -models_lin[nm].logp(pm.summary(traces_lin[nm],
                                               traces_lin[nm].varnames)['mean'].to_dict())
    dfll.loc[nm, 'quad'] = -models_quad[nm].logp(pm.summary(traces_quad[nm],
                                                 traces_quad[nm].varnames)['mean'].to_dict())

dfll = pd.melt(dfll.reset_index(), id_vars=['model'],
               var_name='poly', value_name='log_likelihood')
Plot log-likelihoods
g = sns.factorplot(x='model', y='log_likelihood', col='poly',
hue='poly', data=dfll, size=6)
Observe:
Just for the linear-generated data, let's take an interactive look at the posterior predictive fit for the models k1 through k5.
As indicated by the likelihood plots above, the higher-order polynomial models exhibit some quite wild swings in the function in order to (over)fit the data.
interactive(plot_posterior_cr, models=fixed(models_lin), traces=fixed(traces_lin),
rawdata=fixed(dfs_lin), xlims=fixed(dfs_lin_xlims), datamodelnm=fixed('linear'),
modelnm = ['k1','k2','k3','k4','k5'])
A Jupyter Widget
The Widely Applicable Information Criterion (WAIC) can be used to calculate the goodness-of-fit of a model using numerical techniques. See (Watanabe 2013) for details.
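For reference (background from Watanabe and Vehtari, Gelman & Gabry, not an extra computation performed in this Notebook), WAIC is built from the pointwise log predictive density and an effective-number-of-parameters penalty, estimated from the $S$ posterior samples $\theta^{(s)}$:

$$\mathrm{WAIC} = -2\left(\sum_{i=1}^{n}\log\!\left(\frac{1}{S}\sum_{s=1}^{S}p(y_{i}\mid\theta^{(s)})\right) - \sum_{i=1}^{n}\mathrm{Var}_{s}\!\left[\log p(y_{i}\mid\theta^{(s)})\right]\right)$$

pm.waic estimates this (and its standard error) directly from a trace; in the PyMC3 version used here it is reported on the deviance scale, so lower is better.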
Observe:
In this case we are interested in the WAIC score. We also plot error bars for the standard error of the estimated scores. This gives us a more accurate view of how much they might differ.
Now loop through all the models and calculate the WAIC
dfwaic = pd.DataFrame(index=['k1','k2','k3','k4','k5'], columns=['lin','quad'])
dfwaic.index.name = 'model'

for nm in dfwaic.index:
    dfwaic.loc[nm, 'lin'] = pm.waic(traces_lin[nm], models_lin[nm])[:2]
    dfwaic.loc[nm, 'quad'] = pm.waic(traces_quad[nm], models_quad[nm])[:2]

dfwaic = pd.melt(dfwaic.reset_index(), id_vars=['model'], var_name='poly', value_name='waic_')
dfwaic[['waic', 'waic_se']] = dfwaic['waic_'].apply(pd.Series)
# Define a wrapper function for plt.errorbar
def errorbar(x, y, se, order, color, **kws):
    xnum = [order.index(x_i) for x_i in x]
    plt.errorbar(xnum, y, yerr=se, color='k', ls='None')
g = sns.factorplot(x='model', y='waic', col='poly', hue='poly', data=dfwaic, size=6)
order = sns.utils.categorical_order(dfwaic['model'])
g.map(errorbar,'model', 'waic', 'waic_se', order=order);
/home/osvaldo/proyectos/00_PyMC3/pymc3/pymc3/stats.py:194: UserWarning: For one or more samples the posterior variance of the log predictive densities exceeds 0.4. This could be indication of WAIC starting to fail see http://arxiv.org/abs/1507.04544 for details """)
Observe:
We should prefer the model(s) with lower WAIC.
Linear-generated data (lhs):
Quadratic-generated data (rhs):
Leave-One-Out Cross-Validation or K-fold Cross-Validation is another quite universal approach for model selection. However, to implement K-fold cross-validation we would need to partition the data repeatedly and fit the model on every partition. This can be very time consuming (computation time increases roughly by a factor of K). Here we are applying the numerical approach that uses the posterior trace, as suggested in Vehtari et al 2015.
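For reference (background from Vehtari et al 2015, not an extra computation performed in this Notebook), the quantity being estimated is the leave-one-out expected log predictive density, approximated from the posterior samples $\theta^{(s)}$ using Pareto-smoothed importance weights $w_{i}^{(s)}$:

$$\mathrm{elpd}_{\mathrm{loo}} = \sum_{i=1}^{n}\log p(y_{i}\mid y_{-i}), \qquad p(y_{i}\mid y_{-i}) \approx \frac{\sum_{s=1}^{S}w_{i}^{(s)}\,p(y_{i}\mid\theta^{(s)})}{\sum_{s=1}^{S}w_{i}^{(s)}}$$

pm.loo returns this on the same deviance scale as WAIC, together with a standard error, so again lower is better.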
dfloo = pd.DataFrame(index=['k1','k2','k3','k4','k5'], columns=['lin','quad'])
dfloo.index.name = 'model'

for nm in dfloo.index:
    dfloo.loc[nm, 'lin'] = pm.loo(traces_lin[nm], models_lin[nm])[:2]
    dfloo.loc[nm, 'quad'] = pm.loo(traces_quad[nm], models_quad[nm])[:2]

dfloo = pd.melt(dfloo.reset_index(), id_vars=['model'], var_name='poly', value_name='loo_')
dfloo[['loo', 'loo_se']] = dfloo['loo_'].apply(pd.Series)
g = sns.factorplot(x='model', y='loo', col='poly', hue='poly', data=dfloo, size=6)
order = sns.utils.categorical_order(dfloo['model'])
g.map(errorbar,'model', 'loo', 'loo_se', order=order);
/home/osvaldo/proyectos/00_PyMC3/pymc3/pymc3/stats.py:270: UserWarning: Estimated shape parameter of Pareto distribution is greater than 0.7 for one or more samples. You should consider using a more robust model, this is because importance sampling is less likely to work well if the marginal posterior and LOO posterior are very different. This is more likely to happen with a non-robust model and highly influential observations.""")
Observe:
We should prefer the model(s) with lower LOO. You can see that LOO is nearly identical to WAIC. That's because WAIC is asymptotically equal to LOO. However, PSIS-LOO is supposedly more robust than WAIC in the finite case (under weak priors or influential observations).
Linear-generated data (lhs):
Quadratic-generated data (rhs):
It is important to keep in mind that, with more data points, the real underlying model (the one we used to generate the data) should outperform the other models.
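As a possible extension (a sketch, not part of the original Notebook; the exact signature of these helpers differs across PyMC3 versions, so treat it as an assumption to check against your version's docs), PyMC3 also provides pm.compare and pm.compareplot to rank a set of fitted models by WAIC or LOO in a single table:

## hypothetical usage sketch, assuming pm.compare accepts parallel lists of traces and models
df_comp = pm.compare(list(traces_lin.values()), list(models_lin.values()), ic='LOO')
print(df_comp)           ## one row per model: LOO, pLOO, dLOO, weight, SE, dSE, warning
pm.compareplot(df_comp)  ## graphical summary of the comparison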
There is some agreement that PSIS-LOO offers the best indication of a model's quality. To quote from avehtari's comment: "I also recommend using PSIS-LOO instead of WAIC, because it's more reliable and has better diagnostics as discussed in http://link.springer.com/article/10.1007/s11222-016-9696-4 (preprint https://arxiv.org/abs/1507.04544), but if you insist to have one information criterion then leave WAIC".
Alternatively, Watanabe says "WAIC is a better approximator of the generalization error than the pareto smoothing importance sampling cross validation. The Pareto smoothing cross validation may be the better approximator of the cross validation than WAIC, however, it is not of the generalization error".
For more information on Model Selection in PyMC3, and about Bayesian model selection, you could start with:
Example originally contributed by Jonathan Sedar 2016-01-09 github.com/jonsedar. Edited by Junpeng Lao 2017-07-06 github.com/junpenglao.