# Florent Leclercq,
# Imperial Centre for Inference and Cosmology, Imperial College London,
# florent.leclercq@polytechnique.org
from scipy.stats import cauchy, uniform, norm
import numpy as np
import matplotlib.pyplot as plt
# Scale parameter shared by the Cauchy and Gaussian densities below.
d=2 #arbitrary units
# Support / plotting range for the priors.
xmin=-10
xmax=10
def p_flat(x):
    """Uniform (flat) prior density, constant on [xmin, xmax] and 0 outside."""
    width = xmax - xmin
    return uniform.pdf(x, loc=xmin, scale=width)
def p_Jeyffrey(x):
    """Jeffreys prior density for a scale parameter: p(x) ∝ 1/x for x > 0.

    Returns 0 for x <= 0 (the prior's support is the positive reals).
    The 2e-2 factor is an arbitrary normalisation chosen for plotting.

    NOTE: the identifier keeps the original (misspelled) name for
    backward compatibility; the standard spelling is "Jeffreys".
    """
    x = np.asarray(x, dtype=float)
    out = np.zeros_like(x)
    positive = x > 0
    # Divide only where x > 0. The previous np.where(x < 0, 0, 2e-2/x)
    # evaluated 2e-2/x for every element (np.where computes both branches),
    # raising a divide-by-zero RuntimeWarning and returning inf at x == 0.
    out[positive] = 2e-2 / x[positive]
    return out
def p_Cauchy(x):
    """Cauchy (Lorentzian) density centred at 0 with scale parameter d."""
    centre = 0
    return cauchy.pdf(x, centre, d)
# Sample the support and overlay the three candidate priors on one figure.
x = np.linspace(xmin, xmax, 100)
plt.figure(figsize=(12, 8))
plt.plot(x, p_flat(x), label="Uniform distribution (flat prior on $x$)")
# Label typo fixed: the scale-invariant reference prior is due to Harold
# Jeffreys ("Jeffreys prior"); the function name is kept for compatibility.
plt.plot(x, p_Jeyffrey(x), label="Jeffreys prior")
plt.plot(x, p_Cauchy(x),
         label="Lorentzian (Cauchy distribution) (flat prior on $\\theta$)")
plt.title("Different priors")
plt.legend(loc="best")
# Output: <matplotlib.legend.Legend at 0x14e687ba69e8>
# Maximum ignorance about one variable is not the same thing as maximum ignorance about a non-linear function of that variable!
def p_Gaussian(x):
    """Normal density centred on zero with standard deviation d."""
    mean, sigma = 0, d
    return norm.pdf(x, loc=mean, scale=sigma)
# Compare the tails of the Cauchy and Gaussian densities.
plt.figure(figsize=(12, 8))
plt.plot(x, p_Cauchy(x), color='C2', label="Lorentzian")
# Rescale the Gaussian so both curves share the same peak height at x = 0.
rescale = p_Cauchy(0) / p_Gaussian(0)
plt.plot(x, p_Gaussian(x) * rescale, color='C3', label="Gaussian")
plt.title("Lorentzian versus Gaussian")
plt.legend(loc="best")
# Output: <matplotlib.legend.Legend at 0x14e689c9d4a8>
# The Lorentzian has broader tails than the Gaussian.