#!/usr/bin/env python
# coding: utf-8
# # The lighthouse problem
# Florent Leclercq,
# Imperial Centre for Inference and Cosmology, Imperial College London,
# florent.leclercq@polytechnique.org
# In[1]:
from scipy.stats import cauchy, uniform, norm
import numpy as np
import matplotlib.pyplot as plt
# ## Prior choice
# In[2]:
d=2      # perpendicular distance of the lighthouse from the shore (arbitrary units)
xmin=-10 # plotting range for the position x along the shore
xmax=10
# \begin{equation}
# x = d \tan \theta
# \end{equation}
# A flat prior on $\theta$ transforms to $x$ via the Jacobian $\left|\frac{\mathrm{d}\theta}{\mathrm{d}x}\right| = \frac{d}{x^2+d^2}$:
# \begin{equation}
# p(\theta) = C \Rightarrow p(x) = p(\theta) \left|\frac{\mathrm{d}\theta}{\mathrm{d}x}\right| \propto \frac{d}{x^2+d^2}
# \end{equation}
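# As a quick check of this change of variables (a sketch, not part of the original notebook): drawing $\theta$ uniformly in $(-\pi/2, \pi/2)$ and mapping it through $x = d \tan \theta$ should reproduce a Cauchy distribution of scale $d$.
# In[ ]:
rng = np.random.default_rng(42)
theta_samples = rng.uniform(-np.pi/2, np.pi/2, size=100000)
x_samples = d*np.tan(theta_samples)
# the empirical fraction with |x| < d should match the analytic Cauchy value, 1/2
print(np.mean(np.abs(x_samples) < d), 2/np.pi*np.arctan(1))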
# In[3]:
def p_flat(x):
    return uniform.pdf(x, loc=xmin, scale=xmax-xmin)
def p_Jeffreys(x):
    # Jeffreys prior, proportional to 1/x for x>0 (improper);
    # the prefactor 2e-2 is an arbitrary normalisation chosen for plotting
    return np.where(x<0, 0, 2e-2/x)
def p_Cauchy(x):
    return cauchy.pdf(x, loc=0, scale=d)
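# A normalisation sanity check (a sketch, assuming scipy.integrate is available): the flat and Cauchy priors are proper probability densities, whereas the Jeffreys prior $\propto 1/x$ is improper and its prefactor above is only a plotting convention.
# In[ ]:
from scipy.integrate import quad
print(quad(p_flat, xmin, xmax)[0])         # ~1
print(quad(p_Cauchy, -np.inf, np.inf)[0])  # ~1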
# In[4]:
x=np.linspace(xmin,xmax,100)
plt.figure(figsize=(12,8))
plt.plot(x,p_flat(x),label="Uniform distribution (flat prior on $x$)")
plt.plot(x,p_Jeffreys(x),label="Jeffreys prior")
plt.plot(x,p_Cauchy(x),label="Lorentzian (Cauchy distribution) (flat prior on $\\theta$)")
plt.title("Different priors")
plt.legend(loc="best")
# Maximum ignorance for one variable is not the same thing as maximum ignorance on a non-linear function of that variable!
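# To illustrate this (a sketch using the flat prior on $x$ defined above): the prior induced on $\theta = \arctan(x/d)$ by a flat prior on $x$ follows from the Jacobian $|\mathrm{d}x/\mathrm{d}\theta| = d/\cos^2\theta$, and is far from flat.
# In[ ]:
theta = np.linspace(-np.arctan(xmax/d), np.arctan(xmax/d), 100)
p_theta_induced = p_flat(d*np.tan(theta))*d/np.cos(theta)**2
plt.figure(figsize=(12,8))
plt.plot(theta, p_theta_induced, label="Prior on $\\theta$ induced by a flat prior on $x$")
plt.axhline(1/np.pi, linestyle='--', label="Flat prior on $\\theta$")
plt.title("Maximum ignorance is not reparametrisation-invariant")
plt.legend(loc="best")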
# In[5]:
def p_Gaussian(x):
    return norm.pdf(x, loc=0, scale=d)
# In[6]:
plt.figure(figsize=(12,8))
plt.plot(x,p_Cauchy(x),label="Lorentzian",color='C2')
plt.plot(x,p_Gaussian(x)*p_Cauchy(0)/p_Gaussian(0),
    label="Gaussian (rescaled to the same peak height)",color='C3')
plt.title("Lorentzian versus Gaussian")
plt.legend(loc="best")
# The Lorentzian has much broader tails than the Gaussian: its density falls off as a power law ($\propto 1/x^2$) rather than as $e^{-x^2/2d^2}$.
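# A numerical comparison of the tails (a sketch using the scipy survival functions): the probability of $|x| > 5d$ under each distribution.
# In[ ]:
print("Cauchy:  ", 2*cauchy.sf(5*d, loc=0, scale=d))   # ~0.13
print("Gaussian:", 2*norm.sf(5*d, loc=0, scale=d))     # ~6e-7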