# ------------------------------------------------------------------
# Demo 1: product of two univariate Gaussians.
# The (unnormalized) product of two Gaussian pdfs is itself Gaussian
# up to a constant Z; below we compute the parameters of the
# normalized product via precision-weighted averaging.
# ------------------------------------------------------------------
using PyPlot, Distributions
d1 = Normal(0, 1) # μ=0, σ^2=1
d2 = Normal(3, 2) # μ=3, σ^2=4 (Normal takes the std. dev., so σ=2)
# Calculate the parameters of the product d1*d2:
# precisions add, and the mean is the precision-weighted average.
s2_prod = (d1.σ^-2 + d2.σ^-2)^-1
m_prod = s2_prod * ((d1.σ^-2)*d1.μ + (d2.σ^-2)*d2.μ)
d_prod = Normal(m_prod, sqrt(s2_prod)) # Note that we neglect the normalization constant.
# Plot the two factors, their raw (unnormalized) product, and the
# normalized product density, on a common grid.
x = range(-4, stop=8, length=100)
plot(x, pdf.(d1,x), "k")
plot(x, pdf.(d2,x), "b")
plot(x, pdf.(d1,x) .* pdf.(d2,x), "r-") # Plot the exact (unnormalized) product
plot(x, pdf.(d_prod,x), "r:") # Plot the normalized Gaussian product
legend([L"\mathcal{N}(0,1)", L"\mathcal{N}(3,4)", L"\mathcal{N}(0,1) \mathcal{N}(3,4)", L"Z^{-1} \mathcal{N}(0,1) \mathcal{N}(3,4)"]);

# ------------------------------------------------------------------
# Demo 2: bivariate Gaussian — joint p(x,y), marginal p(x), and
# conditional p(y|x) computed from the standard closed-form
# partitioned-Gaussian formulas.
# ------------------------------------------------------------------
using PyPlot, Distributions
μ = [1.0; 2.0]                 # joint mean [μ_x; μ_y]
Σ = [0.3 0.7; 0.7 2.0]         # joint covariance matrix
joint = MvNormal(μ,Σ)
# Marginal of x: just drop the y-component — mean μ_x, variance Σ_xx.
marginal_x = Normal(μ[1], sqrt(Σ[1,1]))
# Plot p(x,y) as an image over a grid; note the comprehension runs
# j (rows ↔ y) then i (cols ↔ x), matching imshow's row/column layout
# with origin="lower".
subplot(221)
x_range = y_range = range(-2,stop=5,length=1000)
joint_pdf = [ pdf(joint, [x_range[i];y_range[j]]) for j=1:length(y_range), i=1:length(x_range)]
imshow(joint_pdf, origin="lower", extent=[x_range[1], x_range[end], y_range[1], y_range[end]])
grid(); xlabel("x"); ylabel("y"); title("p(x,y)"); tight_layout()
# Plot p(x)
subplot(222)
plot(range(-2,stop=5,length=1000), pdf.(marginal_x, range(-2,stop=5,length=1000)))
grid(); xlabel("x"); ylabel("p(x)"); title("Marginal distribution p(x)"); tight_layout()
# Plot p(y|x) for a fixed observation x = 0.1, using the Gaussian
# conditioning formulas:
#   m  = μ_y + Σ_yx Σ_xx^{-1} (x - μ_x)
#   s² = Σ_yy - Σ_yx Σ_xx^{-1} Σ_xy
x = 0.1
conditional_y_m = μ[2]+Σ[2,1]*inv(Σ[1,1])*(x-μ[1])
conditional_y_s2 = Σ[2,2] - Σ[2,1]*inv(Σ[1,1])*Σ[1,2]
conditional_y = Normal(conditional_y_m, sqrt.(conditional_y_s2))
subplot(223)
plot(range(-2,stop=5,length=1000), pdf.(conditional_y, range(-2,stop=5,length=1000)))
grid(); xlabel("y"); ylabel("p(y|x)"); title("Conditional distribution p(y|x)"); tight_layout()

# ------------------------------------------------------------------
# Demo 3: scalar Kalman filter / recursive Bayesian estimation of a
# constant θ from noisy observations x[1..N].
# ------------------------------------------------------------------
using PyPlot
N = 50        # Number of observations
θ = 2.0       # True value of the variable we want to estimate
σ_ϵ2 = 0.25   # Observation noise variance
x = sqrt(σ_ϵ2) * randn(N) .+ θ # Generate N noisy observations of θ
t = 0                          # current time step (advanced by performKalmanStep)
μ = fill!(Vector{Float64}(undef,N), NaN)    # Means of p(θ|D) over time
σ_μ2 = fill!(Vector{Float64}(undef,N), NaN) # Variances of p(θ|D) over time
function performKalmanStep()
    # Perform a single Kalman filter step: advance the global time
    # counter t and write the updated posterior mean/variance into
    # the global μ and σ_μ2 arrays.
    global t += 1
    if t>1 # Use posterior from prev. step as prior
        K = σ_μ2[t-1] / (σ_ϵ2 + σ_μ2[t-1]) # Kalman gain
        μ[t] = μ[t-1] + K*(x[t] - μ[t-1]) # Update mean using (1)
        σ_μ2[t] = σ_μ2[t-1] * (1.0-K) # Update variance using (2)
    elseif t==1 # Use prior
        # Prior p(θ) = N(0,1000), i.e. a broad, weakly informative prior.
        K = 1000.0 / (σ_ϵ2 + 1000.0) # Kalman gain
        μ[t] = 0 + K*(x[t] - 0) # Update mean using (1)
        σ_μ2[t] = 1000 * (1.0-K) # Update variance using (2)
    end
end
# NOTE(review): the source chunk is truncated here — the loop condition
# and body are missing (presumably it iterates performKalmanStep() while
# t < N — confirm against the full file).
while t