# Gaussian process regression tutorial (1-D and 2-D) using GaussianProcesses.jl.
using GaussianProcesses
using Random  # srand was removed in Julia 1.0; Random.seed! is the replacement

Random.seed!(20140430)

# ----- 1-D training data -----
n = 10                              # number of training points
x = 2π * rand(n)                    # predictors, uniform on [0, 2π)
y = sin.(x) + 0.05 * randn(n)       # noisy responses

# Select mean and covariance function.
mZero = MeanZero()                  # zero mean function
kern = SE(0.0, 0.0)                 # squared exponential kernel
                                    # (note: hyperparameters are on the log scale)

logObsNoise = -1.0                  # log standard deviation of observation noise (optional)
gp = GP(x, y, mZero, kern, logObsNoise)  # fit the GP

# Predict on a dense grid.
# linspace was removed in Julia 1.0; range(...; length=...) is the replacement.
μ, σ² = predict_y(gp, range(0, 2π; length=100))

using Plots  # load Plots.jl package
plot(gp; xlabel="x", ylabel="y", title="Gaussian process", legend=false, fmt=:png)  # plot the GP

using Optim  # required for Optim.BFGS below — the original script never loaded it
optimize!(gp; method=Optim.BFGS())  # optimise the hyperparameters
plot(gp)                            # plot the GP after the hyperparameters have been optimised

using Distributions
# Uniform(0, 1) distribution assumed by default if priors are not specified.
set_priors!(kern, [Normal(), Normal()])
chain = mcmc(gp)
# Plots.jl expects one label per series as a 1×k row matrix (spaces, not commas).
plot(chain'; label=["Noise" "SE log length" "SE log scale"])

# ----- 2-D training data -----
d, n = 2, 50                        # dimension and number of observations
x = 2π * rand(d, n)                 # predictors, one observation per column
y = vec(sin.(x[1, :]) .* sin.(x[2, :])) + 0.05 * rand(n)  # responses

mZero = MeanZero()                  # zero mean function
# Sum kernel: Matern 5/2 ARD kernel with parameters [log(ℓ₁), log(ℓ₂)] = [0, 0]
# and log(σ) = 0, plus a squared exponential Iso kernel with parameters
# log(ℓ) = 0 and log(σ) = 0.
kern = Matern(5 / 2, [0.0, 0.0], 0.0) + SE(0.0, 0.0)

gp = GP(x, y, mZero, kern, -2.0)    # fit the GP (log obs noise = -2)
optimize!(gp)                       # optimize the hyperparameters
plot(contour(gp), heatmap(gp))      # posterior mean as contour and heatmap