# General imports and parameters of figures should be loaded at the beginning of the overview # General imports %matplotlib inline import numpy as np import matplotlib.pyplot as plt from matplotlib import colors as mcolors # Figure config colors = dict(mcolors.BASE_COLORS, **mcolors.CSS4_COLORS) LEGEND_SIZE = 15 TITLE_SIZE = 25 AXIS_SIZE = 15
The goal of this notebook is to illustrate how a model can be wrapped and used in different tasks in Emukit.
These steps are common to all methods in Emukit. Here we illustrate how to do it with the branin function.
# Objective function, parameter space, random design and model wrappers used
# throughout this overview.
from emukit.test_functions import branin_function
from emukit.core import ParameterSpace, ContinuousParameter
from emukit.experimental_design.model_free.random_design import RandomDesign
from GPy.models import GPRegression
from emukit.model_wrappers import GPyModelWrapper
from emukit.model_wrappers.gpy_quadrature_wrappers import BaseGaussianProcessGPy, RBFGPy
# branin_function() returns the objective as a callable plus a second value
# (discarded here); only the callable f is used below.
f, _ = branin_function()
The parameter space contains the definition of the input variables of the function. Currently Emukit supports continuous and discrete parameters.
# Input domain of the Branin function: two continuous variables,
# x1 in [-5, 10] and x2 in [0, 15].
x1_param = ContinuousParameter('x1', -5, 10)
x2_param = ContinuousParameter('x2', 0, 15)
parameter_space = ParameterSpace([x1_param, x2_param])
In this step we are just collecting some initial random points in the parameter space of the objective. These points are used to initialize an emulator of the function. We use the RandomDesign class available in Emukit for this.
# Draw an initial set of uniformly random locations in the parameter space
# and evaluate the objective there; (X, Y) seed the emulator fitted below.
initial_design = RandomDesign(parameter_space)
num_data_points = 30
X = initial_design.get_samples(num_data_points)
Y = f(X)
To conclude the steps that are common to any method in Emukit we now build an initial emulator of the objective function and we wrap the model in Emukit. In this example we use GPy but note that any modeling framework can be used here.
# Fit a GP regression model to the initial design and wrap it so that any
# Emukit method can consume it.
model_gpy = GPRegression(X,Y)
# Optimize the kernel hyperparameters of the GP.
model_gpy.optimize()
model_emukit = GPyModelWrapper(model_gpy)
In this section we use the model that we have created to solve different decision tasks. When the model is used in a decision loop (Bayesian optimization, Bayesian quadrature, Experimental design) the loop needs to be loaded and elements like acquisitions and optimizers need to be passed in together with other parameters. Sensitivity analysis provides us with tools to interpret the model, so no loop is needed.
# Decision loops from emukit.experimental_design.model_based import ExperimentalDesignLoop from emukit.bayesian_optimization.loops import BayesianOptimizationLoop from emukit.quadrature.loop import VanillaBayesianQuadratureLoop # Acquisition functions from emukit.bayesian_optimization.acquisitions import ExpectedImprovement from emukit.experimental_design.model_based.acquisitions import ModelVariance from emukit.quadrature.acquisitions import IntegralVarianceReduction # Acquistions optimizers from emukit.core.optimization import AcquisitionOptimizer # Stopping conditions from emukit.core.loop import FixedIterationsStoppingCondition # Point calculator from emukit.core.loop import SequentialPointCalculator # Bayesian quadrature kernel and model from emukit.quadrature.kernels import QuadratureRBF from emukit.quadrature.methods import VanillaBayesianQuadrature
Here we use the model to find the minimum of the objective using Bayesian optimization. We collect 10 batches of 5 points each.
# Load core elements for Bayesian optimization expected_improvement = ExpectedImprovement(model = model_emukit) optimizer = AcquisitionOptimizer(space = parameter_space) point_calculator = SequentialPointCalculator(expected_improvement, optimizer)
# Create the Bayesian optimization object bayesopt_loop = BayesianOptimizationLoop(model = model_emukit, space = parameter_space, acquisition = expected_improvement, batch_size = 5)
# Run the loop and extract the optimum stopping_condition = FixedIterationsStoppingCondition(i_max = 10) bayesopt_loop.run_loop(f, stopping_condition)
Here we use the same model to perform experimental design. We use the model variance as the acquisition function. We collect 10 batches of 5 points each. After each batch is collected the model is updated.
# Load core elements for Experimental design model_variance = ModelVariance(model = model_emukit) optimizer = AcquisitionOptimizer(space = parameter_space) point_calculator = SequentialPointCalculator(model_variance, optimizer)
# Create the Experimental design object expdesign_loop = ExperimentalDesignLoop(space = parameter_space, model = model_emukit, acquisition = model_variance, update_interval = 1, batch_size = 5)
# Run the loop stopping_condition = FixedIterationsStoppingCondition(i_max = 10) expdesign_loop.run_loop(f, stopping_condition)
Here we use vanilla BQ from the quadrature package to integrate the branin function. Note that we have to create a slightly different emukit model since the BQ package needs some additional information. But we can re-use the GPy GP model from above. We also need to specify the integral bounds, i.e., the domain which we want to integrate over.
# Define the integration domain: one (lower, upper) pair per input
# dimension, matching the parameter space defined earlier.
lower_bounds = [-5, 0]
upper_bounds = [10, 15]
integral_bounds = list(zip(lower_bounds, upper_bounds))
# Load core elements for Bayesian quadrature emukit_qrbf = QuadratureRBF(RBFGPy(model_gpy.kern), integral_bounds=integral_bounds) emukit_model = BaseGaussianProcessGPy(kern=emukit_qrbf, gpy_model=model_gpy) emukit_method = VanillaBayesianQuadrature(base_gp=emukit_model)
# Create the Bayesian quadrature object bq_loop = VanillaBayesianQuadratureLoop(model=emukit_method)
# Run the loop and extract the integral estimate num_iter = 5 bq_loop.run_loop(f, stopping_condition=num_iter) integral_mean, integral_variance = bq_loop.model.integrate()
Computing the sensitivity indices of a model does not require any loop. We just load the class MonteCarloSensitivity and pass in the model and the parameter space.
from emukit.sensitivity.monte_carlo import MonteCarloSensitivity

# No loop here, compute Sobol indices directly from the emulator.
# (Fixed misspelled variable name: 'senstivity_analysis' -> 'sensitivity_analysis'.)
sensitivity_analysis = MonteCarloSensitivity(model = model_emukit, input_domain = parameter_space)
# Monte Carlo estimates of the main (first-order) and total effects per input.
main_effects, total_effects, _ = sensitivity_analysis.compute_effects(num_monte_carlo_points = 10000)
We have run all these methods using the same model and the same objective function. With Emukit, probabilistic models can be easily tested over multiple decision problems with a very light API.