import adaptive
adaptive.notebook_extension()
# Import modules that are used in multiple cells
import holoviews as hv
import numpy as np
from functools import partial
import random
import matplotlib.pylab as pl
import matplotlib.pyplot as plt
from adaptive.tests import test_average_learner1d as tests
from adaptive.learner.learner1D import (curvature_loss_function,
                                        uniform_loss,
                                        default_loss,
                                        triangle_loss)
%config InlineBackend.figure_formats=set(['svg'])
First, we define the (noisy) function to be sampled. If one wants to use our plotting tools, the function needs at least two arguments: `x` (a non-default argument) and `sigma` (a default argument). The first is the value of the independent variable; `sigma` measures the intensity of the noise and should be $0$ when there is no noise (in our functions, `sigma` is generally the standard deviation of Gaussian noise).
def my_fun(x, sigma=0, peak_width=0.05, offset=-0.5, wait=False):
    from time import sleep
    from random import random

    # Optionally wait a random time to mimic an expensive evaluation
    if wait:
        sleep(random())
    # Cubic background plus a Lorentzian peak of width peak_width at x = offset
    fun = x**3 - x + 3 * peak_width**2 / (peak_width**2 + (x - offset)**2)
    # Add Gaussian noise with standard deviation sigma
    return fun + np.random.normal(0, sigma)
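For example, since the noise is drawn inside `my_fun`, repeated evaluations at the same point differ whenever `sigma > 0`:
# Three noisy evaluations at the same x; each call draws fresh Gaussian noise
print([my_fun(0.3, sigma=2) for _ in range(3)])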
bounds = [-2, 2]
sigma = 2
# Plot the noiseless and the noisy function
tests.plot_fun(partial(my_fun, sigma=0), bounds, N=400)
tests.plot_fun(partial(my_fun, sigma=sigma), bounds)
Before going into the AverageLearner1D, let us look at the results of a Learner1D sampling the function without noise. Note that we are using the triangle loss.
learner1 = adaptive.Learner1D(partial(my_fun, sigma=0), bounds=bounds,
                              loss_per_interval=triangle_loss)
tests.run_N(learner1, 25)
tests.plot_learner(learner1, Nfun=200)
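The other losses imported above can be swapped in the same way. As a minimal sketch reusing the helpers from the previous cell, the curvature loss could be used like this (note that `curvature_loss_function` is a factory that returns the actual loss function):
# Sketch: same Learner1D setup, but with the curvature loss
learner_curv = adaptive.Learner1D(partial(my_fun, sigma=0), bounds=bounds,
                                  loss_per_interval=curvature_loss_function())
tests.run_N(learner_curv, 25)
tests.plot_learner(learner_curv, Nfun=200)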
The behavior of the AverageLearner1D is governed by several parameters:
`loss_per_interval`: the loss function (same as in the Learner1D). In our example we use the triangle loss.
# Learner parameters
loss_per_interval = triangle_loss
strategy = 5
alfa = 0.005
delta = 0.5
min_samples = 50
max_samples = 3000
min_Delta_g = 0.001
neighbor_sampling = 0.5
# Function
fun = partial(my_fun, sigma=sigma)
# Other parameters
randomseed = 2
## -----------------------------------
# Initialize learner
learner = adaptive.AverageLearner1D(fun,
                                    loss_per_interval=loss_per_interval,
                                    strategy=strategy, bounds=bounds,
                                    alfa=alfa, delta=delta,
                                    min_samples=min_samples,
                                    max_samples=max_samples,
                                    min_Delta_g=min_Delta_g,
                                    neighbor_sampling=neighbor_sampling)
Next, we run the learner until it has taken $N = 15000$ samples; $n$ denotes the total number of points (an alternative goal based on $n$ is shown commented out below):
# Set random seeds for reproducibility
random.seed(randomseed)
np.random.seed(randomseed)
# Run and plot in real time
tests.simple_liveplot(learner, N_batch=500, goal=lambda l: l.total_samples() >= 15000)
# Alternative goal using the number of points n
# tests.simple_liveplot(learner, N_batch=500, goal=lambda l: len(l.data) >= 25)
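If the live plot is not needed, the same goal can be reached with adaptive's blocking helper; a minimal sketch, assuming the standard `adaptive.runner.simple` API:
# Blocking run without live plotting, using the same stopping criterion
adaptive.runner.simple(learner, goal=lambda l: l.total_samples() >= 15000)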
The AverageLearner1D can also be run in parallel for a computational speed-up; this is done in the same way as with the Learner1D.
Note that we use the AverageLearner1D_parallel, which is the parallelized version of the AverageLearner1D. We kept both types of learner because they were being tested simultaneously; in the final version, AverageLearner1D_parallel will become the only AverageLearner1D.
# Initialize learner
learner = adaptive.AverageLearner1D_parallel(fun,
                                             loss_per_interval=loss_per_interval,
                                             bounds=bounds,
                                             alfa=alfa, delta=delta,
                                             min_samples=min_samples,
                                             max_samples=max_samples,
                                             min_Delta_g=min_Delta_g,
                                             neighbor_sampling=neighbor_sampling)
# Set random seeds for reproducibility
random.seed(randomseed)
np.random.seed(randomseed)
# Run in parallel
runner = adaptive.Runner(learner, goal=lambda l: l.total_samples >= 15000)
runner.live_info()
runner.live_plot(update_interval=0.1)
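The Runner picks an executor automatically; to control the number of workers, an executor can be passed explicitly. A sketch using `concurrent.futures` (the `max_workers=4` value is only illustrative):
from concurrent.futures import ProcessPoolExecutor

# Explicitly choose the executor and the number of parallel workers (sketch)
runner = adaptive.Runner(learner,
                         goal=lambda l: l.total_samples >= 15000,
                         executor=ProcessPoolExecutor(max_workers=4))
runner.live_info()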