Title: Two Years of Bayesian Bandits for E-Commerce
Abstract: At Monetate, we've deployed Bayesian bandits (both noncontextual and contextual) to help our clients optimize their e-commerce sites since early 2016. This talk is an overview of the lessons we've learned, both from deploying real-time Bayesian machine learning systems at scale and from building a data product on top of these systems that is accessible to non-technical users (marketers). We will focus primarily on noncontextual bandits and give a brief overview of these problems in the contextual setting as time permits.
Bio: Austin Rochford is Principal Data Scientist and Director of Monetate Labs, where he does research and development for machine learning-driven marketing products. He is a recovering mathematician, a passionate Bayesian, and a PyMC3 developer.
A Dockerfile that will produce a container with the dependencies of this notebook is available here.
%matplotlib inline
from tqdm import tqdm, trange
from matplotlib import pyplot as plt
from matplotlib.dates import DateFormatter, MonthLocator
from matplotlib.ticker import StrMethodFormatter
import numpy as np
import pandas as pd
import scipy as sp
import seaborn as sns
SEED = 76224 # from random.org, for reproducibility
np.random.seed(SEED)
sns.set()
C = sns.color_palette()
pct_formatter = StrMethodFormatter('{x:.1%}')
# configure matplotlib
FIGURE_WIDTH = 8
FIGURE_HEIGHT = 6
plt.rc('figure', figsize=(FIGURE_WIDTH, FIGURE_HEIGHT))
LABELSIZE = 14
plt.rc('axes', labelsize=LABELSIZE)
plt.rc('axes', titlesize=LABELSIZE)
plt.rc('figure', titlesize=LABELSIZE)
plt.rc('legend', fontsize=LABELSIZE)
plt.rc('xtick', labelsize=LABELSIZE)
plt.rc('ytick', labelsize=LABELSIZE)
Much of the A/B testing industry uses classical Fisher/Neyman-Pearson-style fixed-horizon frequentist significance tests; more sophisticated approaches use sequential hypothesis testing. Through many years of interaction with marketers, we've found that they take a fundamentally Bayesian view of the world: most interpret p-values as the "probability that the experiment is better/worse than the control."
Multi-armed bandit problems are extensively studied and come in many variations. Here we focus on the simplest multi-armed bandit objective, regret minimization.
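Concretely, if $r_A$ and $r_B$ are the (unknown) per-session reward rates and $a_t \in \{A, B\}$ is the variant shown in session $t$, the cumulative regret after $T$ sessions is

$$ R_T = \sum_{t = 1}^T \left( \max(r_A, r_B) - r_{a_t} \right), $$

the expected reward lost by not always showing the better variant; regret minimization aims to make $R_T$ grow as slowly as possible.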
Thompson sampling randomizes user/variant assignment according to the probability that each variant maximizes the posterior expected reward.
The probability that a user is assigned variant A is
$$
\begin{align*}
P(r_A > r_B\ |\ \mathcal{D})
    & = \int_0^1 P(r_A > r\ |\ \mathcal{D})\ \pi_B(r\ |\ \mathcal{D})\ dr \\
    & = \int_0^1 \left(\int_r^1 \pi_A(s\ |\ \mathcal{D})\ ds\right)\ \pi_B(r\ |\ \mathcal{D})\ dr \\
    & \propto \int_0^1 \left(\int_r^1 s^{\alpha_A - 1} (1 - s)^{\beta_A - 1}\ ds\right) r^{\alpha_B - 1} (1 - r)^{\beta_B - 1}\ dr
\end{align*}
$$
fig, ax = plt.subplots(
figsize=(0.75 * FIGURE_WIDTH, 0.75 * FIGURE_WIDTH)
)
ax.set_aspect('equal');
a_post = sp.stats.beta(5 + 1, 15 + 1)
b_post = sp.stats.beta(7 + 1, 13 + 1)
p = np.linspace(0, 1, 100)
px, py = np.meshgrid(p, p)
z = a_post.pdf(px) * b_post.pdf(py)
ax.contourf(px, py, z, 1000, cmap='BuGn');
ax.plot([0, 1], [0, 1], c='k', ls='--');
ax.xaxis.set_major_formatter(pct_formatter);
ax.set_xlabel(r"$r_A \sim \operatorname{Beta}(6, 16)$");
ax.yaxis.set_major_formatter(pct_formatter);
ax.set_ylabel(r"$r_B \sim \operatorname{Beta}(8, 14)$");
ax.set_title("Posterior reward rates");
fig
N_SAMPLES = 1000
a_samples = a_post.rvs(size=N_SAMPLES)
b_samples = b_post.rvs(size=N_SAMPLES)
ax.scatter(
a_samples[b_samples > a_samples],
b_samples[b_samples > a_samples],
c='k', alpha=0.25
);
ax.scatter(
a_samples[b_samples < a_samples],
b_samples[b_samples < a_samples],
c=C[2], alpha=0.25
);
ts_fig = fig
fig
(a_samples > b_samples).mean()
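As a quick numerical check (not part of the original notebook), the double integral above collapses to a one-dimensional integral of variant A's posterior survival function against variant B's posterior density, which scipy can evaluate directly; the result should agree closely with the Monte Carlo estimate above.
from scipy import integrate, stats

a_post_check = stats.beta(5 + 1, 15 + 1)  # Beta(6, 16), variant A posterior above
b_post_check = stats.beta(7 + 1, 13 + 1)  # Beta(8, 14), variant B posterior above

# P(r_A > r_B | D) = int_0^1 P(r_A > r | D) * pi_B(r | D) dr
p_assign_a, _ = integrate.quad(
    lambda r: a_post_check.sf(r) * b_post_check.pdf(r),
    0, 1
)
p_assign_a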
class BetaBinomial:
    # Conjugate Beta prior/posterior for Bernoulli (conversion) rewards
    def __init__(self, a0=1., b0=1.):
        self.a = a0
        self.b = b0

    def sample(self):
        # Draw one conversion rate from the current Beta posterior
        return sp.stats.beta.rvs(self.a, self.b)

    def update(self, n, x):
        # Observe n sessions, x of which converted
        self.a += x
        self.b += n - x
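A minimal usage sketch (illustrative, not from the original talk): starting from the uniform Beta(1, 1) prior, observing 5 conversions in 20 sessions recovers the Beta(6, 16) posterior used for variant A above.
conv_post = BetaBinomial()
conv_post.update(20, 5)   # 20 sessions, 5 conversions
conv_post.a, conv_post.b  # (6.0, 16.0)
conv_post.sample()        # one draw of the conversion rate from Beta(6, 16)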
class Bandit:
    def __init__(self, a_post, b_post):
        self.a_post = a_post
        self.b_post = b_post

    def assign(self):
        # Thompson sampling: show the variant whose posterior sample is larger
        # (0 = variant A, 1 = variant B)
        return 1 * (self.a_post.sample() < self.b_post.sample())

    def update(self, arm, reward):
        arm_post = self.a_post if arm == 0 else self.b_post
        arm_post.update(1, reward)
def generate_rewards(a_rate, b_rate, n):
    # Yield one (variant A reward, variant B reward) pair per session
    yield from zip(
        np.random.binomial(1, a_rate, size=n),
        np.random.binomial(1, b_rate, size=n)
    )
A_RATE, B_RATE = 0.05, 0.1
N = 1000
rewards_gen = generate_rewards(A_RATE, B_RATE, N)
bandit = Bandit(BetaBinomial(), BetaBinomial())
arms = np.empty(N, dtype=np.int64)
rewards = np.empty(N)
for t, arm_rewards in tqdm(enumerate(rewards_gen), total=N):
    arms[t] = bandit.assign()
    rewards[t] = arm_rewards[arms[t]]
    bandit.update(arms[t], rewards[t])
fig, (regret_ax, assign_ax) = plt.subplots(
ncols=2, sharex=True,
figsize=(2 * FIGURE_WIDTH, 6)
)
plot_t = np.arange(N) + 1
regret_ax.plot(
plot_t, rewards.cumsum(),
c=C[0], label="Bandit"
);
regret_ax.plot(
plot_t, A_RATE * plot_t,
c=C[1], ls='--',
label="Variant A"
);
regret_ax.plot(
plot_t, B_RATE * plot_t,
c=C[2], ls='--',
label="Variant B"
);
regret_ax.plot(
plot_t,
0.5 * (A_RATE + B_RATE) * plot_t,
c='k', ls='--',
label="A/B testing"
);
regret_ax.set_xlim(1, N);
regret_ax.set_xlabel("Sessions");
regret_ax.set_ylabel("Rewards");
regret_ax.legend(loc=2);
assign_ax.plot(plot_t, (arms == 1).cumsum() / plot_t);
assign_ax.hlines(
0.5, 1, N,
colors='k', linestyles='--'
);
assign_ax.set_xlabel("Sessions");
assign_ax.set_ylim(0, 1);
assign_ax.yaxis.set_major_formatter(pct_formatter);
assign_ax.set_yticks(np.linspace(0, 1, 5));
assign_ax.set_ylabel("Traffic shown variant B");
fig.suptitle("Cumulative bandit performance");
fig
def simulate_bandit(rewards, n,
                    progressbar=False,
                    prior=BetaBinomial):
    # Run one Thompson sampling simulation over n sessions of potential rewards
    bandit = Bandit(prior(), prior())
    arms = np.empty(n, dtype=np.int64)
    obs_rewards = np.empty(n)

    if progressbar:
        rewards = tqdm(rewards, total=n)

    for t, arm_rewards in enumerate(rewards):
        arms[t] = bandit.assign()
        obs_rewards[t] = arm_rewards[arms[t]]
        bandit.update(arms[t], obs_rewards[t])

    return arms, obs_rewards
N_BANDIT = 100
arms = np.empty((N_BANDIT, N), dtype=np.int64)
rewards = np.empty((N_BANDIT, N))
for i in trange(N_BANDIT):
    arms[i], rewards[i] = simulate_bandit(
        generate_rewards(A_RATE, B_RATE, N), N
    )
fig, (regret_ax, assign_ax) = plt.subplots(
ncols=2, sharex=True,
figsize=(2 * FIGURE_WIDTH, 6)
)
plot_t = np.arange(N) + 1
cum_rewards = rewards.cumsum(axis=1)
ALPHA = 0.1
low_rewards, high_rewards = np.percentile(
cum_rewards, [100 * ALPHA / 2, 100 * (1 - ALPHA / 2)],
axis=0
)
regret_ax.fill_between(
plot_t, low_rewards, high_rewards,
color=C[0], alpha=0.5,
label=f"{1 - ALPHA:.0%} interval"
);
regret_ax.plot(
plot_t, cum_rewards.mean(axis=0),
c=C[0], label="Bandit (average)"
);
regret_ax.plot(
plot_t, A_RATE * plot_t,
c=C[1], ls='--',
label="Variant A"
);
regret_ax.plot(
plot_t, B_RATE * plot_t,
c=C[2], ls='--',
label="Variant B"
);
regret_ax.plot(
plot_t,
0.5 * (A_RATE + B_RATE) * plot_t,
c='k', ls='--',
label="A/B testing"
);
regret_ax.set_xlim(1, N);
regret_ax.set_xlabel("Sessions");
regret_ax.set_ylabel("Rewards");
regret_ax.legend(loc=2);
cum_winner = (arms == 1 * (A_RATE < B_RATE)).cumsum(axis=1)
low_winner, high_winner = np.percentile(
cum_winner / plot_t, [100 * ALPHA / 2, 100 * (1 - ALPHA / 2)],
axis=0
)
assign_ax.fill_between(
plot_t, low_winner, high_winner,
color=C[0], alpha=0.5,
label=f"{1 - ALPHA:.0%} interval"
);
assign_ax.plot(plot_t, cum_winner.mean(axis=0) / plot_t);
assign_ax.hlines(
0.5, 1, N,
colors='k', linestyles='--'
);
assign_ax.set_xlabel("Sessions");
assign_ax.set_ylim(0, 1);
assign_ax.yaxis.set_major_formatter(pct_formatter);
assign_ax.set_yticks(np.linspace(0, 1, 5));
assign_ax.set_ylabel("Traffic shown better variant");
fig.suptitle(f"Cumulative bandit performance ({N_BANDIT} simulations)");