#export
from local.test import *
from local.data.all import *
from local.optimizer import *
from local.learner import *
from local.notebook.showdoc import *
#default_exp metrics
# default_cls_lvl 3
Definition of the metrics that can be used in training models
This is where the function that converts scikit-learn metrics to fastai metrics is defined. You should skip this section unless you want to know all about the internals of fastai.
#export
import sklearn.metrics as skm
import scipy.optimize
#export torch_core
def flatten_check(inp, targ):
"Check that `out` and `targ` have the same number of elements and flatten them."
inp,targ = inp.contiguous().view(-1),targ.contiguous().view(-1)
test_eq(len(inp), len(targ))
return inp,targ
x1,x2 = torch.randn(5,4),torch.randn(20)
x1,x2 = flatten_check(x1,x2)
test_eq(x1.shape, [20])
test_eq(x2.shape, [20])
x1,x2 = torch.randn(5,4),torch.randn(21)
test_fail(lambda: flatten_check(x1,x2))
#export
class AccumMetric(Metric):
"Stores predictions and targets on CPU in accumulate to perform final calculations with `func`."
def __init__(self, func, dim_argmax=None, sigmoid=False, thresh=None, to_np=False, invert_arg=False,
flatten=True, **kwargs):
store_attr(self,'func,dim_argmax,sigmoid,thresh,flatten')
self.to_np,self.invert_args,self.kwargs = to_np,invert_arg,kwargs
def reset(self): self.targs,self.preds = [],[]
def accumulate(self, learn):
pred = learn.pred.argmax(dim=self.dim_argmax) if self.dim_argmax else learn.pred
if self.sigmoid: pred = torch.sigmoid(pred)
if self.thresh: pred = (pred >= self.thresh)
targ = learn.y
if self.flatten: pred,targ = flatten_check(pred,targ)
self.preds.append(to_detach(pred))
self.targs.append(to_detach(targ))
@property
def value(self):
if len(self.preds) == 0: return
preds,targs = torch.cat(self.preds),torch.cat(self.targs)
if self.to_np: preds,targs = preds.numpy(),targs.numpy()
return self.func(targs, preds, **self.kwargs) if self.invert_args else self.func(preds, targs, **self.kwargs)
`func` is only applied to the accumulated predictions/targets when the `value` attribute is asked for (so at the end of a validation/training phase, in use with `Learner` and its `Recorder`). The signature of `func` should be `inp,targ` (where `inp` are the predictions of the model and `targ` the corresponding labels).

For single-label classification problems, predictions need to be transformed with a softmax then an argmax before being compared to the targets. Since a softmax doesn't change the order of the numbers, we can just apply the argmax. Pass along `dim_argmax` to have this done by `AccumMetric` (usually -1 will work pretty well).

For multi-label classification problems, or if your targets are one-hot encoded, predictions may need to pass through a sigmoid (if it wasn't included in your model) then be compared to a given threshold (to decide between 0 and 1); this is done by `AccumMetric` if you pass `sigmoid=True` and/or a value for `thresh`.

If you want to use a metric function from sklearn.metrics, you will need to convert predictions and labels to numpy arrays with `to_np=True`. Also, scikit-learn metrics adopt the convention `y_true`, `y_preds`, which is the opposite of ours, so you will need to pass `invert_arg=True` to make `AccumMetric` do the inversion for you.
#For testing: a fake learner and a metric that isn't an average
class TstLearner():
def __init__(self): self.pred,self.y = None,None
def _l2_mean(x,y): return torch.sqrt((x.float()-y.float()).pow(2).mean())
#Go through a fake cycle with various batch sizes and compute the value of `met`
def compute_val(met, x1, x2):
met.reset()
vals = [0,6,15,20]
learn = TstLearner()
for i in range(3):
learn.pred,learn.y = x1[vals[i]:vals[i+1]],x2[vals[i]:vals[i+1]]
met.accumulate(learn)
return met.value
x1,x2 = torch.randn(20,5),torch.randn(20,5)
tst = AccumMetric(_l2_mean)
test_close(compute_val(tst, x1, x2), _l2_mean(x1, x2))
test_eq(torch.cat(tst.preds), x1.view(-1))
test_eq(torch.cat(tst.targs), x2.view(-1))
#test argmax
x1,x2 = torch.randn(20,5),torch.randint(0, 5, (20,))
tst = AccumMetric(_l2_mean, dim_argmax=-1)
test_close(compute_val(tst, x1, x2), _l2_mean(x1.argmax(dim=-1), x2))
#test thresh
x1,x2 = torch.randn(20,5),torch.randint(0, 2, (20,5)).bool()
tst = AccumMetric(_l2_mean, thresh=0.5)
test_close(compute_val(tst, x1, x2), _l2_mean((x1 >= 0.5), x2))
#test sigmoid
x1,x2 = torch.randn(20,5),torch.randn(20,5)
tst = AccumMetric(_l2_mean, sigmoid=True)
test_close(compute_val(tst, x1, x2), _l2_mean(torch.sigmoid(x1), x2))
#test to_np
x1,x2 = torch.randn(20,5),torch.randn(20,5)
tst = AccumMetric(lambda x,y: isinstance(x, np.ndarray) and isinstance(y, np.ndarray), to_np=True)
assert compute_val(tst, x1, x2)
#test invert_arg
x1,x2 = torch.randn(20,5),torch.randn(20,5)
tst = AccumMetric(lambda x,y: torch.sqrt(x.pow(2).mean()))
test_close(compute_val(tst, x1, x2), torch.sqrt(x1.pow(2).mean()))
tst = AccumMetric(lambda x,y: torch.sqrt(x.pow(2).mean()), invert_arg=True)
test_close(compute_val(tst, x1, x2), torch.sqrt(x2.pow(2).mean()))
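Putting the last two options together, a scikit-learn metric can be wrapped by hand; here is a minimal sketch (the helper `skm_to_fastai` defined below packages exactly these settings for you):
#Sketch: wrap a scikit-learn metric directly with AccumMetric
x1,x2 = torch.randn(20,2),torch.randint(0, 2, (20,))
tst = AccumMetric(skm.precision_score, dim_argmax=-1, to_np=True, invert_arg=True)
test_close(compute_val(tst, x1, x2), skm.precision_score(x2, x1.argmax(dim=-1)))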
#export
def skm_to_fastai(func, is_class=True, thresh=None, axis=-1, sigmoid=None, **kwargs):
"Convert `func` from sklearn.metrics to a fastai metric"
dim_argmax = axis if is_class and thresh is None else None
sigmoid = sigmoid if sigmoid is not None else (is_class and thresh is not None)
return AccumMetric(func, dim_argmax=dim_argmax, sigmoid=sigmoid, thresh=thresh,
to_np=True, invert_arg=True, **kwargs)
This is the quickest way to use a scikit-learn metric in a fastai training loop. `is_class` indicates if you are in a classification problem or not. In this case:
- leaving `thresh` to `None` indicates it's a single-label classification problem and predictions will pass through an argmax over `axis` before being compared to the targets
- setting a value for `thresh` indicates it's a multi-label classification problem and predictions will pass through a sigmoid (can be deactivated with `sigmoid=False`) and be compared to `thresh` before being compared to the targets

If `is_class=False`, it indicates you are in a regression problem, and predictions are compared to the targets without being modified. In all cases, `kwargs` are extra keyword arguments passed to `func`.
tst_single = skm_to_fastai(skm.precision_score)
x1,x2 = torch.randn(20,2),torch.randint(0, 2, (20,))
test_close(compute_val(tst_single, x1, x2), skm.precision_score(x2, x1.argmax(dim=-1)))
tst_multi = skm_to_fastai(skm.precision_score, thresh=0.2)
x1,x2 = torch.randn(20),torch.randint(0, 2, (20,))
test_close(compute_val(tst_multi, x1, x2), skm.precision_score(x2, torch.sigmoid(x1) >= 0.2))
tst_multi = skm_to_fastai(skm.precision_score, thresh=0.2, sigmoid=False)
x1,x2 = torch.randn(20),torch.randint(0, 2, (20,))
test_close(compute_val(tst_multi, x1, x2), skm.precision_score(x2, x1 >= 0.2))
tst_reg = skm_to_fastai(skm.r2_score, is_class=False)
x1,x2 = torch.randn(20,5),torch.randn(20,5)
test_close(compute_val(tst_reg, x1, x2), skm.r2_score(x2.view(-1), x1.view(-1)))
#export
def optim_metric(f, argname, bounds, tol=0.01, do_neg=True, get_x=False):
"Replace metric `f` with a version that optimizes argument `argname`"
def _f(preds, targs):
def minfunc(x):
kwargs = {argname:x}
res = f(preds, targs, **kwargs)
return -res if do_neg else res
optres = scipy.optimize.minimize_scalar(minfunc, bounds=bounds, method='bounded',
options={'xatol':tol})
fun = -optres.fun if do_neg else optres.fun
return (fun,optres.x) if get_x else fun
_f.__name__ = f'opt_{f.__name__}'
return _f
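`optim_metric` wraps a metric `f` so that the keyword argument `argname` is chosen by `scipy.optimize.minimize_scalar` over `bounds` each time the metric is evaluated (by default the metric is maximized, since `do_neg=True` negates it before minimizing); with `get_x=True` the optimal value of the argument is returned alongside the metric. A quick check with a toy metric whose optimum is known by construction (`_tst_metric` below is made up for illustration):
#Toy metric: best value 1. is reached at thresh=0.3 by construction
def _tst_metric(preds, targs, thresh=0.5): return 1 - (thresh - 0.3)**2
opt_tst = optim_metric(_tst_metric, 'thresh', bounds=(0.,1.), get_x=True)
val,x = opt_tst(torch.randn(20), torch.randn(20))
test_close(val, 1., eps=1e-3)
test_close(x, 0.3, eps=1e-2)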
Warning: All functions defined in this section are intended for single-label classification and targets that aren't one-hot encoded. For multi-label problems or one-hot encoded targets, use the `_multi` version of them.
#export
def accuracy(inp, targ, axis=-1):
"Compute accuracy with `targ` when `pred` is bs * n_classes"
pred,targ = flatten_check(inp.argmax(dim=axis), targ)
return (pred == targ).float().mean()
#For testing
def change_targ(targ, n, c):
idx = torch.randperm(len(targ))[:n]
res = targ.clone()
for i in idx: res[i] = (res[i]+random.randint(1,c-1))%c
return res
x = torch.randn(4,5)
y = x.argmax(dim=1)
test_eq(accuracy(x,y), 1)
y1 = change_targ(y, 2, 5)
test_eq(accuracy(x,y1), 0.5)
test_eq(accuracy(x.unsqueeze(1).expand(4,2,5), torch.stack([y,y1], dim=1)), 0.75)
#export
def error_rate(inp, targ, axis=-1):
"1 - `accuracy`"
return 1 - accuracy(inp, targ, axis=axis)
x = torch.randn(4,5)
y = x.argmax(dim=1)
test_eq(error_rate(x,y), 0)
y1 = change_targ(y, 2, 5)
test_eq(error_rate(x,y1), 0.5)
test_eq(error_rate(x.unsqueeze(1).expand(4,2,5), torch.stack([y,y1], dim=1)), 0.25)
#export
def top_k_accuracy(inp, targ, k=5, axis=-1):
"Computes the Top-k accuracy (`targ` is in the top `k` predictions of `inp`)"
inp = inp.topk(k=k, dim=axis)[1]
targ = targ.unsqueeze(dim=axis).expand_as(inp)
return (inp == targ).sum(dim=-1).float().mean()
x = torch.randn(6,5)
y = torch.arange(0,6)
test_eq(top_k_accuracy(x[:5],y[:5]), 1)
test_eq(top_k_accuracy(x, y), 5/6)
#export
def APScore(axis=-1, average='macro', pos_label=1, sample_weight=None):
"Average Precision for single-label classification problems"
return skm_to_fastai(skm.average_precision_score, axis=axis,
average=average, pos_label=pos_label, sample_weight=sample_weight)
See the scikit-learn documentation for more details.
#export
def BalancedAccuracy(axis=-1, sample_weight=None, adjusted=False):
"Balanced Accuracy for single-label binary classification problems"
return skm_to_fastai(skm.balanced_accuracy_score, axis=axis,
sample_weight=sample_weight, adjusted=adjusted)
See the scikit-learn documentation for more details.
#export
def BrierScore(axis=-1, sample_weight=None, pos_label=None):
"Brier score for single-label classification problems"
return skm_to_fastai(skm.brier_score_loss, axis=axis,
sample_weight=sample_weight, pos_label=pos_label)
See the scikit-learn documentation for more details.
#export
def CohenKappa(axis=-1, labels=None, weights=None, sample_weight=None):
"Cohen kappa for single-label classification problems"
return skm_to_fastai(skm.cohen_kappa_score, axis=axis,
labels=labels, weights=weights, sample_weight=sample_weight)
See the scikit-learn documentation for more details.
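As a quick sanity check (following the pattern of the precision test above), `CohenKappa` should match calling scikit-learn directly on the argmax'd predictions:
x1,x2 = torch.randn(20,5),torch.randint(0, 5, (20,))
tst = CohenKappa()
test_close(compute_val(tst, x1, x2), skm.cohen_kappa_score(x2, x1.argmax(dim=-1)))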
#export
def F1Score(axis=-1, labels=None, pos_label=1, average='binary', sample_weight=None):
"F1 score for single-label classification problems"
return skm_to_fastai(skm.f1_score, axis=axis,
labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
See the scikit-learn documentation for more details.
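Likewise, a minimal check that `F1Score` matches scikit-learn on argmax'd predictions:
x1,x2 = torch.randn(20,2),torch.randint(0, 2, (20,))
tst = F1Score()
test_close(compute_val(tst, x1, x2), skm.f1_score(x2, x1.argmax(dim=-1)))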
#export
def FBeta(beta, axis=-1, labels=None, pos_label=1, average='binary', sample_weight=None):
"FBeta score with `beta` for single-label classification problems"
return skm_to_fastai(skm.fbeta_score, axis=axis,
beta=beta, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
See the scikit-learn documentation for more details.
#export
def HammingLoss(axis=-1, labels=None, sample_weight=None):
"Cohen kappa for single-label classification problems"
return skm_to_fastai(skm.hamming_loss, axis=axis,
labels=labels, sample_weight=sample_weight)
See the scikit-learn documentation for more details.
#export
def Jaccard(axis=-1, labels=None, pos_label=1, average='binary', sample_weight=None):
"Jaccard score for single-label classification problems"
return skm_to_fastai(skm.jaccard_score, axis=axis,
labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
See the scikit-learn documentation for more details.
#export
def MatthewsCorrCoef(axis=-1, sample_weight=None):
"Matthews correlation coefficient for single-label binary classification problems"
return skm_to_fastai(skm.matthews_corrcoef, axis=axis, sample_weight=sample_weight)
See the scikit-learn documentation for more details.
#export
def Precision(axis=-1, labels=None, pos_label=1, average='binary', sample_weight=None):
"Precision for single-label classification problems"
return skm_to_fastai(skm.precision_score, axis=axis,
labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
See the scikit-learn documentation for more details.
#export
def Recall(axis=-1, labels=None, pos_label=1, average='binary', sample_weight=None):
"Recall for single-label classification problems"
return skm_to_fastai(skm.recall_score, axis=axis,
labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
See the scikit-learn documentation for more details.
#export
def RocAuc(axis=-1, average='macro', sample_weight=None, max_fpr=None):
"Area Under the Receiver Operating Characteristic Curve for single-label binary classification problems"
return skm_to_fastai(skm.roc_auc_score, axis=axis,
average=average, sample_weight=sample_weight, max_fpr=max_fpr)
See the scikit-learn documentation for more details.
#export
class Perplexity(AvgLoss):
"Perplexity (exponential of cross-entropy loss) for Language Models"
@property
def value(self): return torch.exp(self.total/self.count) if self.count != 0 else None
@property
def name(self): return "perplexity"
perplexity = Perplexity()
x1,x2 = torch.randn(20,5),torch.randint(0, 5, (20,))
tst = perplexity
tst.reset()
vals = [0,6,15,20]
learn = TstLearner()
for i in range(3):
learn.y,learn.yb = x2[vals[i]:vals[i+1]],(x2[vals[i]:vals[i+1]],)
learn.loss = F.cross_entropy(x1[vals[i]:vals[i+1]],x2[vals[i]:vals[i+1]])
tst.accumulate(learn)
test_close(tst.value, torch.exp(F.cross_entropy(x1,x2)))
#export
def accuracy_multi(inp, targ, thresh=0.5, sigmoid=True):
"Compute accuracy when `inp` and `targ` are the same size."
inp,targ = flatten_check(inp,targ)
if sigmoid: inp = inp.sigmoid()
return ((inp>thresh)==targ.bool()).float().mean()
#For testing
def change_1h_targ(targ, n):
idx = torch.randperm(targ.numel())[:n]
res = targ.clone().view(-1)
for i in idx: res[i] = 1-res[i]
return res.view(targ.shape)
x = torch.randn(4,5)
y = (torch.sigmoid(x) >= 0.5).byte()
test_eq(accuracy_multi(x,y), 1)
test_eq(accuracy_multi(x,1-y), 0)
y1 = change_1h_targ(y, 5)
test_eq(accuracy_multi(x,y1), 0.75)
#Different thresh
y = (torch.sigmoid(x) >= 0.2).byte()
test_eq(accuracy_multi(x,y, thresh=0.2), 1)
test_eq(accuracy_multi(x,1-y, thresh=0.2), 0)
y1 = change_1h_targ(y, 5)
test_eq(accuracy_multi(x,y1, thresh=0.2), 0.75)
#No sigmoid
y = (x >= 0.5).byte()
test_eq(accuracy_multi(x,y, sigmoid=False), 1)
test_eq(accuracy_multi(x,1-y, sigmoid=False), 0)
y1 = change_1h_targ(y, 5)
test_eq(accuracy_multi(x,y1, sigmoid=False), 0.75)
#export
def APScoreMulti(thresh=0.5, sigmoid=True, average='macro', pos_label=1, sample_weight=None):
"Average Precision for multi-label classification problems"
return skm_to_fastai(skm.average_precision_score, thresh=thresh, sigmoid=sigmoid,
average=average, pos_label=pos_label, sample_weight=sample_weight)
See the scikit-learn documentation for more details.
#export
def BrierScoreMulti(thresh=0.5, sigmoid=True, sample_weight=None, pos_label=None):
"Brier score for multi-label classification problems"
return skm_to_fastai(skm.brier_score_loss, thresh=thresh, sigmoid=sigmoid,
sample_weight=sample_weight, pos_label=pos_label)
See the scikit-learn documentation for more details.
#export
def F1ScoreMulti(thresh=0.5, sigmoid=True, labels=None, pos_label=1, average='binary', sample_weight=None):
"F1 score for multi-label classification problems"
return skm_to_fastai(skm.f1_score, thresh=thresh, sigmoid=sigmoid,
labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
See the scikit-learn documentation for more details.
#export
def FBetaMulti(beta, thresh=0.5, sigmoid=True, labels=None, pos_label=1, average='binary', sample_weight=None):
"FBeta score with `beta` for multi-label classification problems"
return skm_to_fastai(skm.fbeta_score, thresh=thresh, sigmoid=sigmoid,
beta=beta, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
See the scikit-learn documentation for more details.
#export
def HammingLossMulti(thresh=0.5, sigmoid=True, labels=None, sample_weight=None):
"Cohen kappa for multi-label classification problems"
return skm_to_fastai(skm.hamming_loss, thresh=thresh, sigmoid=sigmoid,
labels=labels, sample_weight=sample_weight)
See the scikit-learn documentation for more details.
#export
def JaccardMulti(thresh=0.5, sigmoid=True, labels=None, pos_label=1, average='binary', sample_weight=None):
"Jaccard score for multi-label classification problems"
return skm_to_fastai(skm.jaccard_score, thresh=thresh, sigmoid=sigmoid,
labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
See the scikit-learn documentation for more details.
#export
def MatthewsCorrCoefMulti(thresh=0.5, sigmoid=True, sample_weight=None):
"Matthews correlation coefficient for multi-label classification problems"
return skm_to_fastai(skm.matthews_corrcoef, thresh=thresh, sigmoid=sigmoid, sample_weight=sample_weight)
See the scikit-learn documentation for more details.
#export
def PrecisionMulti(thresh=0.5, sigmoid=True, labels=None, pos_label=1, average='binary', sample_weight=None):
"Precision for multi-label classification problems"
return skm_to_fastai(skm.precision_score, thresh=thresh, sigmoid=sigmoid,
labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
See the scikit-learn documentation for more details.
#export
def RecallMulti(thresh=0.5, sigmoid=True, labels=None, pos_label=1, average='binary', sample_weight=None):
"Recall for multi-label classification problems"
return skm_to_fastai(skm.recall_score, thresh=thresh, sigmoid=sigmoid,
labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
See the scikit-learn documentation for more details.
#export
def RocAucMulti(thresh=0.5, sigmoid=True, average='macro', sample_weight=None, max_fpr=None):
"Area Under the Receiver Operating Characteristic Curve for multi-label binary classification problems"
return skm_to_fastai(skm.roc_auc_score, thresh=thresh, sigmoid=sigmoid,
average=average, sample_weight=sample_weight, max_fpr=max_fpr)
See the scikit-learn documentation for more details.
#export
def mse(inp,targ):
"Mean squared error between `inp` and `targ`."
return F.mse_loss(*flatten_check(inp,targ))
x1,x2 = torch.randn(4,5),torch.randn(4,5)
test_close(mse(x1,x2), (x1-x2).pow(2).mean())
#export
def _rmse(inp, targ): return torch.sqrt(F.mse_loss(inp, targ))
rmse = AccumMetric(_rmse)
rmse.__doc__ = "Root mean squared error"
show_doc(rmse, name="rmse")
rmse [source]
Root mean squared error
x1,x2 = torch.randn(20,5),torch.randn(20,5)
test_eq(compute_val(rmse, x1, x2), torch.sqrt(F.mse_loss(x1,x2)))
#export
def mae(inp,targ):
"Mean absolute error between `inp` and `targ`."
inp,targ = flatten_check(inp,targ)
return torch.abs(inp - targ).mean()
x1,x2 = torch.randn(4,5),torch.randn(4,5)
test_eq(mae(x1,x2), torch.abs(x1-x2).mean())
#export
def msle(inp, targ):
"Mean squared logarithmic error between `inp` and `targ`."
inp,targ = flatten_check(inp,targ)
return F.mse_loss(torch.log(1 + inp), torch.log(1 + targ))
x1,x2 = torch.randn(4,5),torch.randn(4,5)
x1,x2 = torch.relu(x1),torch.relu(x2)
test_close(msle(x1,x2), (torch.log(x1+1)-torch.log(x2+1)).pow(2).mean())
#export
def _exp_rmspe(inp,targ):
inp,targ = torch.exp(inp),torch.exp(targ)
return torch.sqrt(((targ - inp)/targ).pow(2).mean())
exp_rmspe = AccumMetric(_exp_rmspe)
exp_rmspe.__doc__ = "Root mean square percentage error of the exponential of predictions and targets"
show_doc(exp_rmspe, name="exp_rmspe")
exp_rmspe [source]
Root mean square percentage error of the exponential of predictions and targets
x1,x2 = torch.randn(20,5),torch.randn(20,5)
test_eq(compute_val(exp_rmspe, x1, x2), torch.sqrt((((torch.exp(x2) - torch.exp(x1))/torch.exp(x2))**2).mean()))
#export
def ExplainedVariance(sample_weight=None):
"Explained variance betzeen predictions and targets"
return skm_to_fastai(skm.explained_variance_score, is_class=False, sample_weight=sample_weight)
See the scikit-learn documentation for more details.
#export
def R2Score(sample_weight=None):
"R2 score betzeen predictions and targets"
return skm_to_fastai(skm.r2_score, is_class=False, sample_weight=sample_weight)
See the scikit-learn documentation for more details.
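And a quick check mirroring the `skm_to_fastai` regression test above:
x1,x2 = torch.randn(20,5),torch.randn(20,5)
tst = R2Score()
test_close(compute_val(tst, x1, x2), skm.r2_score(x2.view(-1), x1.view(-1)))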
#export
def foreground_acc(inp, targ, bkg_idx=0, axis=1):
"Computes non-background accuracy for multiclass segmentation"
targ = targ.squeeze(1)
mask = targ != bkg_idx
return (inp.argmax(dim=axis)[mask]==targ[mask]).float().mean()
x = torch.randn(4,5,3,3)
y = x.argmax(dim=1)[:,None]
test_eq(foreground_acc(x,y), 1)
y[0] = 0 #the 0s are ignored so we get the same value
test_eq(foreground_acc(x,y), 1)
#export
class Dice(Metric):
"Dice coefficient metric for binary target in segmentation"
def __init__(self, axis=1): self.axis = axis
def reset(self): self.inter,self.union = 0,0
def accumulate(self, learn):
pred,targ = flatten_check(learn.pred.argmax(dim=self.axis), learn.y)
self.inter += (pred*targ).float().sum().item()
self.union += (pred+targ).float().sum().item()
@property
def value(self): return 2. * self.inter/self.union if self.union > 0 else None
x1 = torch.randn(20,2,3,3)
x2 = torch.randint(0, 2, (20, 3, 3))
pred = x1.argmax(1)
inter = (pred*x2).float().sum().item()
union = (pred+x2).float().sum().item()
test_eq(compute_val(Dice(), x1, x2), 2*inter/union)
#export
class JaccardCoeff(Dice):
"Implemetation of the jaccard coefficient that is lighter in RAM"
@property
def value(self): return self.inter/(self.union-self.inter) if self.union > 0 else None
x1 = torch.randn(20,2,3,3)
x2 = torch.randint(0, 2, (20, 3, 3))
pred = x1.argmax(1)
inter = (pred*x2).float().sum().item()
union = (pred+x2).float().sum().item()
test_eq(compute_val(JaccardCoeff(), x1, x2), inter/(union-inter))
#hide
from local.notebook.export import notebook2script
notebook2script(all_fs=True)