#default_exp core.transform
#export
from local.core.imports import *
from local.core.foundation import *
from local.core.utils import *
from local.core.dispatch import *
from local.test import *
from local.notebook.showdoc import show_doc
from PIL import Image
import torch
Definition of `Transform` and `Pipeline`
The classes here provide functionality for creating a composition of partially reversible functions. By "partially reversible" we mean that a transform can be decoded, creating a form suitable for display. This is not necessarily identical to the original form (e.g. a transform that changes a byte tensor to a float tensor does not recreate a byte tensor when decoded, since that may lose precision, and a float tensor can be displayed already).
Classes are also provided for composing transforms, and mapping them over collections. `Pipeline` is a transform which composes several `Transform`s, knowing how to decode them or show an encoded item. `ArrayImage`, `ArrayImageBW` and `ArrayMask` are subclasses of `ndarray` that know how to show themselves.
#export
class ArrayBase(ndarray):
    "An `ndarray` subclass whose constructor accepts array-likes, or a shape tuple"
    def __new__(cls, x, *args, **kwargs):
        # A tuple is treated as a shape, as in the plain `ndarray` constructor.
        # Fix: the original called `super().__new__` here but discarded the
        # result, so shape tuples silently fell through to the array-conversion
        # path below (producing a 1-d array of the shape's values instead).
        if isinstance(x,tuple): return super().__new__(cls, x, *args, **kwargs)
        # Extra positional/keyword args are only meaningful for the shape form.
        if args or kwargs: raise RuntimeError('Unknown array init args')
        if not isinstance(x,ndarray): x = array(x)
        return x.view(cls)
#export
class ArrayImageBase(ArrayBase):
    "Base class for arrays that display themselves via `show_image`"
    _show_args = {'cmap':'viridis'}
    def show(self, ctx=None, **kwargs):
        # Caller-supplied kwargs override the class-level display defaults.
        opts = dict(self._show_args)
        opts.update(kwargs)
        return show_image(self, ctx=ctx, **opts)
#export
class ArrayImage(ArrayImageBase): pass  # color image array; inherits viridis display defaults
#export
class ArrayImageBW(ArrayImage): _show_args = {'cmap':'Greys'}  # greyscale display
#export
class ArrayMask(ArrayImageBase): _show_args = {'alpha':0.5, 'cmap':'tab20'}  # semi-transparent, categorical colormap (mask overlays)
# Sanity checks: casting a PIL image preserves the `ArrayImage` subclass,
# scalars are wrapped, and `show` returns a matplotlib axes.
im = Image.open(TEST_IMAGE)
im_t = ArrayImage(im)
test_eq(type(im_t), ArrayImage)
im_t2 = ArrayMask(1)
test_eq(type(im_t2), ArrayMask)
test_eq(im_t2, array(1))
ax = im_t.show(figsize=(2,2))
test_fig_exists(ax)
#export
_tfm_methods = 'encodes','decodes','setups'
class _TfmDict(dict):
def __setitem__(self,k,v):
if k not in _tfm_methods or not callable(v): return super().__setitem__(k,v)
if k not in self: super().__setitem__(k,TypeDispatch())
self[k].add(v)
#export
class _TfmMeta(type):
    # Metaclass for `Transform`: lets the class body collect multiple
    # `encodes`/`decodes`/`setups` defs (via `_TfmDict`) and lets the class
    # itself be used as a decorator to register additional methods.
    def __new__(cls, name, bases, dict):
        res = super().__new__(cls, name, bases, dict)
        # Expose `__init__`'s signature on the class so doc/completion tools
        # show the real constructor arguments.
        res.__signature__ = inspect.signature(res.__init__)
        return res
    def __call__(cls, *args, **kwargs):
        f = args[0] if args else None
        n = getattr(f,'__name__',None)
        # Ensure each tfm-method table exists on the class.
        for nm in _tfm_methods:
            if not hasattr(cls,nm): setattr(cls, nm, TypeDispatch())
        if callable(f) and n in _tfm_methods:
            # Decorator usage (`@A def encodes(...)`): register `f` in the
            # matching table and return it unchanged — no instance is created.
            getattr(cls,n).add(f)
            return f
        # Normal usage: construct an instance as usual.
        return super().__call__(*args, **kwargs)
    @classmethod
    def __prepare__(cls, name, bases): return _TfmDict()  # class body writes into a `_TfmDict`
#export
class Transform(metaclass=_TfmMeta):
    "Delegates (`__call__`,`decode`,`setup`) to (`encodes`,`decodes`,`setups`) if `split_idx` matches"
    # Class-level defaults; instances may shadow them in `__init__`.
    split_idx,init_enc,as_item_force,as_item,order = None,False,None,True,0
    def __init__(self, enc=None, dec=None, split_idx=None, as_item=False, order=None):
        self.split_idx,self.as_item = ifnone(split_idx, self.split_idx),as_item
        if order is not None: self.order=order
        self.init_enc = enc or dec
        if not self.init_enc: return
        # Passing enc/dec, so need to remove (base) class level enc/dec
        del(self.__class__.encodes,self.__class__.decodes,self.__class__.setups)
        self.encodes,self.decodes,self.setups = TypeDispatch(),TypeDispatch(),TypeDispatch()
        if enc:
            self.encodes.add(enc)
            # Inherit `order` from the encoder function when it defines one.
            self.order = getattr(enc,'order',self.order)
        if dec: self.decodes.add(dec)
    @property
    def use_as_item(self): return ifnone(self.as_item_force, self.as_item)  # `as_item_force` wins when set
    def __call__(self, x, **kwargs): return self._call('encodes', x, **kwargs)
    def decode (self, x, **kwargs): return self._call('decodes', x, **kwargs)
    def setup(self, items=None): return self.setups(items)
    def __repr__(self): return f'{self.__class__.__name__}: {self.use_as_item} {self.encodes} {self.decodes}'
    def _call(self, fn, x, split_idx=None, **kwargs):
        # Skip entirely when this tfm is restricted to a different subset.
        if split_idx!=self.split_idx and self.split_idx is not None: return x
        f = getattr(self, fn)
        # Item mode (or non-listy input): apply once to the whole input;
        # otherwise map over the elements and retain the container type.
        if self.use_as_item or not is_listy(x): return self._do_call(f, x, **kwargs)
        res = tuple(self._do_call(f, x_, **kwargs) for x_ in x)
        return retain_type(res, x)
    def _do_call(self, f, x, **kwargs):
        # `retain_type` casts the result back to type(x) unless the dispatched
        # function is annotated `->None` (checked via `f.returns_none`).
        return x if f is None else retain_type(f(x, **kwargs), x, f.returns_none(x))
# Out-of-line docs for decode/setup, plus the rendered doc for `Transform`.
add_docs(Transform, decode="Delegate to `decodes` to undo transform", setup="Delegate to `setups` to set up transform")
show_doc(Transform)
A `Transform` is the main building block of the fastai data pipelines. In the most general terms a transform can be any function you want to apply to your data, however the `Transform` class provides several mechanisms that make the process of building them easy and flexible.

`Transform` features:

- When a listy input (anything satisfying `is_listy`) is passed to a transform, it will get applied to each element separately. Most commonly it will be an `(x,y)` tuple, but it can be anything, for example a list of images. You can opt out of this behavior by setting the flag `as_item=True`. For transforms that must always operate on the tuple level you can set `as_item_force=True`, which takes precedence over `as_item`; an example of that is `PointScaler`.
- A transform may define a `decodes` method. This is mainly used to turn something like a category which is encoded as a number back into a label understandable by humans for showing purposes.
- The type of the input is retained in the output, e.g. `ArrayImage`, which is a thin wrapper of pytorch's `Tensor`. You can opt out of this behavior by adding a `->None` return type annotation.
- A `setup` method can be used to perform any one-time calculations to be later used by the transform, for example generating a vocabulary to encode categorical data.
- With the `split_idx` flag you can make the transform be used only in a specific `DataSource` subset, like in training, but not validation.
- A transform carries an `order` attribute which the `Pipeline` uses when it needs to merge two lists of transforms.
- You can extend a `Transform` by creating `encodes` or `decodes` methods for new data types. You can put those new methods outside the original transform definition and decorate them with the class you wish them patched into. This can be used by the fastai library users to add their own behavior, or multiple modules contributing to the same transform.
Defining a `Transform`

There are a few ways to create a transform, with different ratios of simplicity to flexibility:

- Extend the `Transform` class — use inheritance to implement the methods you want.
- Instantiate the `Transform` class and pass your functions as `enc` and `dec` arguments.
- Extend an existing `Transform` by just adding a decorator — very straightforward if all you need is a single `encodes` implementation.
- Pass a plain function to a `Pipeline` or `TfmdDS` — you don't even need a decorator; your function will get converted to a `Transform`
automatically.

class A(Transform): pass
# `A` itself works as a decorator to register `encodes`; subclass `B`
# inherits the registered method through the class hierarchy.
@A
def encodes(self, x): return x+1
f1 = A()
test_eq(f1(1), 2)
class B(A): pass
f2 = B()
test_eq(f2(1), 2)
# A fresh `A` has no encodes/decodes, so __call__ and decode are identity.
class A(Transform): pass
f3 = A()
test_eq_type(f3(2), 2)
test_eq_type(f3.decode(2.0), 2.0)
`Transform` can be used as a decorator to turn a function into a `Transform`.
# A plain function or lambda can be wrapped into a `Transform`; with no
# decoder supplied, `decode` is the identity.
f = Transform(lambda o:o//2)
test_eq_type(f(2), 1)
test_eq_type(f.decode(2.0), 2.0)
@Transform
def f(x): return x//2
test_eq_type(f(2), 1)
test_eq_type(f.decode(2.0), 2.0)
@Transform
def f(x): return x*2
test_eq_type(f(2), 4)
test_eq_type(f.decode(2.0), 2.0)
You can derive from `Transform` and use `encodes` for your encoding function.
# Type-annotated methods only fire for matching inputs: `ArrayImage` values
# are transformed, everything else passes through untouched.
class A(Transform):
    def encodes(self, x:ArrayImage): return -x
    def decodes(self, x:ArrayImage): return x+1
    def setups (self, x:ArrayImage): x.foo = 'a'
f = A()
t = f(im_t)
test_eq(t, -im_t)
test_eq(f(1), 1)
test_eq(type(t), ArrayImage)
test_eq(f.decode(t), -im_t+1)
test_eq(f.decode(1), 1)
f.setup(im_t)
test_eq(im_t.foo, 'a')
t2 = array(1)
f.setup(t2)
# Fix: the original asserted on `f2` (an unrelated transform left over from
# an earlier cell, which trivially has no `foo`). The intent is to check that
# `setups` did NOT fire for a plain ndarray, so the assertion belongs on `t2`.
assert not hasattr(t2,'foo')
f
A: False (ArrayImage,object) -> encodes (ArrayImage,object) -> decodes
Without return annotation we get an Int
back since that's what was passed.
# Without a return annotation the input type is retained: `Int` in, `Int` out.
class A(Transform): pass
@A
def encodes(self, x:Int): return x//2
@A
def encodes(self, x:float): return x+1
f = A()
test_eq_type(f(Int(2)), Int(1))
test_eq_type(f(2), 2)   # a plain int is not an `Int`, so no encoder matches
test_eq_type(f(2.), 3.)
Without return annotation we don't cast if we're not a subclass of the input type. If the annotation is a tuple, then any type in the tuple will match.
# Tuple annotations match any listed type; the result is only cast back when
# it is a subclass of the input type.
class A(Transform):
    def encodes(self, x:(Int,float)): return x/2
    def encodes(self, x:(str,list)): return str(x)+'1'
f = A()
test_eq_type(f(Int(2)), 1.)   # float result isn't re-cast to Int
test_eq_type(f(2), 2)
test_eq_type(f(Float(2.)), Float(1.))
test_eq_type(f('a'), 'a1')
With return annotation None
we get back whatever Python creates usually.
# A `->None` annotation opts out of retaining the input type.
def func(x)->None: return x/2
f = Transform(func)
test_eq_type(f(2), 1.)
test_eq_type(f(2.), 1.)
Since decodes
has no return annotation, but encodes
created an Int
and we pass that result here to decode
, we end up with an Int
.
# `decodes` has no return annotation, so the `Int` produced by `encodes`
# is retained through `decode`.
def func(x): return Int(x+1)
def dec (x): return x-1
f = Transform(func,dec)
t = f(1)
test_eq_type(t, Int(2))
test_eq_type(f.decode(t), Int(1))
If the transform has split_idx
then it's only applied if split_idx
param matches.
# With `split_idx` set, the transform only fires when the call's split_idx matches.
f.split_idx = 1
test_eq(f(1, split_idx=1),2)
test_eq_type(f(1, split_idx=0), 1)
If as_item=True
the transform takes tuples as a whole and is applied to them.
# With `as_item=True` the whole tuple is passed to encodes/decodes at once.
class A(Transform):
    def encodes(self, xy): x,y=xy; return (x+y,y)
    def decodes(self, xy): x,y=xy; return (x-y,y)
f = A(as_item=True)
t = f((1,2))
test_eq(t, (3,2))
test_eq(f.decode(t), (1,2))
f.split_idx = 1
test_eq(f((1,2), split_idx=1), (3,2))
test_eq(f((1,2), split_idx=0), (1,2))
# In item mode a list input is also received whole by encodes/decodes.
class AL(Transform): pass
@AL
def encodes(self, x): return L(x_+1 for x_ in x)
@AL
def decodes(self, x): return L(x_-1 for x_ in x)
f = AL(as_item=True)
t = f([1,2])
test_eq(t, [2,3])
test_eq(f.decode(t), [1,2])
If as_item=False
the transform is applied to each element of a listy input.
# With `as_item=False` the transform maps over each element of a listy input,
# dispatching per element (floats don't match `numbers.Integral`).
def neg_int(x:numbers.Integral): return -x
f = Transform(neg_int, as_item=False)
test_eq(f([1]), (-1,))
test_eq(f([1.]), (1.,))
test_eq(f([1.,2,3.]), (1.,-2,3.))
test_eq(f.decode([1,2]), (1,2))
#export
class InplaceTransform(Transform):
    "A `Transform` that modifies in-place and just returns whatever it's passed"
    def _call(self, fn, x, split_idx=None, **kwargs):
        # Run the usual dispatch for its side effects, then return the
        # (mutated) input rather than the encoder's return value.
        super()._call(fn,x,split_idx,**kwargs)
        return x
# In-place example: fill NaNs in a pandas Series; the Series itself is returned.
class A(InplaceTransform): pass
@A
def encodes(self, x:pd.Series): x.fillna(10, inplace=True)
f = A()
test_eq_type(f(pd.Series([1,2,None])),pd.Series([1,2,10]))
#export
class TupleTransform(Transform):
    "`Transform` that always treats `as_item` as `False`"
    as_item_force=False  # overrides any per-instance `as_item` value
#export
class ItemTransform (Transform):
    "`Transform` that always treats `as_item` as `True`"
    as_item_force=True  # overrides any per-instance `as_item` value
# `TupleTransform` maps over elements: (float,int) entries become `Int`,
# strings pass through untouched.
def float_to_int(x:(float,int)): return Int(x)
f = TupleTransform(float_to_int)
test_eq_type(f([1.]), (Int(1),))
test_eq_type(f([1]), (Int(1),))
test_eq_type(f(['1']), ('1',))
test_eq_type(f([1,'1']), (Int(1),'1'))
test_eq(f.decode([1]), [1])
test_eq_type(f(Tuple(1.)), Tuple(Int(1)))
# Methods registered on `B` after instantiation are picked up by existing
# instances; sibling `C` stays untouched; the `->None` catch-all opts out of
# type retention; pickling keeps the registered methods.
class B(TupleTransform): pass
class C(TupleTransform): pass
f = B()
test_eq(f([1]), [1])
@B
def encodes(self, x:int): return x+1
@B
def encodes(self, x:str): return x+'1'
@B
def encodes(self, x)->None: return str(x)+'!'
b,c = B(),C()
test_eq(b([1]), [2])
test_eq(b(['1']), ('11',))
test_eq(b([1.0]), ('1.0!',))
test_eq(c([1]), [1])
test_eq(b([1,2]), (2,3))
test_eq(b.decode([2]), [2])
assert pickle.loads(pickle.dumps(b))
@B
def decodes(self, x:int): return x-1
test_eq(b.decode([2]), [1])
test_eq(b.decode(('2',)), ('2',))
Non-type-constrained functions are applied to all elements of a tuple.
# An unannotated method applies to every element of the tuple.
class A(TupleTransform): pass
@A
def encodes(self, x): return x+1
@A
def decodes(self, x): return x-1
f = A()
t = f((1,2.0))
test_eq_type(t, (2,3.0))
test_eq_type(f.decode(t), (1,2.0))
Type-constrained functions are applied to only matching elements of a tuple, and return annotations are only applied where matching.
# Type-constrained methods only touch matching elements; non-matching
# elements (the float here) pass through unchanged.
class B(TupleTransform):
    def encodes(self, x:int): return Int(x+1)
    def encodes(self, x:str): return x+'1'
    def decodes(self, x:Int): return x//2
f = B()
start = (1.,2,'3')
t = f(start)
test_eq_type(t, (1.,Int(3),'31'))
test_eq(f.decode(t), (1.,Int(1),'31'))
The same behavior also works with typing
module type classes.
# Dispatch also works with `numbers`-module ABCs; the float encoder wins for
# 1.0, and `as_item=False` maps elementwise over tuples.
class A(Transform): pass
@A
def encodes(self, x:numbers.Integral): return x+1
@A
def encodes(self, x:float): return x*3
@A
def decodes(self, x:int): return x-1
f = A()
start = 1.0
t = f(start)
test_eq(t, 3.)
test_eq(f.decode(t), 3)  # 3. is not an int, so the decoder doesn't match
f = A(as_item=False)
start = (1.,2,3.)
t = f(start)
test_eq(t, (3.,3,9.))
test_eq(f.decode(t), (3.,2,9.))
Transform accepts lists
# Transform accepts `L` (list-like) values inside a tuple element.
def a(x): return L(x_+1 for x_ in x)
def b(x): return L(x_-1 for x_ in x)
f = TupleTransform(a,b)
t = f((L(1,2),))
test_eq(t, (L(2,3),))
test_eq(f.decode(t), (L(1,2),))
#export
def get_func(t, name, *args, **kwargs):
    "Get the `t.name` (potentially partial-ized with `args` and `kwargs`) or `noop` if not defined"
    fn = getattr(t, name, noop)
    # Bind any extra arguments up front so the caller gets a ready-to-call function.
    if args or kwargs: fn = partial(fn, *args, **kwargs)
    return fn
This works for any kind of `t` supporting `getattr`, so a class or a module.
# `get_func` works on modules, callables and classes alike.
test_eq(get_func(operator, 'neg', 2)(), -2)
test_eq(get_func(operator.neg, '__call__')(2), -2)
test_eq(get_func(list, 'foobar')([2]), [2])  # missing attr falls back to `noop` (identity)
t = get_func(torch, 'zeros', dtype=torch.int64)(5)
test_eq(t.dtype, torch.int64)
a = [2,1]
get_func(list, 'sort')(a)
test_eq(a, [1,2])
Transforms are built with multiple-dispatch: a given function can have several methods depending on the type of the object received. This is done directly with the TypeDispatch
module and type-annotation in Transform
, but you can also use the following class.
#export
class Func():
    "Basic wrapper around a `name` with `args` and `kwargs` to call on a given type"
    def __init__(self, name, *args, **kwargs): self.name,self.args,self.kwargs = name,args,kwargs
    def __repr__(self): return f'sig: {self.name}({self.args}, {self.kwargs})'
    def _get(self, t): return get_func(t, self.name, *self.args, **self.kwargs)
    def __call__(self,t): return mapped(self._get, t)  # `t` may be a single type/module or a list of them
You can call the Func
object on any module name or type, even a list of types. It will return the corresponding function (with a default to noop
if nothing is found) or list of functions.
# `Func` resolves its name on a module/type, or on each item of a list.
test_eq(Func('sqrt')(math), math.sqrt)
test_eq(Func('sqrt')(torch), torch.sqrt)
@patch
def powx(x:math, a): return math.pow(x,a)
@patch
def powx(x:torch, a): return torch.pow(x,a)
tst = Func('powx',a=2)([math, torch])
test_eq([f.func for f in tst], [math.powx, torch.powx])
for t in tst: test_eq(t.keywords, {'a': 2})
#export
class _Sig():
def __getattr__(self,k):
def _inner(*args, **kwargs): return Func(k, *args, **kwargs)
return _inner
Sig = _Sig()
show_doc(Sig, name="Sig")
`Sig(*args, **kwargs)`

`Sig` is just sugar-syntax to create a `Func` object more easily, with the syntax `Sig.name(*args, **kwargs)`.
# `Sig.sqrt()` builds Func('sqrt'), which then resolves per module.
f = Sig.sqrt()
test_eq(f(math), math.sqrt)
test_eq(f(torch), torch.sqrt)
#export
def compose_tfms(x, tfms, is_enc=True, reverse=False, **kwargs):
    "Apply all `func_nm` attribute of `tfms` on `x`, maybe in `reverse` order"
    seq = reversed(tfms) if reverse else tfms
    for tfm in seq:
        # Decoding runs each transform's `decode` instead of its `__call__`.
        fn = tfm if is_enc else tfm.decode
        x = fn(x, **kwargs)
    return x
# `compose_tfms` pipes a value through transforms with the usual type
# retention; `is_enc=False` runs decoders, `reverse=True` flips the order.
def to_int (x): return Int(x)
def to_float(x): return Float(x)
def double (x): return x*2
def half(x)->None: return x/2
def test_compose(a, b, *fs): test_eq_type(compose_tfms(a, tfms=map(Transform,fs)), b)
test_compose(1, Int(1), to_int)
test_compose(1, Float(1), to_int,to_float)
test_compose(1, Float(2), to_int,to_float,double)
test_compose(2.0, 2.0, to_int,double,half)
class A(Transform):
    def encodes(self, x:float): return Float(x+1)
    def decodes(self, x): return x-1
tfms = [A(), Transform(math.sqrt)]
t = compose_tfms(3., tfms=tfms)
test_eq_type(t, Float(2.))
test_eq(compose_tfms(t, tfms=tfms, is_enc=False), 1.)
test_eq(compose_tfms(4., tfms=tfms, reverse=True), 3.)
tfms = [A(as_item=False), Transform(math.sqrt, as_item=False)]
test_eq(compose_tfms((9,3.), tfms=tfms), (3,2.))
#export
def mk_transform(f, as_item=True):
    "Convert function `f` to `Transform` if it isn't already one"
    f = instantiate(f)  # classes passed in are instantiated first
    if isinstance(f, Transform): return f
    return Transform(f, as_item=as_item)
# Plain functions and lambdas are wrapped as `Transform`s.
def neg(x): return -x
test_eq(type(mk_transform(neg)), Transform)
test_eq(type(mk_transform(math.sqrt)), Transform)
test_eq(type(mk_transform(lambda a:a*2)), Transform)
#export
def gather_attrs(o, k, nm):
    "Used in __getattr__ to collect all attrs `k` from `self.{nm}`"
    # Private names and the container attribute itself are never delegated.
    if k.startswith('_') or k==nm: raise AttributeError(k)
    found = [v for v in getattr(o,nm).attrgot(k) if v is not None]
    if not found: raise AttributeError(k)
    if len(found)==1: return found[0]
    return L(found)
#export
def gather_attr_names(o, nm):
    "Used in __dir__ to collect all attrs `k` from `self.{nm}`"
    # Union of dir() of every item stored on `o.{nm}`, deduplicated.
    return L(getattr(o,nm)).map(dir).concat().unique()
#export
class Pipeline:
    "A pipeline of composed (for encode/decode) transforms, setup with types"
    def __init__(self, funcs=None, as_item=False, split_idx=None):
        self.split_idx,self.default = split_idx,None
        if isinstance(funcs, Pipeline): self.fs = funcs.fs  # NOTE(review): shares (not copies) the other pipeline's tfm list
        else:
            if isinstance(funcs, Transform): funcs = [funcs]
            # Wrap plain functions in `Transform` and sort by their `order`.
            self.fs = L(ifnone(funcs,[noop])).map(mk_transform).sorted(key='order')
        for f in self.fs:
            # Expose each tfm as an attribute named after its snake_cased type;
            # duplicates are collected into an `L`.
            name = camel2snake(type(f).__name__)
            a = getattr(self,name,None)
            if a is not None: f = L(a)+f
            setattr(self, name, f)
        self.set_as_item(as_item)
    def set_as_item(self, as_item):
        self.as_item = as_item
        for f in self.fs: f.as_item = as_item
    def setup(self, items=None):
        # Re-add tfms one at a time so each sees items processed by its predecessors.
        tfms = self.fs[:]
        self.fs.clear()
        for t in tfms: self.add(t,items)
    def add(self,t, items=None):
        t.setup(items)
        self.fs.append(t)
    def __call__(self, o): return compose_tfms(o, tfms=self.fs, split_idx=self.split_idx)
    def __repr__(self): return f"Pipeline: {self.fs}"
    def __getitem__(self,i): return self.fs[i]
    def __setstate__(self,data): self.__dict__.update(data)
    def __getattr__(self,k): return gather_attrs(self, k, 'fs')  # delegate unknown attrs to the tfms
    def __dir__(self): return super().__dir__() + gather_attr_names(self, 'fs')
    def decode (self, o, full=True):
        if full: return compose_tfms(o, tfms=self.fs, is_enc=False, reverse=True, split_idx=self.split_idx)
        #Not full means we decode up to the point the item knows how to show itself.
        for f in reversed(self.fs):
            if self._is_showable(o): return o
            o = f.decode(o, split_idx=self.split_idx)
        return o
    def show(self, o, ctx=None, **kwargs):
        # Decode only until the object(s) know how to show themselves.
        o = self.decode(o, full=False)
        o1 = [o] if self.as_item or not is_listy(o) else o
        for o_ in o1:
            if hasattr(o_, 'show'): ctx = o_.show(ctx=ctx, **kwargs)
        return ctx
    def _is_showable(self, o):
        # A tuple is showable only when every element has a `show` method.
        return all(hasattr(o_, 'show') for o_ in o) if is_listy(o) else hasattr(o, 'show')
# Out-of-line docs for the `Pipeline` methods.
add_docs(Pipeline,
         __call__="Compose `__call__` of all `fs` on `o`",
         decode="Compose `decode` of all `fs` on `o`",
         show="Show `o`, a single item from a tuple, decoding as needed",
         add="Add transform `t`",
         set_as_item="Set value of `as_item` for all transforms",
         setup="Call each tfm's `setup` in order")
`Pipeline` is a wrapper for `compose_tfms`. You can pass instances of `Transform` or regular functions in `funcs`; the `Pipeline` will wrap them all in `Transform` (and instantiate them if needed) during the initialization. It handles the transform `setup` by adding them one at a time and calling setup on each, goes through them in order in `__call__` or `decode`, and can `show` an object by decoding the transforms up until the point it gets an object that knows how to show itself.
# Empty pipeline is noop
pipe = Pipeline()
test_eq(pipe(1), 1)
pipe.set_as_item(False)
test_eq(pipe((1,)), (1,))
# Check pickle works
assert pickle.loads(pickle.dumps(pipe))
# Two-step pipeline: negate then cast to Int; decode runs in reverse and
# `show` decodes until the object can display itself.
class IntFloatTfm(Transform):
    def encodes(self, x): return Int(x)
    def decodes(self, x): return Float(x)
    foo=1
int_tfm=IntFloatTfm()
def neg(x): return -x
neg_tfm = Transform(neg, neg)
pipe = Pipeline([neg_tfm, int_tfm])
start = 2.0
t = pipe(start)
test_eq_type(t, Int(-2))
test_eq_type(pipe.decode(t), Float(start))
test_stdout(lambda:pipe.show(t), '-2')
pipe.set_as_item(False)
test_stdout(lambda:pipe.show(pipe((1.,2.))), '-1\n-2')
# Transform attributes (here `foo`) are reachable through the pipeline.
test_eq(pipe.foo, 1)
assert 'foo' in dir(pipe)
assert 'int_float_tfm' in dir(pipe)
Transforms are available as attributes named with the snake_case version of the names of their types. Attributes in transforms can be directly accessed as attributes of the pipeline.
# With duplicate transform types, the pipeline attribute holds an `L` of them.
test_eq(pipe.int_float_tfm, int_tfm)
test_eq(pipe.foo, 1)
pipe = Pipeline([int_tfm, int_tfm])
pipe.int_float_tfm
test_eq(pipe.int_float_tfm[0], int_tfm)
test_eq(pipe.foo, [1,1])
# Check opposite order
pipe = Pipeline([int_tfm,neg_tfm])
t = pipe(start)
test_eq(t, -2)
test_stdout(lambda:pipe.show(t), '-2')
# Classes passed to a pipeline are instantiated; decode/show still work,
# including elementwise over tuples once `as_item` is False.
class A(Transform):
    def encodes(self, x): return int(x)
    def decodes(self, x): return Float(x)
pipe = Pipeline([neg_tfm, A])
t = pipe(start)
test_eq_type(t, -2)
test_eq_type(pipe.decode(t), Float(start))
test_stdout(lambda:pipe.show(t), '-2.0')
s2 = (1,2)
pipe.set_as_item(False)
t = pipe(s2)
test_eq_type(t, (-1,-2))
test_eq_type(pipe.decode(t), (Float(1.),Float(2.)))
test_stdout(lambda:pipe.show(t), '-1.0\n-2.0')
# Image pipeline: load+resize, wrap as ArrayImage, optionally negate; `show`
# decodes back to a showable ArrayImage.
class B(Transform):
    def encodes(self, x): return x+1
    def decodes(self, x): return x-1
from PIL import Image
def f1(x:ArrayImage): return -x
def f2(x): return Image.open(x).resize((128,128))
def f3(x:Image.Image): return(ArrayImage(array(x)))
pipe = Pipeline([f2,f3,f1])
t = pipe(TEST_IMAGE)
test_eq(type(t), ArrayImage)
test_eq(t, -array(f3(f2(TEST_IMAGE))))
pipe = Pipeline([f2,f3])
t = pipe(TEST_IMAGE)
ax = pipe.show(t)
test_fig_exists(ax)
#Check filtering is properly applied
add1 = B()
add1.split_idx = 1
pipe = Pipeline([neg_tfm, A(), add1])
test_eq(pipe(start), -2)
pipe.split_idx=1
test_eq(pipe(start), -1)  # add1 only fires when split_idx==1: -2+1
pipe.split_idx=0
test_eq(pipe(start), -2)
# Round-trip and show behave consistently for every split_idx setting.
for t in [None, 0, 1]:
    pipe.split_idx=t
    test_eq(pipe.decode(pipe(start)), start)
    test_stdout(lambda: pipe.show(pipe(start)), "-2.0")
#TODO: method examples
# Render docs for the main Pipeline methods.
show_doc(Pipeline.__call__)
show_doc(Pipeline.decode)
show_doc(Pipeline.setup)
During the setup, the Pipeline
starts with no transform and adds them one at a time, so that during its setup, each transform gets the items processed up to its point and not after.
#hide
#Test is with TfmdList
#hide
# Export this notebook's `#export` cells to the library modules.
from local.notebook.export import notebook2script
notebook2script(all_fs=True)
Converted 00_test.ipynb. Converted 01_core_foundation.ipynb. Converted 01a_core_utils.ipynb. Converted 01b_core_dispatch.ipynb. Converted 01c_core_transform.ipynb. Converted 02_core_script.ipynb. Converted 03_torchcore.ipynb. Converted 03a_layers.ipynb. Converted 04_data_load.ipynb. Converted 05_data_core.ipynb. Converted 06_data_transforms.ipynb. Converted 07_data_block.ipynb. Converted 08_vision_core.ipynb. Converted 09_vision_augment.ipynb. Converted 09a_vision_data.ipynb. Converted 10_pets_tutorial.ipynb. Converted 11_vision_models_xresnet.ipynb. Converted 12_optimizer.ipynb. Converted 13_learner.ipynb. Converted 13a_metrics.ipynb. Converted 14_callback_schedule.ipynb. Converted 14a_callback_data.ipynb. Converted 15_callback_hook.ipynb. Converted 15a_vision_models_unet.ipynb. Converted 16_callback_progress.ipynb. Converted 17_callback_tracker.ipynb. Converted 18_callback_fp16.ipynb. Converted 19_callback_mixup.ipynb. Converted 20_interpret.ipynb. Converted 20a_distributed.ipynb. Converted 21_vision_learner.ipynb. Converted 22_tutorial_imagenette.ipynb. Converted 23_tutorial_transfer_learning.ipynb. Converted 30_text_core.ipynb. Converted 31_text_data.ipynb. Converted 32_text_models_awdlstm.ipynb. Converted 33_text_models_core.ipynb. Converted 34_callback_rnn.ipynb. Converted 35_tutorial_wikitext.ipynb. Converted 36_text_models_qrnn.ipynb. Converted 37_text_learner.ipynb. Converted 38_tutorial_ulmfit.ipynb. Converted 40_tabular_core.ipynb. Converted 41_tabular_model.ipynb. Converted 42_tabular_rapids.ipynb. Converted 50_data_block_examples.ipynb. Converted 60_medical_imaging.ipynb. Converted 65_medical_text.ipynb. Converted 70_callback_wandb.ipynb. Converted 71_callback_tensorboard.ipynb. Converted 90_notebook_core.ipynb. Converted 91_notebook_export.ipynb. Converted 92_notebook_showdoc.ipynb. Converted 93_notebook_export2html.ipynb. Converted 94_notebook_test.ipynb. Converted 95_index.ipynb. Converted 96_data_external.ipynb. Converted 97_utils_test.ipynb. 
Converted notebook2jekyll.ipynb.