#export
from local.test import *
from local.basics import *
from local.notebook.showdoc import *
#default_exp callback.progress
Callbacks and helper functions to track the progress of training or log results
from local.test_utils import *
# export
@docs
class ProgressCallback(Callback):
    "A `Callback` to handle the display of progress bars"
    run_after=Recorder

    def begin_fit(self):
        assert hasattr(self.learn, 'recorder')
        self.mbar = master_bar(list(range(self.n_epoch)))
        self.mbar.on_iter_begin()
        if self.learn.logger != noop:
            self.old_logger,self.learn.logger = self.logger,self._write_stats
            self._write_stats(self.recorder.metric_names)
        else: self.old_logger = noop

    def begin_epoch(self):    self.mbar.update(self.epoch)
    def begin_train(self):    self._launch_pbar()
    def begin_validate(self): self._launch_pbar()
    def after_train(self):    self.pbar.on_iter_end()
    def after_validate(self): self.pbar.on_iter_end()

    def after_batch(self):
        self.pbar.update(self.iter+1)
        if hasattr(self, 'smooth_loss'): self.pbar.comment = f'{self.smooth_loss:.4f}'

    def _launch_pbar(self):
        self.pbar = progress_bar(self.dl, parent=self.mbar)
        self.pbar.update(0)

    def after_fit(self):
        self.mbar.on_iter_end()
        self.learn.logger = self.old_logger

    def _write_stats(self, log):
        self.mbar.write([f'{l:.6f}' if isinstance(l, float) else str(l) for l in log], table=True)

    _docs = dict(begin_fit="Setup the master bar over the epochs",
                 begin_epoch="Update the master bar",
                 begin_train="Launch a progress bar over the training dataloader",
                 begin_validate="Launch a progress bar over the validation dataloader",
                 after_train="Close the progress bar over the training dataloader",
                 after_validate="Close the progress bar over the validation dataloader",
                 after_batch="Update the current progress bar",
                 after_fit="Close the master bar")
defaults.callbacks = [TrainEvalCallback, Recorder, ProgressCallback]
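Since `ProgressCallback` is registered in `defaults.callbacks`, any new `Learner` picks it up automatically; fitting a synthetic learner displays the master bar and writes the stats table: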
learn = synth_learner()
learn.fit(5)
epoch | train_loss | valid_loss | time |
---|---|---|---|
0 | 14.759526 | 12.156830 | 00:00 |
1 | 11.549006 | 8.656103 | 00:00 |
2 | 7.894700 | 5.738920 | 00:00 |
3 | 5.168761 | 3.628154 | 00:00 |
4 | 3.266419 | 2.235523 | 00:00 |
#hide
assert not learn.progress.mbar.child.is_active
lines = learn.progress.mbar.lines
test_eq(learn.recorder.metric_names, lines[0])
for i,(l,v) in enumerate(zip(lines[1:],learn.recorder.values)):
test_eq(l[:-1], [str(i)] + [f'{x:.6f}' for x in v])
#hide
#Check validate works without any training
def tst_metric(out, targ): return F.mse_loss(out, targ)
learn = synth_learner(n_trn=5, metrics=tst_metric)
preds,targs = learn.validate()
#hide
#Check get_preds works without any training
learn = synth_learner(n_trn=5, metrics=tst_metric)
preds,targs = learn.get_preds()
show_doc(ProgressCallback.begin_fit)
show_doc(ProgressCallback.begin_epoch)
show_doc(ProgressCallback.begin_train)
show_doc(ProgressCallback.begin_validate)
show_doc(ProgressCallback.after_batch)
show_doc(ProgressCallback.after_train)
show_doc(ProgressCallback.after_validate)
show_doc(ProgressCallback.after_fit)
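If you want to train without any bars, you can drop the callback from a given learner. A minimal sketch, assuming `Learner.remove_cb` from the learner notebook accepts a callback instance (other learners are unaffected, since they pick the callback up from `defaults.callbacks`):
learn = synth_learner()
learn.remove_cb(learn.progress)   # assumption: remove_cb takes the callback instance
learn.fit(1)                      # runs without master/progress bars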
# export
class ShowGraphCallback(Callback):
    "Update a graph of training and validation loss"
    run_after=ProgressCallback

    def begin_fit(self):
        self.nb_batches = []
        assert hasattr(self.learn, 'progress')

    def after_train(self): self.nb_batches.append(self.train_iter)

    def after_epoch(self):
        "Plot validation loss in the pbar graph"
        rec = self.learn.recorder
        iters = range_of(rec.losses)
        val_losses = [v[1] for v in rec.values]
        x_bounds = (0, (self.n_epoch - len(self.nb_batches)) * self.nb_batches[0] + len(rec.losses))
        y_bounds = (0, max((max(Tensor(rec.losses)), max(Tensor(val_losses)))))
        self.progress.mbar.update_graph([(iters, rec.losses), (self.nb_batches, val_losses)], x_bounds, y_bounds)
#slow
learn = synth_learner(cbs=ShowGraphCallback())
learn.fit(10)
epoch | train_loss | valid_loss | time |
---|---|---|---|
0 | 15.115982 | 8.124483 | 00:00 |
1 | 11.676845 | 5.935443 | 00:00 |
2 | 8.227362 | 3.910162 | 00:00 |
3 | 5.236221 | 2.500864 | 00:00 |
4 | 3.223713 | 1.592831 | 00:00 |
5 | 1.967296 | 1.012433 | 00:00 |
6 | 1.215965 | 0.640053 | 00:00 |
7 | 0.748091 | 0.402498 | 00:00 |
8 | 0.444547 | 0.260059 | 00:00 |
9 | 0.271614 | 0.171020 | 00:00 |
# export
class CSVLogger(Callback):
    "Log the results displayed in `learn.logger` to `learn.path/fname`"
    run_after=Recorder

    def __init__(self, fname='history.csv', append=False):
        self.fname,self.append = Path(fname),append

    def read_log(self):
        "Convenience method to quickly access the log."
        return pd.read_csv(self.path/self.fname)

    def begin_fit(self):
        "Prepare file with metric names."
        (self.path/self.fname).parent.mkdir(parents=True, exist_ok=True)
        self.file = (self.path/self.fname).open('a' if self.append else 'w')
        self.file.write(','.join(self.recorder.metric_names) + '\n')
        self.old_logger,self.learn.logger = self.logger,self._write_line

    def _write_line(self, log):
        "Write a line with `log` and call the old logger."
        self.file.write(','.join([str(t) for t in log]) + '\n')
        self.old_logger(log)

    def after_fit(self):
        "Close the file and clean up."
        self.file.close()
        self.learn.logger = self.old_logger
The results are appended to an existing file if `append=True`, or overwrite it otherwise; a short sketch of append mode follows the tests below.
learn = synth_learner(cbs=CSVLogger())
learn.fit(5)
epoch | train_loss | valid_loss | time |
---|---|---|---|
0 | 14.535604 | 10.573635 | 00:00 |
1 | 11.405640 | 7.535136 | 00:00 |
2 | 7.865438 | 4.952577 | 00:00 |
3 | 5.154984 | 3.121027 | 00:00 |
4 | 3.272465 | 1.925196 | 00:00 |
show_doc(CSVLogger.read_log)
df = learn.csv_logger.read_log()
test_eq(df.columns.values, learn.recorder.metric_names)
for i,v in enumerate(learn.recorder.values):
test_close(df.iloc[i][:3], [i] + v)
os.remove(learn.path/learn.csv_logger.fname)
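As noted above, passing `append=True` opens the file in append mode instead of overwriting it. A minimal sketch (note that `begin_fit` writes the header line on every call, so appended runs repeat it):
learn = synth_learner(cbs=CSVLogger(append=True))
learn.fit(2)
learn.fit(2)   # appends a second header line and two more rows to history.csv
os.remove(learn.path/learn.csv_logger.fname)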
show_doc(CSVLogger.begin_fit)
show_doc(CSVLogger.after_fit)
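Both `ProgressCallback` and `CSVLogger` rely on the same mechanism: swap `learn.logger` for their own writer in `begin_fit`, forward each line of stats, and restore the old logger in `after_fit`. A minimal sketch of a custom logger built on that pattern (the `PrintLogger` class and its methods are illustrative, not part of the library):
class PrintLogger(Callback):
    "Print each line of stats to stdout, then chain to the previous logger"
    run_after=Recorder
    def begin_fit(self):
        self.old_logger,self.learn.logger = self.logger,self._print_line
    def _print_line(self, log):
        print(' | '.join(str(t) for t in log))  # hypothetical destination: stdout
        self.old_logger(log)                    # keep the previous behavior (e.g. progress table)
    def after_fit(self): self.learn.logger = self.old_logger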
#hide
from local.notebook.export import notebook2script
notebook2script(all_fs=True)