Deep Learning Models -- A collection of various deep learning architectures, models, and tips for TensorFlow and PyTorch in Jupyter Notebooks.
%load_ext watermark
%watermark -a 'Sebastian Raschka' -v -p torch
Author: Sebastian Raschka Python implementation: CPython Python version : 3.8.8 IPython version : 7.21.0 torch: 1.8.1+cu111
import torch
import torchvision
import numpy as np
import matplotlib.pyplot as plt
import sys
sys.path.insert(0, "..") # to include ../helper_evaluate.py etc.
# From local helper files
from helper_utils import set_all_seeds, set_deterministic
from helper_evaluate import compute_confusion_matrix, compute_accuracy
from helper_train import train_classifier_simple_v2
from helper_plotting import plot_training_loss, plot_accuracy, show_examples, plot_confusion_matrix
from helper_data import get_dataloaders_cifar10, UnNormalize
##########################
### SETTINGS
##########################
# Reproducibility seed and core training hyperparameters.
RANDOM_SEED = 123
BATCH_SIZE = 128
NUM_EPOCHS = 150

# Prefer GPU index 3 when CUDA is available; otherwise fall back to CPU.
# (GPU 3 is specific to the author's multi-GPU machine.)
if torch.cuda.is_available():
    DEVICE = torch.device('cuda:3')
else:
    DEVICE = torch.device('cpu')

# Seed Python, NumPy, and PyTorch RNGs (helper from helper_utils.py).
set_all_seeds(RANDOM_SEED)
# Fully deterministic cuDNN/CUDA ops are disabled here for speed;
# uncomment to trade throughput for exact reproducibility.
#set_deterministic()
##########################
### CIFAR-10 DATASET
##########################
# NOTE: Skipping the resize (keeping the orig. 32x32 res.) makes training
# about 2-3x faster, but the test accuracy I got at 32x32 was lower (~77%).

# Shared normalization statistics: map each channel from [0, 1] to [-1, 1].
norm_mean = (0.5, 0.5, 0.5)
norm_std = (0.5, 0.5, 0.5)

# Training pipeline: upscale to 70x70, then a random 64x64 crop as a light
# augmentation before tensor conversion and normalization.
train_transforms = torchvision.transforms.Compose([
    torchvision.transforms.Resize((70, 70)),
    torchvision.transforms.RandomCrop((64, 64)),
    torchvision.transforms.ToTensor(),
    torchvision.transforms.Normalize(norm_mean, norm_std),
])

# Evaluation pipeline: same sizes, but a deterministic center crop so that
# validation/test results are repeatable.
test_transforms = torchvision.transforms.Compose([
    torchvision.transforms.Resize((70, 70)),
    torchvision.transforms.CenterCrop((64, 64)),
    torchvision.transforms.ToTensor(),
    torchvision.transforms.Normalize(norm_mean, norm_std),
])

# Build the three loaders (helper from helper_data.py); 10% of the training
# set is carved off for validation.
train_loader, valid_loader, test_loader = get_dataloaders_cifar10(
    batch_size=BATCH_SIZE,
    validation_fraction=0.1,
    train_transforms=train_transforms,
    test_transforms=test_transforms,
    num_workers=2)
# Checking the dataset: pull a single batch and inspect its shapes/labels.
images, labels = next(iter(train_loader))
print('Image batch dimensions:', images.shape)
print('Image label dimensions:', labels.shape)
print('Class labels of 10 examples:', labels[:10])
Files already downloaded and verified Image batch dimensions: torch.Size([128, 3, 64, 64]) Image label dimensions: torch.Size([128]) Class labels of 10 examples: tensor([4, 7, 4, 6, 2, 6, 9, 7, 3, 0])
# Render the first 64 images of the inspected batch as an 8x8 grid.
# normalize=True rescales the grid into [0, 1] for display; the channel
# axis is moved last (C,H,W -> H,W,C) as matplotlib expects.
grid = torchvision.utils.make_grid(images[:64], padding=2, normalize=True)
plt.figure(figsize=(8, 8))
plt.axis("off")
plt.title("Training Images")
plt.imshow(np.transpose(grid, (1, 2, 0)))
<matplotlib.image.AxesImage at 0x7f89d98d9ac0>
##########################
### MODEL
##########################
# Fetch the MobileNetV2 architecture from the torchvision hub, randomly
# initialized (pretrained=False) so it is trained from scratch.
model = torch.hub.load('pytorch/vision:v0.9.0', 'mobilenet_v2',
                       pretrained=False)

# Replace the final classification layer (1000 ImageNet classes in the
# original) with a 10-way head for CIFAR-10; 1280 is MobileNetV2's
# last-feature width.
num_classes = 10
model.classifier[-1] = torch.nn.Linear(in_features=1280,
                                       out_features=num_classes)
model = model.to(DEVICE)
Using cache found in /home/raschka/.cache/torch/hub/pytorch_vision_v0.9.0
# Plain Adam over all model parameters.
learning_rate = 0.01
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

# Run the full training loop (helper_train.py). Returns the per-minibatch
# loss history plus per-epoch train and validation accuracies.
# NOTE(review): scheduler_on='valid_acc' presumably steps an LR scheduler
# on validation accuracy, and best_model_save_path looks like the path of
# the best checkpoint by validation accuracy -- confirm in helper_train.py.
minibatch_loss_list, train_acc_list, valid_acc_list = train_classifier_simple_v2(
    model=model,
    num_epochs=NUM_EPOCHS,
    train_loader=train_loader,
    valid_loader=valid_loader,
    test_loader=test_loader,
    optimizer=optimizer,
    best_model_save_path='mobilenet-v2-best-1.pt',
    device=DEVICE,
    scheduler_on='valid_acc',
    logging_interval=100)
# Training-loss curve, smoothed with a 200-iteration running average
# (plot helpers from helper_plotting.py; results_dir=None = don't save).
iters_per_epoch = len(train_loader)
plot_training_loss(minibatch_loss_list=minibatch_loss_list,
                   num_epochs=NUM_EPOCHS,
                   iter_per_epoch=iters_per_epoch,
                   results_dir=None,
                   averaging_iterations=200)
plt.show()

# Per-epoch train vs. validation accuracy, y-axis zoomed to 60-100%.
plot_accuracy(train_acc_list=train_acc_list,
              valid_acc_list=valid_acc_list,
              results_dir=None)
plt.ylim([60, 100])
plt.show()
Epoch: 001/150 | Batch 0000/0351 | Loss: 2.3627 Epoch: 001/150 | Batch 0100/0351 | Loss: 1.9649 Epoch: 001/150 | Batch 0200/0351 | Loss: 1.7862 Epoch: 001/150 | Batch 0300/0351 | Loss: 1.9222 Epoch: 001/150 | Train: 32.20% | Validation: 32.60% | Best Validation (Ep. 001): 32.60% Time elapsed: 0.99 min Epoch: 002/150 | Batch 0000/0351 | Loss: 1.9170 Epoch: 002/150 | Batch 0100/0351 | Loss: 1.7127 Epoch: 002/150 | Batch 0200/0351 | Loss: 1.8019 Epoch: 002/150 | Batch 0300/0351 | Loss: 1.6782 Epoch: 002/150 | Train: 34.07% | Validation: 33.52% | Best Validation (Ep. 002): 33.52% Time elapsed: 2.00 min Epoch: 003/150 | Batch 0000/0351 | Loss: 1.7150 Epoch: 003/150 | Batch 0100/0351 | Loss: 1.6087 Epoch: 003/150 | Batch 0200/0351 | Loss: 1.4612 Epoch: 003/150 | Batch 0300/0351 | Loss: 1.5310 Epoch: 003/150 | Train: 44.74% | Validation: 45.50% | Best Validation (Ep. 003): 45.50% Time elapsed: 2.99 min Epoch: 004/150 | Batch 0000/0351 | Loss: 1.4913 Epoch: 004/150 | Batch 0100/0351 | Loss: 1.4658 Epoch: 004/150 | Batch 0200/0351 | Loss: 1.4478 Epoch: 004/150 | Batch 0300/0351 | Loss: 1.5523 Epoch: 004/150 | Train: 48.72% | Validation: 49.52% | Best Validation (Ep. 004): 49.52% Time elapsed: 4.00 min Epoch: 005/150 | Batch 0000/0351 | Loss: 1.2705 Epoch: 005/150 | Batch 0100/0351 | Loss: 1.5536 Epoch: 005/150 | Batch 0200/0351 | Loss: 1.2093 Epoch: 005/150 | Batch 0300/0351 | Loss: 1.2654 Epoch: 005/150 | Train: 54.51% | Validation: 55.88% | Best Validation (Ep. 005): 55.88% Time elapsed: 5.02 min Epoch: 006/150 | Batch 0000/0351 | Loss: 1.1654 Epoch: 006/150 | Batch 0100/0351 | Loss: 1.3058 Epoch: 006/150 | Batch 0200/0351 | Loss: 1.3688 Epoch: 006/150 | Batch 0300/0351 | Loss: 1.0430 Epoch: 006/150 | Train: 58.77% | Validation: 59.56% | Best Validation (Ep. 
006): 59.56% Time elapsed: 6.02 min Epoch: 007/150 | Batch 0000/0351 | Loss: 1.0396 Epoch: 007/150 | Batch 0100/0351 | Loss: 1.0419 Epoch: 007/150 | Batch 0200/0351 | Loss: 1.0635 Epoch: 007/150 | Batch 0300/0351 | Loss: 1.0383 Epoch: 007/150 | Train: 66.43% | Validation: 66.68% | Best Validation (Ep. 007): 66.68% Time elapsed: 7.03 min Epoch: 008/150 | Batch 0000/0351 | Loss: 0.8938 Epoch: 008/150 | Batch 0100/0351 | Loss: 0.8783 Epoch: 008/150 | Batch 0200/0351 | Loss: 0.9044 Epoch: 008/150 | Batch 0300/0351 | Loss: 0.8579 Epoch: 008/150 | Train: 71.17% | Validation: 70.60% | Best Validation (Ep. 008): 70.60% Time elapsed: 8.03 min Epoch: 009/150 | Batch 0000/0351 | Loss: 0.8389 Epoch: 009/150 | Batch 0100/0351 | Loss: 0.7041 Epoch: 009/150 | Batch 0200/0351 | Loss: 0.6849 Epoch: 009/150 | Batch 0300/0351 | Loss: 0.9598 Epoch: 009/150 | Train: 71.66% | Validation: 71.24% | Best Validation (Ep. 009): 71.24% Time elapsed: 9.04 min Epoch: 010/150 | Batch 0000/0351 | Loss: 0.8388 Epoch: 010/150 | Batch 0100/0351 | Loss: 0.8849 Epoch: 010/150 | Batch 0200/0351 | Loss: 0.6462 Epoch: 010/150 | Batch 0300/0351 | Loss: 0.7841 Epoch: 010/150 | Train: 74.38% | Validation: 73.74% | Best Validation (Ep. 010): 73.74% Time elapsed: 10.05 min Epoch: 011/150 | Batch 0000/0351 | Loss: 0.6932 Epoch: 011/150 | Batch 0100/0351 | Loss: 0.7479 Epoch: 011/150 | Batch 0200/0351 | Loss: 0.7884 Epoch: 011/150 | Batch 0300/0351 | Loss: 0.6257 Epoch: 011/150 | Train: 76.58% | Validation: 75.58% | Best Validation (Ep. 011): 75.58% Time elapsed: 11.06 min Epoch: 012/150 | Batch 0000/0351 | Loss: 0.8433 Epoch: 012/150 | Batch 0100/0351 | Loss: 0.8349 Epoch: 012/150 | Batch 0200/0351 | Loss: 0.6882 Epoch: 012/150 | Batch 0300/0351 | Loss: 0.6866 Epoch: 012/150 | Train: 77.64% | Validation: 76.52% | Best Validation (Ep. 
012): 76.52% Time elapsed: 12.07 min Epoch: 013/150 | Batch 0000/0351 | Loss: 0.5299 Epoch: 013/150 | Batch 0100/0351 | Loss: 0.6179 Epoch: 013/150 | Batch 0200/0351 | Loss: 0.5531 Epoch: 013/150 | Batch 0300/0351 | Loss: 0.7265 Epoch: 013/150 | Train: 78.00% | Validation: 77.24% | Best Validation (Ep. 013): 77.24% Time elapsed: 13.09 min Epoch: 014/150 | Batch 0000/0351 | Loss: 0.4452 Epoch: 014/150 | Batch 0100/0351 | Loss: 0.5744 Epoch: 014/150 | Batch 0200/0351 | Loss: 0.7048 Epoch: 014/150 | Batch 0300/0351 | Loss: 0.5696 Epoch: 014/150 | Train: 79.76% | Validation: 77.90% | Best Validation (Ep. 014): 77.90% Time elapsed: 14.10 min Epoch: 015/150 | Batch 0000/0351 | Loss: 0.5678 Epoch: 015/150 | Batch 0100/0351 | Loss: 0.5798 Epoch: 015/150 | Batch 0200/0351 | Loss: 0.6856 Epoch: 015/150 | Batch 0300/0351 | Loss: 0.6595 Epoch: 015/150 | Train: 79.52% | Validation: 77.58% | Best Validation (Ep. 014): 77.90% Time elapsed: 15.11 min Epoch: 016/150 | Batch 0000/0351 | Loss: 0.5353 Epoch: 016/150 | Batch 0100/0351 | Loss: 0.6401 Epoch: 016/150 | Batch 0200/0351 | Loss: 0.5083 Epoch: 016/150 | Batch 0300/0351 | Loss: 0.4918 Epoch: 016/150 | Train: 82.27% | Validation: 80.12% | Best Validation (Ep. 016): 80.12% Time elapsed: 16.11 min Epoch: 017/150 | Batch 0000/0351 | Loss: 0.4974 Epoch: 017/150 | Batch 0100/0351 | Loss: 0.5485 Epoch: 017/150 | Batch 0200/0351 | Loss: 0.5972 Epoch: 017/150 | Batch 0300/0351 | Loss: 0.6958 Epoch: 017/150 | Train: 82.55% | Validation: 79.38% | Best Validation (Ep. 016): 80.12% Time elapsed: 17.11 min Epoch: 018/150 | Batch 0000/0351 | Loss: 0.4886 Epoch: 018/150 | Batch 0100/0351 | Loss: 0.4194 Epoch: 018/150 | Batch 0200/0351 | Loss: 0.6284 Epoch: 018/150 | Batch 0300/0351 | Loss: 0.5414 Epoch: 018/150 | Train: 80.12% | Validation: 77.76% | Best Validation (Ep. 
016): 80.12% Time elapsed: 18.12 min Epoch: 019/150 | Batch 0000/0351 | Loss: 0.5281 Epoch: 019/150 | Batch 0100/0351 | Loss: 0.5364 Epoch: 019/150 | Batch 0200/0351 | Loss: 0.5189 Epoch: 019/150 | Batch 0300/0351 | Loss: 0.4465 Epoch: 019/150 | Train: 84.77% | Validation: 81.76% | Best Validation (Ep. 019): 81.76% Time elapsed: 19.13 min Epoch: 020/150 | Batch 0000/0351 | Loss: 0.4794 Epoch: 020/150 | Batch 0100/0351 | Loss: 0.4932 Epoch: 020/150 | Batch 0200/0351 | Loss: 0.5954 Epoch: 020/150 | Batch 0300/0351 | Loss: 0.4434 Epoch: 020/150 | Train: 85.05% | Validation: 81.80% | Best Validation (Ep. 020): 81.80% Time elapsed: 20.14 min Epoch: 021/150 | Batch 0000/0351 | Loss: 0.4239 Epoch: 021/150 | Batch 0100/0351 | Loss: 0.4169 Epoch: 021/150 | Batch 0200/0351 | Loss: 0.5721 Epoch: 021/150 | Batch 0300/0351 | Loss: 0.4595 Epoch: 021/150 | Train: 84.10% | Validation: 80.30% | Best Validation (Ep. 020): 81.80% Time elapsed: 21.15 min Epoch: 022/150 | Batch 0000/0351 | Loss: 0.4494 Epoch: 022/150 | Batch 0100/0351 | Loss: 0.4051 Epoch: 022/150 | Batch 0200/0351 | Loss: 0.5057 Epoch: 022/150 | Batch 0300/0351 | Loss: 0.3307 Epoch: 022/150 | Train: 85.38% | Validation: 80.78% | Best Validation (Ep. 020): 81.80% Time elapsed: 22.17 min Epoch: 023/150 | Batch 0000/0351 | Loss: 0.4090 Epoch: 023/150 | Batch 0100/0351 | Loss: 0.5980 Epoch: 023/150 | Batch 0200/0351 | Loss: 0.3950 Epoch: 023/150 | Batch 0300/0351 | Loss: 0.4792 Epoch: 023/150 | Train: 86.38% | Validation: 82.06% | Best Validation (Ep. 023): 82.06% Time elapsed: 23.17 min Epoch: 024/150 | Batch 0000/0351 | Loss: 0.3847 Epoch: 024/150 | Batch 0100/0351 | Loss: 0.4703 Epoch: 024/150 | Batch 0200/0351 | Loss: 0.3416 Epoch: 024/150 | Batch 0300/0351 | Loss: 0.3956 Epoch: 024/150 | Train: 85.18% | Validation: 80.80% | Best Validation (Ep. 
023): 82.06% Time elapsed: 24.18 min Epoch: 025/150 | Batch 0000/0351 | Loss: 0.4564 Epoch: 025/150 | Batch 0100/0351 | Loss: 0.3545 Epoch: 025/150 | Batch 0200/0351 | Loss: 0.5305 Epoch: 025/150 | Batch 0300/0351 | Loss: 0.5551 Epoch: 025/150 | Train: 84.97% | Validation: 80.86% | Best Validation (Ep. 023): 82.06% Time elapsed: 25.18 min Epoch: 026/150 | Batch 0000/0351 | Loss: 0.2954 Epoch: 026/150 | Batch 0100/0351 | Loss: 0.3316 Epoch: 026/150 | Batch 0200/0351 | Loss: 0.3769 Epoch: 026/150 | Batch 0300/0351 | Loss: 0.3684 Epoch: 026/150 | Train: 87.97% | Validation: 82.52% | Best Validation (Ep. 026): 82.52% Time elapsed: 26.19 min Epoch: 027/150 | Batch 0000/0351 | Loss: 0.3095 Epoch: 027/150 | Batch 0100/0351 | Loss: 0.3755 Epoch: 027/150 | Batch 0200/0351 | Loss: 0.3470 Epoch: 027/150 | Batch 0300/0351 | Loss: 0.3880 Epoch: 027/150 | Train: 88.26% | Validation: 83.06% | Best Validation (Ep. 027): 83.06% Time elapsed: 27.19 min Epoch: 028/150 | Batch 0000/0351 | Loss: 0.3093 Epoch: 028/150 | Batch 0100/0351 | Loss: 0.5271 Epoch: 028/150 | Batch 0200/0351 | Loss: 0.3308 Epoch: 028/150 | Batch 0300/0351 | Loss: 0.4117 Epoch: 028/150 | Train: 86.03% | Validation: 81.22% | Best Validation (Ep. 027): 83.06% Time elapsed: 28.20 min Epoch: 029/150 | Batch 0000/0351 | Loss: 0.3505 Epoch: 029/150 | Batch 0100/0351 | Loss: 0.3633 Epoch: 029/150 | Batch 0200/0351 | Loss: 0.3977 Epoch: 029/150 | Batch 0300/0351 | Loss: 0.3278 Epoch: 029/150 | Train: 88.31% | Validation: 82.16% | Best Validation (Ep. 027): 83.06% Time elapsed: 29.20 min Epoch: 030/150 | Batch 0000/0351 | Loss: 0.2648 Epoch: 030/150 | Batch 0100/0351 | Loss: 0.3081 Epoch: 030/150 | Batch 0200/0351 | Loss: 0.2555 Epoch: 030/150 | Batch 0300/0351 | Loss: 0.3020 Epoch: 030/150 | Train: 89.37% | Validation: 83.20% | Best Validation (Ep. 
030): 83.20% Time elapsed: 30.20 min Epoch: 031/150 | Batch 0000/0351 | Loss: 0.3236 Epoch: 031/150 | Batch 0100/0351 | Loss: 0.3439 Epoch: 031/150 | Batch 0200/0351 | Loss: 0.1880 Epoch: 031/150 | Batch 0300/0351 | Loss: 0.3492 Epoch: 031/150 | Train: 89.59% | Validation: 84.06% | Best Validation (Ep. 031): 84.06% Time elapsed: 31.20 min Epoch: 032/150 | Batch 0000/0351 | Loss: 0.3168 Epoch: 032/150 | Batch 0100/0351 | Loss: 0.3492 Epoch: 032/150 | Batch 0200/0351 | Loss: 0.2662 Epoch: 032/150 | Batch 0300/0351 | Loss: 0.4659 Epoch: 032/150 | Train: 89.59% | Validation: 83.02% | Best Validation (Ep. 031): 84.06% Time elapsed: 32.20 min Epoch: 033/150 | Batch 0000/0351 | Loss: 0.3181 Epoch: 033/150 | Batch 0100/0351 | Loss: 0.2911 Epoch: 033/150 | Batch 0200/0351 | Loss: 0.3782 Epoch: 033/150 | Batch 0300/0351 | Loss: 0.3123 Epoch: 033/150 | Train: 89.62% | Validation: 82.86% | Best Validation (Ep. 031): 84.06% Time elapsed: 33.21 min Epoch: 034/150 | Batch 0000/0351 | Loss: 0.3160 Epoch: 034/150 | Batch 0100/0351 | Loss: 0.3951 Epoch: 034/150 | Batch 0200/0351 | Loss: 0.2886 Epoch: 034/150 | Batch 0300/0351 | Loss: 0.1815 Epoch: 034/150 | Train: 91.41% | Validation: 84.08% | Best Validation (Ep. 034): 84.08% Time elapsed: 34.21 min Epoch: 035/150 | Batch 0000/0351 | Loss: 0.3103 Epoch: 035/150 | Batch 0100/0351 | Loss: 0.3028 Epoch: 035/150 | Batch 0200/0351 | Loss: 0.3426 Epoch: 035/150 | Batch 0300/0351 | Loss: 0.2469 Epoch: 035/150 | Train: 91.55% | Validation: 84.32% | Best Validation (Ep. 035): 84.32% Time elapsed: 35.22 min Epoch: 036/150 | Batch 0000/0351 | Loss: 0.2431 Epoch: 036/150 | Batch 0100/0351 | Loss: 0.2956 Epoch: 036/150 | Batch 0200/0351 | Loss: 0.2675 Epoch: 036/150 | Batch 0300/0351 | Loss: 0.3654 Epoch: 036/150 | Train: 91.66% | Validation: 84.52% | Best Validation (Ep. 
036): 84.52% Time elapsed: 36.23 min Epoch: 037/150 | Batch 0000/0351 | Loss: 0.1738 Epoch: 037/150 | Batch 0100/0351 | Loss: 0.2952 Epoch: 037/150 | Batch 0200/0351 | Loss: 0.2376 Epoch: 037/150 | Batch 0300/0351 | Loss: 0.3235 Epoch: 037/150 | Train: 91.31% | Validation: 83.74% | Best Validation (Ep. 036): 84.52% Time elapsed: 37.23 min Epoch: 038/150 | Batch 0000/0351 | Loss: 0.3415 Epoch: 038/150 | Batch 0100/0351 | Loss: 0.2802 Epoch: 038/150 | Batch 0200/0351 | Loss: 0.2865 Epoch: 038/150 | Batch 0300/0351 | Loss: 0.2547 Epoch: 038/150 | Train: 91.63% | Validation: 84.58% | Best Validation (Ep. 038): 84.58% Time elapsed: 38.24 min Epoch: 039/150 | Batch 0000/0351 | Loss: 0.2538 Epoch: 039/150 | Batch 0100/0351 | Loss: 0.2693 Epoch: 039/150 | Batch 0200/0351 | Loss: 0.3096 Epoch: 039/150 | Batch 0300/0351 | Loss: 0.2070 Epoch: 039/150 | Train: 92.33% | Validation: 84.20% | Best Validation (Ep. 038): 84.58% Time elapsed: 39.26 min Epoch: 040/150 | Batch 0000/0351 | Loss: 0.2201 Epoch: 040/150 | Batch 0100/0351 | Loss: 0.4122 Epoch: 040/150 | Batch 0200/0351 | Loss: 0.2909 Epoch: 040/150 | Batch 0300/0351 | Loss: 0.2352 Epoch: 040/150 | Train: 92.51% | Validation: 84.56% | Best Validation (Ep. 038): 84.58% Time elapsed: 40.27 min Epoch: 041/150 | Batch 0000/0351 | Loss: 0.2002 Epoch: 041/150 | Batch 0100/0351 | Loss: 0.2190 Epoch: 041/150 | Batch 0200/0351 | Loss: 0.2712 Epoch: 041/150 | Batch 0300/0351 | Loss: 0.2431 Epoch: 041/150 | Train: 92.84% | Validation: 84.68% | Best Validation (Ep. 041): 84.68% Time elapsed: 41.27 min Epoch: 042/150 | Batch 0000/0351 | Loss: 0.1842 Epoch: 042/150 | Batch 0100/0351 | Loss: 0.3098 Epoch: 042/150 | Batch 0200/0351 | Loss: 0.2653 Epoch: 042/150 | Batch 0300/0351 | Loss: 0.1495 Epoch: 042/150 | Train: 92.87% | Validation: 85.32% | Best Validation (Ep. 
042): 85.32% Time elapsed: 42.28 min Epoch: 043/150 | Batch 0000/0351 | Loss: 0.1133 Epoch: 043/150 | Batch 0100/0351 | Loss: 0.2010 Epoch: 043/150 | Batch 0200/0351 | Loss: 0.1821 Epoch: 043/150 | Batch 0300/0351 | Loss: 0.3471 Epoch: 043/150 | Train: 92.91% | Validation: 85.12% | Best Validation (Ep. 042): 85.32% Time elapsed: 43.30 min Epoch: 044/150 | Batch 0000/0351 | Loss: 0.1283 Epoch: 044/150 | Batch 0100/0351 | Loss: 0.2000 Epoch: 044/150 | Batch 0200/0351 | Loss: 0.2470 Epoch: 044/150 | Batch 0300/0351 | Loss: 0.1974 Epoch: 044/150 | Train: 93.63% | Validation: 85.08% | Best Validation (Ep. 042): 85.32% Time elapsed: 44.31 min Epoch: 045/150 | Batch 0000/0351 | Loss: 0.1559 Epoch: 045/150 | Batch 0100/0351 | Loss: 0.1951 Epoch: 045/150 | Batch 0200/0351 | Loss: 0.1584 Epoch: 045/150 | Batch 0300/0351 | Loss: 0.2041 Epoch: 045/150 | Train: 93.39% | Validation: 84.20% | Best Validation (Ep. 042): 85.32% Time elapsed: 45.31 min Epoch: 046/150 | Batch 0000/0351 | Loss: 0.1548 Epoch: 046/150 | Batch 0100/0351 | Loss: 0.2453 Epoch: 046/150 | Batch 0200/0351 | Loss: 0.2384 Epoch: 046/150 | Batch 0300/0351 | Loss: 0.3172 Epoch: 046/150 | Train: 93.11% | Validation: 84.82% | Best Validation (Ep. 042): 85.32% Time elapsed: 46.32 min Epoch: 047/150 | Batch 0000/0351 | Loss: 0.1467 Epoch: 047/150 | Batch 0100/0351 | Loss: 0.2305 Epoch: 047/150 | Batch 0200/0351 | Loss: 0.1956 Epoch: 047/150 | Batch 0300/0351 | Loss: 0.2545 Epoch: 047/150 | Train: 93.48% | Validation: 84.94% | Best Validation (Ep. 042): 85.32% Time elapsed: 47.34 min Epoch: 048/150 | Batch 0000/0351 | Loss: 0.1824 Epoch: 048/150 | Batch 0100/0351 | Loss: 0.2651 Epoch: 048/150 | Batch 0200/0351 | Loss: 0.1078 Epoch: 048/150 | Batch 0300/0351 | Loss: 0.2309 Epoch: 048/150 | Train: 93.45% | Validation: 85.00% | Best Validation (Ep. 
042): 85.32% Time elapsed: 48.35 min Epoch: 049/150 | Batch 0000/0351 | Loss: 0.2058 Epoch: 049/150 | Batch 0100/0351 | Loss: 0.1847 Epoch: 049/150 | Batch 0200/0351 | Loss: 0.1582 Epoch: 049/150 | Batch 0300/0351 | Loss: 0.2515 Epoch: 049/150 | Train: 94.97% | Validation: 86.04% | Best Validation (Ep. 049): 86.04% Time elapsed: 49.35 min Epoch: 050/150 | Batch 0000/0351 | Loss: 0.1379 Epoch: 050/150 | Batch 0100/0351 | Loss: 0.1774 Epoch: 050/150 | Batch 0200/0351 | Loss: 0.2422 Epoch: 050/150 | Batch 0300/0351 | Loss: 0.2153 Epoch: 050/150 | Train: 94.53% | Validation: 85.36% | Best Validation (Ep. 049): 86.04% Time elapsed: 50.36 min Epoch: 051/150 | Batch 0000/0351 | Loss: 0.0821 Epoch: 051/150 | Batch 0100/0351 | Loss: 0.1976 Epoch: 051/150 | Batch 0200/0351 | Loss: 0.2350 Epoch: 051/150 | Batch 0300/0351 | Loss: 0.1680 Epoch: 051/150 | Train: 94.13% | Validation: 84.74% | Best Validation (Ep. 049): 86.04% Time elapsed: 51.36 min Epoch: 052/150 | Batch 0000/0351 | Loss: 0.1485 Epoch: 052/150 | Batch 0100/0351 | Loss: 0.1863 Epoch: 052/150 | Batch 0200/0351 | Loss: 0.2098 Epoch: 052/150 | Batch 0300/0351 | Loss: 0.2109 Epoch: 052/150 | Train: 94.74% | Validation: 85.48% | Best Validation (Ep. 049): 86.04% Time elapsed: 52.35 min Epoch: 053/150 | Batch 0000/0351 | Loss: 0.1633 Epoch: 053/150 | Batch 0100/0351 | Loss: 0.0838 Epoch: 053/150 | Batch 0200/0351 | Loss: 0.2669 Epoch: 053/150 | Batch 0300/0351 | Loss: 0.1616 Epoch: 053/150 | Train: 93.19% | Validation: 84.68% | Best Validation (Ep. 049): 86.04% Time elapsed: 53.36 min Epoch: 054/150 | Batch 0000/0351 | Loss: 0.1686 Epoch: 054/150 | Batch 0100/0351 | Loss: 0.2059 Epoch: 054/150 | Batch 0200/0351 | Loss: 0.1793 Epoch: 054/150 | Batch 0300/0351 | Loss: 0.1698 Epoch: 054/150 | Train: 94.90% | Validation: 85.16% | Best Validation (Ep. 
049): 86.04% Time elapsed: 54.37 min Epoch: 055/150 | Batch 0000/0351 | Loss: 0.2452 Epoch: 055/150 | Batch 0100/0351 | Loss: 0.1848 Epoch: 055/150 | Batch 0200/0351 | Loss: 0.1840 Epoch: 055/150 | Batch 0300/0351 | Loss: 0.1063 Epoch: 055/150 | Train: 94.45% | Validation: 84.64% | Best Validation (Ep. 049): 86.04% Time elapsed: 55.38 min Epoch: 056/150 | Batch 0000/0351 | Loss: 0.1848 Epoch: 056/150 | Batch 0100/0351 | Loss: 0.2289 Epoch: 056/150 | Batch 0200/0351 | Loss: 0.2413 Epoch: 056/150 | Batch 0300/0351 | Loss: 0.1991 Epoch: 056/150 | Train: 93.75% | Validation: 83.98% | Best Validation (Ep. 049): 86.04% Time elapsed: 56.39 min Epoch: 057/150 | Batch 0000/0351 | Loss: 0.1368 Epoch: 057/150 | Batch 0100/0351 | Loss: 0.1951 Epoch: 057/150 | Batch 0200/0351 | Loss: 0.1271 Epoch: 057/150 | Batch 0300/0351 | Loss: 0.1547 Epoch: 057/150 | Train: 94.10% | Validation: 84.46% | Best Validation (Ep. 049): 86.04% Time elapsed: 57.39 min Epoch: 058/150 | Batch 0000/0351 | Loss: 0.1941 Epoch: 058/150 | Batch 0100/0351 | Loss: 0.0985 Epoch: 058/150 | Batch 0200/0351 | Loss: 0.1238 Epoch: 058/150 | Batch 0300/0351 | Loss: 0.1051 Epoch: 058/150 | Train: 95.09% | Validation: 85.54% | Best Validation (Ep. 049): 86.04% Time elapsed: 58.40 min Epoch: 059/150 | Batch 0000/0351 | Loss: 0.1273 Epoch: 059/150 | Batch 0100/0351 | Loss: 0.1513 Epoch: 059/150 | Batch 0200/0351 | Loss: 0.1755 Epoch: 059/150 | Batch 0300/0351 | Loss: 0.1825 Epoch: 059/150 | Train: 95.20% | Validation: 84.60% | Best Validation (Ep. 049): 86.04% Time elapsed: 59.40 min Epoch: 060/150 | Batch 0000/0351 | Loss: 0.1481 Epoch: 060/150 | Batch 0100/0351 | Loss: 0.1254 Epoch: 060/150 | Batch 0200/0351 | Loss: 0.0907 Epoch: 060/150 | Batch 0300/0351 | Loss: 0.1262 Epoch: 060/150 | Train: 95.50% | Validation: 85.74% | Best Validation (Ep. 
049): 86.04% Time elapsed: 60.41 min Epoch: 061/150 | Batch 0000/0351 | Loss: 0.1217 Epoch: 061/150 | Batch 0100/0351 | Loss: 0.1213 Epoch: 061/150 | Batch 0200/0351 | Loss: 0.2105 Epoch: 061/150 | Batch 0300/0351 | Loss: 0.2153 Epoch: 061/150 | Train: 96.01% | Validation: 85.62% | Best Validation (Ep. 049): 86.04% Time elapsed: 61.43 min Epoch: 062/150 | Batch 0000/0351 | Loss: 0.2152 Epoch: 062/150 | Batch 0100/0351 | Loss: 0.1406 Epoch: 062/150 | Batch 0200/0351 | Loss: 0.2111 Epoch: 062/150 | Batch 0300/0351 | Loss: 0.0902 Epoch: 062/150 | Train: 95.27% | Validation: 85.14% | Best Validation (Ep. 049): 86.04% Time elapsed: 62.44 min Epoch: 063/150 | Batch 0000/0351 | Loss: 0.1066 Epoch: 063/150 | Batch 0100/0351 | Loss: 0.2216 Epoch: 063/150 | Batch 0200/0351 | Loss: 0.1705 Epoch: 063/150 | Batch 0300/0351 | Loss: 0.1159 Epoch: 063/150 | Train: 93.95% | Validation: 84.42% | Best Validation (Ep. 049): 86.04% Time elapsed: 63.45 min Epoch: 064/150 | Batch 0000/0351 | Loss: 0.2963 Epoch: 064/150 | Batch 0100/0351 | Loss: 0.1884 Epoch: 064/150 | Batch 0200/0351 | Loss: 0.1314 Epoch: 064/150 | Batch 0300/0351 | Loss: 0.0855 Epoch: 064/150 | Train: 95.88% | Validation: 86.20% | Best Validation (Ep. 064): 86.20% Time elapsed: 64.46 min Epoch: 065/150 | Batch 0000/0351 | Loss: 0.0997 Epoch: 065/150 | Batch 0100/0351 | Loss: 0.0853 Epoch: 065/150 | Batch 0200/0351 | Loss: 0.1124 Epoch: 065/150 | Batch 0300/0351 | Loss: 0.1675 Epoch: 065/150 | Train: 95.48% | Validation: 85.88% | Best Validation (Ep. 064): 86.20% Time elapsed: 65.47 min Epoch: 066/150 | Batch 0000/0351 | Loss: 0.2027 Epoch: 066/150 | Batch 0100/0351 | Loss: 0.1531 Epoch: 066/150 | Batch 0200/0351 | Loss: 0.1167 Epoch: 066/150 | Batch 0300/0351 | Loss: 0.1602 Epoch: 066/150 | Train: 95.45% | Validation: 85.14% | Best Validation (Ep. 
064): 86.20% Time elapsed: 66.48 min Epoch: 067/150 | Batch 0000/0351 | Loss: 0.1469 Epoch: 067/150 | Batch 0100/0351 | Loss: 0.1220 Epoch: 067/150 | Batch 0200/0351 | Loss: 0.1505 Epoch: 067/150 | Batch 0300/0351 | Loss: 0.1067 Epoch: 067/150 | Train: 96.46% | Validation: 85.68% | Best Validation (Ep. 064): 86.20% Time elapsed: 67.49 min Epoch: 068/150 | Batch 0000/0351 | Loss: 0.1765 Epoch: 068/150 | Batch 0100/0351 | Loss: 0.1393 Epoch: 068/150 | Batch 0200/0351 | Loss: 0.1714 Epoch: 068/150 | Batch 0300/0351 | Loss: 0.1725 Epoch: 068/150 | Train: 95.82% | Validation: 85.78% | Best Validation (Ep. 064): 86.20% Time elapsed: 68.50 min Epoch: 069/150 | Batch 0000/0351 | Loss: 0.1317 Epoch: 069/150 | Batch 0100/0351 | Loss: 0.1835 Epoch: 069/150 | Batch 0200/0351 | Loss: 0.1236 Epoch: 069/150 | Batch 0300/0351 | Loss: 0.1861 Epoch: 069/150 | Train: 96.25% | Validation: 85.74% | Best Validation (Ep. 064): 86.20% Time elapsed: 69.51 min Epoch: 070/150 | Batch 0000/0351 | Loss: 0.0724 Epoch: 070/150 | Batch 0100/0351 | Loss: 0.1476 Epoch: 070/150 | Batch 0200/0351 | Loss: 0.2050 Epoch: 070/150 | Batch 0300/0351 | Loss: 0.1338 Epoch: 070/150 | Train: 94.90% | Validation: 84.74% | Best Validation (Ep. 064): 86.20% Time elapsed: 70.52 min Epoch: 071/150 | Batch 0000/0351 | Loss: 0.0960 Epoch: 071/150 | Batch 0100/0351 | Loss: 0.1102 Epoch: 071/150 | Batch 0200/0351 | Loss: 0.1163 Epoch: 071/150 | Batch 0300/0351 | Loss: 0.0854 Epoch: 071/150 | Train: 96.80% | Validation: 86.38% | Best Validation (Ep. 071): 86.38% Time elapsed: 71.51 min Epoch: 072/150 | Batch 0000/0351 | Loss: 0.1198 Epoch: 072/150 | Batch 0100/0351 | Loss: 0.0642 Epoch: 072/150 | Batch 0200/0351 | Loss: 0.1525 Epoch: 072/150 | Batch 0300/0351 | Loss: 0.0903 Epoch: 072/150 | Train: 95.85% | Validation: 85.02% | Best Validation (Ep. 
071): 86.38% Time elapsed: 72.51 min Epoch: 073/150 | Batch 0000/0351 | Loss: 0.1271 Epoch: 073/150 | Batch 0100/0351 | Loss: 0.1253 Epoch: 073/150 | Batch 0200/0351 | Loss: 0.1492 Epoch: 073/150 | Batch 0300/0351 | Loss: 0.1213 Epoch: 073/150 | Train: 95.81% | Validation: 85.30% | Best Validation (Ep. 071): 86.38% Time elapsed: 73.52 min Epoch: 074/150 | Batch 0000/0351 | Loss: 0.0746 Epoch: 074/150 | Batch 0100/0351 | Loss: 0.0878 Epoch: 074/150 | Batch 0200/0351 | Loss: 0.1037 Epoch: 074/150 | Batch 0300/0351 | Loss: 0.1882 Epoch: 074/150 | Train: 96.66% | Validation: 85.78% | Best Validation (Ep. 071): 86.38% Time elapsed: 74.52 min Epoch: 075/150 | Batch 0000/0351 | Loss: 0.1323 Epoch: 075/150 | Batch 0100/0351 | Loss: 0.0349 Epoch: 075/150 | Batch 0200/0351 | Loss: 0.0717 Epoch: 075/150 | Batch 0300/0351 | Loss: 0.1919 Epoch: 075/150 | Train: 96.27% | Validation: 85.62% | Best Validation (Ep. 071): 86.38% Time elapsed: 75.52 min Epoch: 076/150 | Batch 0000/0351 | Loss: 0.0303 Epoch: 076/150 | Batch 0100/0351 | Loss: 0.1512 Epoch: 076/150 | Batch 0200/0351 | Loss: 0.1530 Epoch: 076/150 | Batch 0300/0351 | Loss: 0.0753 Epoch: 076/150 | Train: 96.65% | Validation: 85.96% | Best Validation (Ep. 071): 86.38% Time elapsed: 76.52 min Epoch: 077/150 | Batch 0000/0351 | Loss: 0.0520 Epoch: 077/150 | Batch 0100/0351 | Loss: 0.1501 Epoch: 077/150 | Batch 0200/0351 | Loss: 0.0826 Epoch: 077/150 | Batch 0300/0351 | Loss: 0.0843 Epoch: 077/150 | Train: 96.66% | Validation: 85.88% | Best Validation (Ep. 071): 86.38% Time elapsed: 77.54 min Epoch: 078/150 | Batch 0000/0351 | Loss: 0.1547 Epoch: 078/150 | Batch 0100/0351 | Loss: 0.0569 Epoch: 078/150 | Batch 0200/0351 | Loss: 0.2133 Epoch: 078/150 | Batch 0300/0351 | Loss: 0.1873 Epoch: 078/150 | Train: 96.80% | Validation: 85.28% | Best Validation (Ep. 
071): 86.38% Time elapsed: 78.54 min Epoch: 079/150 | Batch 0000/0351 | Loss: 0.2807 Epoch: 079/150 | Batch 0100/0351 | Loss: 0.0939 Epoch: 079/150 | Batch 0200/0351 | Loss: 0.1360 Epoch: 079/150 | Batch 0300/0351 | Loss: 0.2034 Epoch: 079/150 | Train: 96.44% | Validation: 85.64% | Best Validation (Ep. 071): 86.38% Time elapsed: 79.55 min Epoch: 080/150 | Batch 0000/0351 | Loss: 0.1483 Epoch: 080/150 | Batch 0100/0351 | Loss: 0.1160 Epoch: 080/150 | Batch 0200/0351 | Loss: 0.1149 Epoch: 080/150 | Batch 0300/0351 | Loss: 0.0615 Epoch: 080/150 | Train: 96.47% | Validation: 85.82% | Best Validation (Ep. 071): 86.38% Time elapsed: 80.55 min Epoch: 081/150 | Batch 0000/0351 | Loss: 0.0710 Epoch: 081/150 | Batch 0100/0351 | Loss: 0.0990 Epoch: 081/150 | Batch 0200/0351 | Loss: 0.1161 Epoch: 081/150 | Batch 0300/0351 | Loss: 0.1241 Epoch: 081/150 | Train: 95.85% | Validation: 84.84% | Best Validation (Ep. 071): 86.38% Time elapsed: 81.55 min Epoch: 082/150 | Batch 0000/0351 | Loss: 0.0717 Epoch: 082/150 | Batch 0100/0351 | Loss: 0.0562 Epoch: 082/150 | Batch 0200/0351 | Loss: 0.3689 Epoch: 082/150 | Batch 0300/0351 | Loss: 0.0948 Epoch: 082/150 | Train: 97.17% | Validation: 86.42% | Best Validation (Ep. 082): 86.42% Time elapsed: 82.56 min Epoch: 083/150 | Batch 0000/0351 | Loss: 0.0828 Epoch: 083/150 | Batch 0100/0351 | Loss: 0.0960 Epoch: 083/150 | Batch 0200/0351 | Loss: 0.0794 Epoch: 083/150 | Batch 0300/0351 | Loss: 0.0713 Epoch: 083/150 | Train: 96.49% | Validation: 85.04% | Best Validation (Ep. 082): 86.42% Time elapsed: 83.57 min Epoch: 084/150 | Batch 0000/0351 | Loss: 0.1174 Epoch: 084/150 | Batch 0100/0351 | Loss: 0.0235 Epoch: 084/150 | Batch 0200/0351 | Loss: 0.0819 Epoch: 084/150 | Batch 0300/0351 | Loss: 0.1047 Epoch: 084/150 | Train: 96.77% | Validation: 85.18% | Best Validation (Ep. 
082): 86.42% Time elapsed: 84.57 min Epoch: 085/150 | Batch 0000/0351 | Loss: 0.1802 Epoch: 085/150 | Batch 0100/0351 | Loss: 0.0961 Epoch: 085/150 | Batch 0200/0351 | Loss: 0.0932 Epoch: 085/150 | Batch 0300/0351 | Loss: 0.2067 Epoch: 085/150 | Train: 97.17% | Validation: 86.40% | Best Validation (Ep. 082): 86.42% Time elapsed: 85.58 min Epoch: 086/150 | Batch 0000/0351 | Loss: 0.0612 Epoch: 086/150 | Batch 0100/0351 | Loss: 0.0702 Epoch: 086/150 | Batch 0200/0351 | Loss: 0.1310 Epoch: 086/150 | Batch 0300/0351 | Loss: 0.1841 Epoch: 086/150 | Train: 96.64% | Validation: 86.20% | Best Validation (Ep. 082): 86.42% Time elapsed: 86.59 min Epoch: 087/150 | Batch 0000/0351 | Loss: 0.0958 Epoch: 087/150 | Batch 0100/0351 | Loss: 0.0872 Epoch: 087/150 | Batch 0200/0351 | Loss: 0.0771 Epoch: 087/150 | Batch 0300/0351 | Loss: 0.0835 Epoch: 087/150 | Train: 97.29% | Validation: 86.16% | Best Validation (Ep. 082): 86.42% Time elapsed: 87.60 min Epoch: 088/150 | Batch 0000/0351 | Loss: 0.1683 Epoch: 088/150 | Batch 0100/0351 | Loss: 0.0577 Epoch: 088/150 | Batch 0200/0351 | Loss: 0.0741 Epoch: 088/150 | Batch 0300/0351 | Loss: 0.1083 Epoch: 088/150 | Train: 97.28% | Validation: 85.52% | Best Validation (Ep. 082): 86.42% Time elapsed: 88.60 min Epoch: 089/150 | Batch 0000/0351 | Loss: 0.0245 Epoch: 089/150 | Batch 0100/0351 | Loss: 0.1728 Epoch: 089/150 | Batch 0200/0351 | Loss: 0.0918 Epoch: 089/150 | Batch 0300/0351 | Loss: 0.0738 Epoch: 089/150 | Train: 97.64% | Validation: 86.56% | Best Validation (Ep. 089): 86.56% Time elapsed: 89.60 min Epoch: 090/150 | Batch 0000/0351 | Loss: 0.0435 Epoch: 090/150 | Batch 0100/0351 | Loss: 0.0309 Epoch: 090/150 | Batch 0200/0351 | Loss: 0.0707 Epoch: 090/150 | Batch 0300/0351 | Loss: 0.0990 Epoch: 090/150 | Train: 97.49% | Validation: 86.24% | Best Validation (Ep. 
089): 86.56% Time elapsed: 90.61 min Epoch: 091/150 | Batch 0000/0351 | Loss: 0.0929 Epoch: 091/150 | Batch 0100/0351 | Loss: 0.0753 Epoch: 091/150 | Batch 0200/0351 | Loss: 0.1349 Epoch: 091/150 | Batch 0300/0351 | Loss: 0.0642 Epoch: 091/150 | Train: 96.38% | Validation: 85.82% | Best Validation (Ep. 089): 86.56% Time elapsed: 91.61 min Epoch: 092/150 | Batch 0000/0351 | Loss: 0.1042 Epoch: 092/150 | Batch 0100/0351 | Loss: 0.0771 Epoch: 092/150 | Batch 0200/0351 | Loss: 0.1457 Epoch: 092/150 | Batch 0300/0351 | Loss: 0.0656 Epoch: 092/150 | Train: 97.81% | Validation: 85.96% | Best Validation (Ep. 089): 86.56% Time elapsed: 92.61 min Epoch: 093/150 | Batch 0000/0351 | Loss: 0.0889 Epoch: 093/150 | Batch 0100/0351 | Loss: 0.0979 Epoch: 093/150 | Batch 0200/0351 | Loss: 0.1160 Epoch: 093/150 | Batch 0300/0351 | Loss: 0.0413 Epoch: 093/150 | Train: 97.18% | Validation: 86.10% | Best Validation (Ep. 089): 86.56% Time elapsed: 93.61 min Epoch: 094/150 | Batch 0000/0351 | Loss: 0.1460 Epoch: 094/150 | Batch 0100/0351 | Loss: 0.0323 Epoch: 094/150 | Batch 0200/0351 | Loss: 0.0402 Epoch: 094/150 | Batch 0300/0351 | Loss: 0.0874 Epoch: 094/150 | Train: 97.11% | Validation: 86.08% | Best Validation (Ep. 089): 86.56% Time elapsed: 94.62 min Epoch: 095/150 | Batch 0000/0351 | Loss: 0.1981 Epoch: 095/150 | Batch 0100/0351 | Loss: 0.1609 Epoch: 095/150 | Batch 0200/0351 | Loss: 0.0993 Epoch: 095/150 | Batch 0300/0351 | Loss: 0.0874 Epoch: 095/150 | Train: 97.82% | Validation: 86.48% | Best Validation (Ep. 089): 86.56% Time elapsed: 95.62 min Epoch: 096/150 | Batch 0000/0351 | Loss: 0.0642 Epoch: 096/150 | Batch 0100/0351 | Loss: 0.1158 Epoch: 096/150 | Batch 0200/0351 | Loss: 0.1327 Epoch: 096/150 | Batch 0300/0351 | Loss: 0.0857 Epoch: 096/150 | Train: 97.34% | Validation: 86.06% | Best Validation (Ep. 
089): 86.56% Time elapsed: 96.63 min Epoch: 097/150 | Batch 0000/0351 | Loss: 0.0844 Epoch: 097/150 | Batch 0100/0351 | Loss: 0.1231 Epoch: 097/150 | Batch 0200/0351 | Loss: 0.0546 Epoch: 097/150 | Batch 0300/0351 | Loss: 0.1181 Epoch: 097/150 | Train: 97.48% | Validation: 86.18% | Best Validation (Ep. 089): 86.56% Time elapsed: 97.64 min Epoch: 098/150 | Batch 0000/0351 | Loss: 0.0494 Epoch: 098/150 | Batch 0100/0351 | Loss: 0.1179 Epoch: 098/150 | Batch 0200/0351 | Loss: 0.1638 Epoch: 098/150 | Batch 0300/0351 | Loss: 0.0816 Epoch: 098/150 | Train: 97.34% | Validation: 85.90% | Best Validation (Ep. 089): 86.56% Time elapsed: 98.64 min Epoch: 099/150 | Batch 0000/0351 | Loss: 0.0813 Epoch: 099/150 | Batch 0100/0351 | Loss: 0.0731 Epoch: 099/150 | Batch 0200/0351 | Loss: 0.0622 Epoch: 099/150 | Batch 0300/0351 | Loss: 0.1798 Epoch: 099/150 | Train: 97.91% | Validation: 86.32% | Best Validation (Ep. 089): 86.56% Time elapsed: 99.65 min Epoch: 100/150 | Batch 0000/0351 | Loss: 0.0808 Epoch: 100/150 | Batch 0100/0351 | Loss: 0.0216 Epoch: 100/150 | Batch 0200/0351 | Loss: 0.0742 Epoch: 100/150 | Batch 0300/0351 | Loss: 0.0635 Epoch: 100/150 | Train: 98.06% | Validation: 86.52% | Best Validation (Ep. 089): 86.56% Time elapsed: 100.66 min Epoch: 101/150 | Batch 0000/0351 | Loss: 0.0541 Epoch: 101/150 | Batch 0100/0351 | Loss: 0.1638 Epoch: 101/150 | Batch 0200/0351 | Loss: 0.0995 Epoch: 101/150 | Batch 0300/0351 | Loss: 0.0831 Epoch: 101/150 | Train: 97.41% | Validation: 86.86% | Best Validation (Ep. 101): 86.86% Time elapsed: 101.66 min Epoch: 102/150 | Batch 0000/0351 | Loss: 0.0185 Epoch: 102/150 | Batch 0100/0351 | Loss: 0.1496 Epoch: 102/150 | Batch 0200/0351 | Loss: 0.1314 Epoch: 102/150 | Batch 0300/0351 | Loss: 0.0549 Epoch: 102/150 | Train: 97.78% | Validation: 86.28% | Best Validation (Ep. 
101): 86.86% Time elapsed: 102.66 min Epoch: 103/150 | Batch 0000/0351 | Loss: 0.2051 Epoch: 103/150 | Batch 0100/0351 | Loss: 0.1291 Epoch: 103/150 | Batch 0200/0351 | Loss: 0.1635 Epoch: 103/150 | Batch 0300/0351 | Loss: 0.0531 Epoch: 103/150 | Train: 97.21% | Validation: 86.28% | Best Validation (Ep. 101): 86.86% Time elapsed: 103.67 min Epoch: 104/150 | Batch 0000/0351 | Loss: 0.0366 Epoch: 104/150 | Batch 0100/0351 | Loss: 0.1072 Epoch: 104/150 | Batch 0200/0351 | Loss: 0.0508 Epoch: 104/150 | Batch 0300/0351 | Loss: 0.0789 Epoch: 104/150 | Train: 97.54% | Validation: 86.60% | Best Validation (Ep. 101): 86.86% Time elapsed: 104.66 min Epoch: 105/150 | Batch 0000/0351 | Loss: 0.0787 Epoch: 105/150 | Batch 0100/0351 | Loss: 0.0761 Epoch: 105/150 | Batch 0200/0351 | Loss: 0.0673 Epoch: 105/150 | Batch 0300/0351 | Loss: 0.0826 Epoch: 105/150 | Train: 97.47% | Validation: 86.78% | Best Validation (Ep. 101): 86.86% Time elapsed: 105.66 min Epoch: 106/150 | Batch 0000/0351 | Loss: 0.0462 Epoch: 106/150 | Batch 0100/0351 | Loss: 0.0744 Epoch: 106/150 | Batch 0200/0351 | Loss: 0.1025 Epoch: 106/150 | Batch 0300/0351 | Loss: 0.1440 Epoch: 106/150 | Train: 96.53% | Validation: 85.48% | Best Validation (Ep. 101): 86.86% Time elapsed: 106.67 min Epoch: 107/150 | Batch 0000/0351 | Loss: 0.0566 Epoch: 107/150 | Batch 0100/0351 | Loss: 0.1675 Epoch: 107/150 | Batch 0200/0351 | Loss: 0.0407 Epoch: 107/150 | Batch 0300/0351 | Loss: 0.0543 Epoch: 107/150 | Train: 97.95% | Validation: 86.94% | Best Validation (Ep. 107): 86.94% Time elapsed: 107.67 min Epoch: 108/150 | Batch 0000/0351 | Loss: 0.2163 Epoch: 108/150 | Batch 0100/0351 | Loss: 0.0963 Epoch: 108/150 | Batch 0200/0351 | Loss: 0.1258 Epoch: 108/150 | Batch 0300/0351 | Loss: 0.0979 Epoch: 108/150 | Train: 97.43% | Validation: 86.24% | Best Validation (Ep. 
107): 86.94% Time elapsed: 108.67 min Epoch: 109/150 | Batch 0000/0351 | Loss: 0.0873 Epoch: 109/150 | Batch 0100/0351 | Loss: 0.0255 Epoch: 109/150 | Batch 0200/0351 | Loss: 0.0415 Epoch: 109/150 | Batch 0300/0351 | Loss: 0.0115 Epoch: 109/150 | Train: 97.99% | Validation: 86.72% | Best Validation (Ep. 107): 86.94% Time elapsed: 109.68 min Epoch: 110/150 | Batch 0000/0351 | Loss: 0.0796 Epoch: 110/150 | Batch 0100/0351 | Loss: 0.0529 Epoch: 110/150 | Batch 0200/0351 | Loss: 0.0675 Epoch: 110/150 | Batch 0300/0351 | Loss: 0.2172 Epoch: 110/150 | Train: 97.40% | Validation: 86.62% | Best Validation (Ep. 107): 86.94% Time elapsed: 110.69 min Epoch: 111/150 | Batch 0000/0351 | Loss: 0.0548 Epoch: 111/150 | Batch 0100/0351 | Loss: 0.1361 Epoch: 111/150 | Batch 0200/0351 | Loss: 0.0564 Epoch: 111/150 | Batch 0300/0351 | Loss: 0.0778 Epoch: 111/150 | Train: 98.21% | Validation: 86.96% | Best Validation (Ep. 111): 86.96% Time elapsed: 111.70 min Epoch: 112/150 | Batch 0000/0351 | Loss: 0.0628 Epoch: 112/150 | Batch 0100/0351 | Loss: 0.0384 Epoch: 112/150 | Batch 0200/0351 | Loss: 0.0356 Epoch: 112/150 | Batch 0300/0351 | Loss: 0.0520 Epoch: 112/150 | Train: 97.40% | Validation: 86.48% | Best Validation (Ep. 111): 86.96% Time elapsed: 112.70 min Epoch: 113/150 | Batch 0000/0351 | Loss: 0.1077 Epoch: 113/150 | Batch 0100/0351 | Loss: 0.0919 Epoch: 113/150 | Batch 0200/0351 | Loss: 0.0625 Epoch: 113/150 | Batch 0300/0351 | Loss: 0.2042 Epoch: 113/150 | Train: 97.47% | Validation: 86.56% | Best Validation (Ep. 111): 86.96% Time elapsed: 113.71 min Epoch: 114/150 | Batch 0000/0351 | Loss: 0.1141 Epoch: 114/150 | Batch 0100/0351 | Loss: 0.2226 Epoch: 114/150 | Batch 0200/0351 | Loss: 0.0575 Epoch: 114/150 | Batch 0300/0351 | Loss: 0.1053 Epoch: 114/150 | Train: 98.25% | Validation: 87.02% | Best Validation (Ep. 
114): 87.02% Time elapsed: 114.70 min Epoch: 115/150 | Batch 0000/0351 | Loss: 0.0587 Epoch: 115/150 | Batch 0100/0351 | Loss: 0.0172 Epoch: 115/150 | Batch 0200/0351 | Loss: 0.1545 Epoch: 115/150 | Batch 0300/0351 | Loss: 0.0238 Epoch: 115/150 | Train: 97.81% | Validation: 86.26% | Best Validation (Ep. 114): 87.02% Time elapsed: 115.72 min Epoch: 116/150 | Batch 0000/0351 | Loss: 0.0528 Epoch: 116/150 | Batch 0100/0351 | Loss: 0.0652 Epoch: 116/150 | Batch 0200/0351 | Loss: 0.1118 Epoch: 116/150 | Batch 0300/0351 | Loss: 0.1141 Epoch: 116/150 | Train: 98.32% | Validation: 87.34% | Best Validation (Ep. 116): 87.34% Time elapsed: 116.73 min Epoch: 117/150 | Batch 0000/0351 | Loss: 0.0805 Epoch: 117/150 | Batch 0100/0351 | Loss: 0.0498 Epoch: 117/150 | Batch 0200/0351 | Loss: 0.0670 Epoch: 117/150 | Batch 0300/0351 | Loss: 0.0849 Epoch: 117/150 | Train: 97.85% | Validation: 86.88% | Best Validation (Ep. 116): 87.34% Time elapsed: 117.74 min Epoch: 118/150 | Batch 0000/0351 | Loss: 0.1425 Epoch: 118/150 | Batch 0100/0351 | Loss: 0.1146 Epoch: 118/150 | Batch 0200/0351 | Loss: 0.1465 Epoch: 118/150 | Batch 0300/0351 | Loss: 0.1499 Epoch: 118/150 | Train: 98.29% | Validation: 86.68% | Best Validation (Ep. 116): 87.34% Time elapsed: 118.75 min Epoch: 119/150 | Batch 0000/0351 | Loss: 0.0334 Epoch: 119/150 | Batch 0100/0351 | Loss: 0.1217 Epoch: 119/150 | Batch 0200/0351 | Loss: 0.1002 Epoch: 119/150 | Batch 0300/0351 | Loss: 0.0811 Epoch: 119/150 | Train: 97.04% | Validation: 85.44% | Best Validation (Ep. 116): 87.34% Time elapsed: 119.75 min Epoch: 120/150 | Batch 0000/0351 | Loss: 0.0800 Epoch: 120/150 | Batch 0100/0351 | Loss: 0.1308 Epoch: 120/150 | Batch 0200/0351 | Loss: 0.0954 Epoch: 120/150 | Batch 0300/0351 | Loss: 0.0722 Epoch: 120/150 | Train: 97.96% | Validation: 86.48% | Best Validation (Ep. 
116): 87.34% Time elapsed: 120.76 min Epoch: 121/150 | Batch 0000/0351 | Loss: 0.0633 Epoch: 121/150 | Batch 0100/0351 | Loss: 0.0596 Epoch: 121/150 | Batch 0200/0351 | Loss: 0.0650 Epoch: 121/150 | Batch 0300/0351 | Loss: 0.0858 Epoch: 121/150 | Train: 98.15% | Validation: 87.00% | Best Validation (Ep. 116): 87.34% Time elapsed: 121.77 min Epoch: 122/150 | Batch 0000/0351 | Loss: 0.0432 Epoch: 122/150 | Batch 0100/0351 | Loss: 0.0434 Epoch: 122/150 | Batch 0200/0351 | Loss: 0.0764 Epoch: 122/150 | Batch 0300/0351 | Loss: 0.0908 Epoch: 122/150 | Train: 97.57% | Validation: 85.24% | Best Validation (Ep. 116): 87.34% Time elapsed: 122.77 min Epoch: 123/150 | Batch 0000/0351 | Loss: 0.0707 Epoch: 123/150 | Batch 0100/0351 | Loss: 0.1320 Epoch: 123/150 | Batch 0200/0351 | Loss: 0.0780 Epoch: 123/150 | Batch 0300/0351 | Loss: 0.1008 Epoch: 123/150 | Train: 97.93% | Validation: 86.40% | Best Validation (Ep. 116): 87.34% Time elapsed: 123.78 min Epoch: 124/150 | Batch 0000/0351 | Loss: 0.1180 Epoch: 124/150 | Batch 0100/0351 | Loss: 0.0359 Epoch: 124/150 | Batch 0200/0351 | Loss: 0.2152 Epoch: 124/150 | Batch 0300/0351 | Loss: 0.0431 Epoch: 124/150 | Train: 97.32% | Validation: 86.04% | Best Validation (Ep. 116): 87.34% Time elapsed: 124.80 min Epoch: 125/150 | Batch 0000/0351 | Loss: 0.0701 Epoch: 125/150 | Batch 0100/0351 | Loss: 0.0732 Epoch: 125/150 | Batch 0200/0351 | Loss: 0.0491 Epoch: 125/150 | Batch 0300/0351 | Loss: 0.0963 Epoch: 125/150 | Train: 97.61% | Validation: 86.56% | Best Validation (Ep. 116): 87.34% Time elapsed: 125.81 min Epoch: 126/150 | Batch 0000/0351 | Loss: 0.0894 Epoch: 126/150 | Batch 0100/0351 | Loss: 0.0397 Epoch: 126/150 | Batch 0200/0351 | Loss: 0.1519 Epoch: 126/150 | Batch 0300/0351 | Loss: 0.1437 Epoch: 126/150 | Train: 98.35% | Validation: 87.02% | Best Validation (Ep. 
116): 87.34% Time elapsed: 126.82 min Epoch: 127/150 | Batch 0000/0351 | Loss: 0.0152 Epoch: 127/150 | Batch 0100/0351 | Loss: 0.0303 Epoch: 127/150 | Batch 0200/0351 | Loss: 0.0887 Epoch: 127/150 | Batch 0300/0351 | Loss: 0.1269 Epoch: 127/150 | Train: 97.73% | Validation: 86.18% | Best Validation (Ep. 116): 87.34% Time elapsed: 127.84 min Epoch: 128/150 | Batch 0000/0351 | Loss: 0.1139 Epoch: 128/150 | Batch 0100/0351 | Loss: 0.0522 Epoch: 128/150 | Batch 0200/0351 | Loss: 0.0519 Epoch: 128/150 | Batch 0300/0351 | Loss: 0.0362 Epoch: 128/150 | Train: 98.44% | Validation: 87.12% | Best Validation (Ep. 116): 87.34% Time elapsed: 128.82 min Epoch: 129/150 | Batch 0000/0351 | Loss: 0.0210 Epoch: 129/150 | Batch 0100/0351 | Loss: 0.0894 Epoch: 129/150 | Batch 0200/0351 | Loss: 0.0612 Epoch: 129/150 | Batch 0300/0351 | Loss: 0.0860 Epoch: 129/150 | Train: 98.25% | Validation: 86.14% | Best Validation (Ep. 116): 87.34% Time elapsed: 129.75 min Epoch: 130/150 | Batch 0000/0351 | Loss: 0.0897 Epoch: 130/150 | Batch 0100/0351 | Loss: 0.0551 Epoch: 130/150 | Batch 0200/0351 | Loss: 0.1114 Epoch: 130/150 | Batch 0300/0351 | Loss: 0.1562 Epoch: 130/150 | Train: 98.60% | Validation: 86.82% | Best Validation (Ep. 116): 87.34% Time elapsed: 130.66 min Epoch: 131/150 | Batch 0000/0351 | Loss: 0.0671 Epoch: 131/150 | Batch 0100/0351 | Loss: 0.0875 Epoch: 131/150 | Batch 0200/0351 | Loss: 0.0294 Epoch: 131/150 | Batch 0300/0351 | Loss: 0.0302 Epoch: 131/150 | Train: 98.29% | Validation: 86.80% | Best Validation (Ep. 116): 87.34% Time elapsed: 131.59 min Epoch: 132/150 | Batch 0000/0351 | Loss: 0.0163 Epoch: 132/150 | Batch 0100/0351 | Loss: 0.0185 Epoch: 132/150 | Batch 0200/0351 | Loss: 0.0702 Epoch: 132/150 | Batch 0300/0351 | Loss: 0.0987 Epoch: 132/150 | Train: 98.51% | Validation: 87.52% | Best Validation (Ep. 
132): 87.52% Time elapsed: 132.52 min Epoch: 133/150 | Batch 0000/0351 | Loss: 0.0398 Epoch: 133/150 | Batch 0100/0351 | Loss: 0.1218 Epoch: 133/150 | Batch 0200/0351 | Loss: 0.0623 Epoch: 133/150 | Batch 0300/0351 | Loss: 0.0466 Epoch: 133/150 | Train: 98.55% | Validation: 87.08% | Best Validation (Ep. 132): 87.52% Time elapsed: 133.44 min Epoch: 134/150 | Batch 0000/0351 | Loss: 0.0192 Epoch: 134/150 | Batch 0100/0351 | Loss: 0.1406 Epoch: 134/150 | Batch 0200/0351 | Loss: 0.0589 Epoch: 134/150 | Batch 0300/0351 | Loss: 0.0425 Epoch: 134/150 | Train: 98.48% | Validation: 86.78% | Best Validation (Ep. 132): 87.52% Time elapsed: 134.38 min Epoch: 135/150 | Batch 0000/0351 | Loss: 0.0405 Epoch: 135/150 | Batch 0100/0351 | Loss: 0.0785 Epoch: 135/150 | Batch 0200/0351 | Loss: 0.0673 Epoch: 135/150 | Batch 0300/0351 | Loss: 0.0725 Epoch: 135/150 | Train: 98.24% | Validation: 86.54% | Best Validation (Ep. 132): 87.52% Time elapsed: 135.31 min Epoch: 136/150 | Batch 0000/0351 | Loss: 0.0347 Epoch: 136/150 | Batch 0100/0351 | Loss: 0.0493 Epoch: 136/150 | Batch 0200/0351 | Loss: 0.0695 Epoch: 136/150 | Batch 0300/0351 | Loss: 0.0794 Epoch: 136/150 | Train: 98.49% | Validation: 87.10% | Best Validation (Ep. 132): 87.52% Time elapsed: 136.24 min Epoch: 137/150 | Batch 0000/0351 | Loss: 0.0750 Epoch: 137/150 | Batch 0100/0351 | Loss: 0.0435 Epoch: 137/150 | Batch 0200/0351 | Loss: 0.0639 Epoch: 137/150 | Batch 0300/0351 | Loss: 0.0160 Epoch: 137/150 | Train: 97.93% | Validation: 86.44% | Best Validation (Ep. 132): 87.52% Time elapsed: 137.20 min Epoch: 138/150 | Batch 0000/0351 | Loss: 0.0413 Epoch: 138/150 | Batch 0100/0351 | Loss: 0.0341 Epoch: 138/150 | Batch 0200/0351 | Loss: 0.0776 Epoch: 138/150 | Batch 0300/0351 | Loss: 0.0680 Epoch: 138/150 | Train: 98.35% | Validation: 87.04% | Best Validation (Ep. 
132): 87.52% Time elapsed: 138.12 min Epoch: 139/150 | Batch 0000/0351 | Loss: 0.0345 Epoch: 139/150 | Batch 0100/0351 | Loss: 0.0879 Epoch: 139/150 | Batch 0200/0351 | Loss: 0.0514 Epoch: 139/150 | Batch 0300/0351 | Loss: 0.0627 Epoch: 139/150 | Train: 98.27% | Validation: 87.06% | Best Validation (Ep. 132): 87.52% Time elapsed: 139.02 min Epoch: 140/150 | Batch 0000/0351 | Loss: 0.0489 Epoch: 140/150 | Batch 0100/0351 | Loss: 0.0190 Epoch: 140/150 | Batch 0200/0351 | Loss: 0.0733 Epoch: 140/150 | Batch 0300/0351 | Loss: 0.0771 Epoch: 140/150 | Train: 98.48% | Validation: 87.62% | Best Validation (Ep. 140): 87.62% Time elapsed: 139.90 min Epoch: 141/150 | Batch 0000/0351 | Loss: 0.0385 Epoch: 141/150 | Batch 0100/0351 | Loss: 0.0582 Epoch: 141/150 | Batch 0200/0351 | Loss: 0.1261 Epoch: 141/150 | Batch 0300/0351 | Loss: 0.0202 Epoch: 141/150 | Train: 98.70% | Validation: 87.38% | Best Validation (Ep. 140): 87.62% Time elapsed: 140.78 min Epoch: 142/150 | Batch 0000/0351 | Loss: 0.0530 Epoch: 142/150 | Batch 0100/0351 | Loss: 0.0471 Epoch: 142/150 | Batch 0200/0351 | Loss: 0.1733 Epoch: 142/150 | Batch 0300/0351 | Loss: 0.0358 Epoch: 142/150 | Train: 98.41% | Validation: 86.80% | Best Validation (Ep. 140): 87.62% Time elapsed: 141.67 min Epoch: 143/150 | Batch 0000/0351 | Loss: 0.0332 Epoch: 143/150 | Batch 0100/0351 | Loss: 0.0897 Epoch: 143/150 | Batch 0200/0351 | Loss: 0.0450 Epoch: 143/150 | Batch 0300/0351 | Loss: 0.0910 Epoch: 143/150 | Train: 98.10% | Validation: 86.34% | Best Validation (Ep. 140): 87.62% Time elapsed: 142.58 min Epoch: 144/150 | Batch 0000/0351 | Loss: 0.0636 Epoch: 144/150 | Batch 0100/0351 | Loss: 0.0644 Epoch: 144/150 | Batch 0200/0351 | Loss: 0.1483 Epoch: 144/150 | Batch 0300/0351 | Loss: 0.0319 Epoch: 144/150 | Train: 98.47% | Validation: 87.28% | Best Validation (Ep. 
140): 87.62% Time elapsed: 143.47 min Epoch: 145/150 | Batch 0000/0351 | Loss: 0.0393 Epoch: 145/150 | Batch 0100/0351 | Loss: 0.0760 Epoch: 145/150 | Batch 0200/0351 | Loss: 0.0751 Epoch: 145/150 | Batch 0300/0351 | Loss: 0.0192 Epoch: 145/150 | Train: 98.17% | Validation: 86.60% | Best Validation (Ep. 140): 87.62% Time elapsed: 144.38 min Epoch: 146/150 | Batch 0000/0351 | Loss: 0.0297 Epoch: 146/150 | Batch 0100/0351 | Loss: 0.0962 Epoch: 146/150 | Batch 0200/0351 | Loss: 0.0912 Epoch: 146/150 | Batch 0300/0351 | Loss: 0.0400 Epoch: 146/150 | Train: 97.96% | Validation: 86.04% | Best Validation (Ep. 140): 87.62% Time elapsed: 145.29 min Epoch: 147/150 | Batch 0000/0351 | Loss: 0.0399 Epoch: 147/150 | Batch 0100/0351 | Loss: 0.0214 Epoch: 147/150 | Batch 0200/0351 | Loss: 0.0363 Epoch: 147/150 | Batch 0300/0351 | Loss: 0.0488 Epoch: 147/150 | Train: 98.11% | Validation: 86.34% | Best Validation (Ep. 140): 87.62% Time elapsed: 146.17 min Epoch: 148/150 | Batch 0000/0351 | Loss: 0.0603 Epoch: 148/150 | Batch 0100/0351 | Loss: 0.0407 Epoch: 148/150 | Batch 0200/0351 | Loss: 0.0466 Epoch: 148/150 | Batch 0300/0351 | Loss: 0.0600 Epoch: 148/150 | Train: 98.57% | Validation: 87.24% | Best Validation (Ep. 140): 87.62% Time elapsed: 147.06 min Epoch: 149/150 | Batch 0000/0351 | Loss: 0.0535 Epoch: 149/150 | Batch 0100/0351 | Loss: 0.0428 Epoch: 149/150 | Batch 0200/0351 | Loss: 0.0309 Epoch: 149/150 | Batch 0300/0351 | Loss: 0.0754 Epoch: 149/150 | Train: 98.42% | Validation: 87.24% | Best Validation (Ep. 140): 87.62% Time elapsed: 148.01 min Epoch: 150/150 | Batch 0000/0351 | Loss: 0.0446 Epoch: 150/150 | Batch 0100/0351 | Loss: 0.0869 Epoch: 150/150 | Batch 0200/0351 | Loss: 0.0135 Epoch: 150/150 | Batch 0300/0351 | Loss: 0.0404 Epoch: 150/150 | Train: 98.73% | Validation: 87.46% | Best Validation (Ep. 140): 87.62% Time elapsed: 149.14 min Total Training Time: 149.14 min Test accuracy 86.33%
# Restore the best checkpoint (saved during training whenever validation
# accuracy improved) and evaluate it on the held-out test set.
# map_location ensures the checkpoint loads even if it was saved on a GPU
# that is unavailable now (training above used 'cuda:3'); without it,
# torch.load raises on CPU-only machines or boxes with fewer GPUs.
model.load_state_dict(torch.load('mobilenet-v2-best-1.pt', map_location=DEVICE))
# Inference mode: disables dropout and uses running batch-norm statistics.
model.eval()
test_acc = compute_accuracy(model, test_loader, device=DEVICE)
print(f'Test accuracy: {test_acc:.2f}%')
Test accuracy: 86.66%
# Move the model to the CPU so the plotting helpers below can run
# without requiring a GPU.
model.cpu()
# Inverts the Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) transform applied
# to the dataset above, so images display with their original colors.
unnormalizer = UnNormalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
# CIFAR-10 label index -> human-readable class name.
class_dict = dict(enumerate((
    'airplane', 'automobile', 'bird', 'cat', 'deer',
    'dog', 'frog', 'horse', 'ship', 'truck',
)))
show_examples(model=model, data_loader=test_loader, unnormalizer=unnormalizer, class_dict=class_dict)
# Compute the 10x10 confusion matrix over the test set on the CPU
# (the model was moved to the CPU above).
mat = compute_confusion_matrix(model=model, data_loader=test_loader, device=torch.device('cpu'))
# Render the matrix with human-readable class names on the axes.
plot_confusion_matrix(mat, class_names=class_dict.values())
plt.show()