Deep Learning Models -- A collection of various deep learning architectures, models, and tips for TensorFlow and PyTorch in Jupyter Notebooks.
%load_ext watermark
%watermark -a 'Sebastian Raschka' -v -p torch
Author: Sebastian Raschka Python implementation: CPython Python version : 3.8.8 IPython version : 7.21.0 torch: 1.8.1+cu111
import torch
import torchvision
import numpy as np
import matplotlib.pyplot as plt
import sys
sys.path.insert(0, "..") # to include ../helper_evaluate.py etc.
# From local helper files
from helper_utils import set_all_seeds, set_deterministic
from helper_evaluate import compute_confusion_matrix, compute_accuracy
from helper_train import train_classifier_simple_v2
from helper_plotting import plot_training_loss, plot_accuracy, show_examples, plot_confusion_matrix
from helper_data import get_dataloaders_cifar10, UnNormalize
##########################
### SETTINGS
##########################
# Seed shared by every RNG so runs are repeatable.
RANDOM_SEED = 123
# Number of images per minibatch.
BATCH_SIZE = 128
# Total passes over the training set.
NUM_EPOCHS = 150
# Train on GPU index 3 when CUDA is available, otherwise fall back to CPU.
DEVICE = torch.device('cuda:3' if torch.cuda.is_available() else 'cpu')
# Helper from helper_utils — presumably seeds Python/NumPy/PyTorch RNGs; see that module.
set_all_seeds(RANDOM_SEED)
# Optional fully-deterministic (but slower) mode; deliberately left disabled.
#set_deterministic()
##########################
### CIFAR-10 DATASET
##########################
### Note: Network trains about 2-3x faster if you don't
# resize (keeping the orig. 32x32 res.)
# Test acc. I got via the 32x32 was lower though; ~77%

# Both pipelines end with the same channel-wise normalization to [-1, 1].
normalize = torchvision.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))

# Training: upscale, then take a random 64x64 crop (light augmentation).
train_transforms = torchvision.transforms.Compose([
    torchvision.transforms.Resize((70, 70)),
    torchvision.transforms.RandomCrop((64, 64)),
    torchvision.transforms.ToTensor(),
    normalize,
])

# Evaluation: same upscale, but a deterministic center crop.
test_transforms = torchvision.transforms.Compose([
    torchvision.transforms.Resize((70, 70)),
    torchvision.transforms.CenterCrop((64, 64)),
    torchvision.transforms.ToTensor(),
    normalize,
])

train_loader, valid_loader, test_loader = get_dataloaders_cifar10(
    batch_size=BATCH_SIZE,
    validation_fraction=0.1,
    train_transforms=train_transforms,
    test_transforms=test_transforms,
    num_workers=2)
# Sanity-check one batch: shapes and a few labels.
images, labels = next(iter(train_loader))
print('Image batch dimensions:', images.shape)
print('Image label dimensions:', labels.shape)
print('Class labels of 10 examples:', labels[:10])
Files already downloaded and verified Image batch dimensions: torch.Size([128, 3, 64, 64]) Image label dimensions: torch.Size([128]) Class labels of 10 examples: tensor([4, 7, 4, 6, 2, 6, 9, 7, 3, 0])
# Visualize the first 64 training images as a single tiled grid.
plt.figure(figsize=(8, 8))
plt.axis("off")
plt.title("Training Images")
image_grid = torchvision.utils.make_grid(images[:64], padding=2, normalize=True)
# make_grid returns CHW; imshow expects HWC, hence the transpose.
plt.imshow(np.transpose(image_grid, (1, 2, 0)))
<matplotlib.image.AxesImage at 0x7ff6fbea4dc0>
##########################
### MODEL
##########################
# Load the MobileNet v3 (large) architecture from torch hub, randomly
# initialized (pretrained=False) — we train it from scratch on CIFAR-10.
model = torch.hub.load('pytorch/vision:v0.9.0', 'mobilenet_v3_large',
                       pretrained=False)
# Replace the final classification layer so it emits 10 logits (CIFAR-10
# classes). Reading in_features from the existing head instead of
# hard-coding 1280 keeps this line correct for any backbone variant.
model.classifier[-1] = torch.nn.Linear(
    in_features=model.classifier[-1].in_features,
    out_features=10)  # number of class labels in CIFAR-10
model = model.to(DEVICE)
Using cache found in /home/raschka/.cache/torch/hub/pytorch_vision_v0.9.0
# Plain Adam over all parameters; lr=0.01 is fairly aggressive for Adam,
# which likely explains the noisy accuracy curve in the logs below.
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)

# Train, tracking per-minibatch loss and per-epoch train/validation accuracy.
# The checkpoint file is the model with the best validation accuracy so far.
# NOTE: the filename previously said "mobilenet-v2" although the network
# loaded above is mobilenet_v3_large — renamed for consistency.
minibatch_loss_list, train_acc_list, valid_acc_list = train_classifier_simple_v2(
    model=model,
    num_epochs=NUM_EPOCHS,
    train_loader=train_loader,
    valid_loader=valid_loader,
    test_loader=test_loader,
    optimizer=optimizer,
    best_model_save_path='mobilenet-v3-best-1.pt',
    device=DEVICE,
    scheduler_on='valid_acc',
    logging_interval=100)
# Smoothed minibatch-loss curve over the full run.
plot_training_loss(
    minibatch_loss_list=minibatch_loss_list,
    iter_per_epoch=len(train_loader),
    num_epochs=NUM_EPOCHS,
    averaging_iterations=200,
    results_dir=None)
plt.show()

# Train vs. validation accuracy, zoomed to the 60-100% band.
plot_accuracy(
    valid_acc_list=valid_acc_list,
    train_acc_list=train_acc_list,
    results_dir=None)
plt.ylim([60, 100])
plt.show()
Epoch: 001/150 | Batch 0000/0351 | Loss: 2.3076 Epoch: 001/150 | Batch 0100/0351 | Loss: 2.0081 Epoch: 001/150 | Batch 0200/0351 | Loss: 1.9986 Epoch: 001/150 | Batch 0300/0351 | Loss: 1.7279 Epoch: 001/150 | Train: 27.14% | Validation: 26.70% | Best Validation (Ep. 001): 26.70% Time elapsed: 0.89 min Epoch: 002/150 | Batch 0000/0351 | Loss: 1.7480 Epoch: 002/150 | Batch 0100/0351 | Loss: 1.7988 Epoch: 002/150 | Batch 0200/0351 | Loss: 1.6141 Epoch: 002/150 | Batch 0300/0351 | Loss: 1.4341 Epoch: 002/150 | Train: 34.20% | Validation: 34.14% | Best Validation (Ep. 002): 34.14% Time elapsed: 1.80 min Epoch: 003/150 | Batch 0000/0351 | Loss: 1.4082 Epoch: 003/150 | Batch 0100/0351 | Loss: 1.8504 Epoch: 003/150 | Batch 0200/0351 | Loss: 1.9649 Epoch: 003/150 | Batch 0300/0351 | Loss: 1.7910 Epoch: 003/150 | Train: 20.76% | Validation: 20.50% | Best Validation (Ep. 002): 34.14% Time elapsed: 2.70 min Epoch: 004/150 | Batch 0000/0351 | Loss: 2.1098 Epoch: 004/150 | Batch 0100/0351 | Loss: 1.7054 Epoch: 004/150 | Batch 0200/0351 | Loss: 1.6409 Epoch: 004/150 | Batch 0300/0351 | Loss: 1.6833 Epoch: 004/150 | Train: 25.73% | Validation: 26.28% | Best Validation (Ep. 002): 34.14% Time elapsed: 3.60 min Epoch: 005/150 | Batch 0000/0351 | Loss: 1.4867 Epoch: 005/150 | Batch 0100/0351 | Loss: 1.5231 Epoch: 005/150 | Batch 0200/0351 | Loss: 1.5228 Epoch: 005/150 | Batch 0300/0351 | Loss: 1.4508 Epoch: 005/150 | Train: 21.77% | Validation: 21.74% | Best Validation (Ep. 002): 34.14% Time elapsed: 4.51 min Epoch: 006/150 | Batch 0000/0351 | Loss: 1.8173 Epoch: 006/150 | Batch 0100/0351 | Loss: 1.5549 Epoch: 006/150 | Batch 0200/0351 | Loss: 1.6148 Epoch: 006/150 | Batch 0300/0351 | Loss: 1.4918 Epoch: 006/150 | Train: 41.01% | Validation: 42.12% | Best Validation (Ep. 
006): 42.12% Time elapsed: 5.41 min Epoch: 007/150 | Batch 0000/0351 | Loss: 1.5850 Epoch: 007/150 | Batch 0100/0351 | Loss: 3.7750 Epoch: 007/150 | Batch 0200/0351 | Loss: 2.0127 Epoch: 007/150 | Batch 0300/0351 | Loss: 1.8385 Epoch: 007/150 | Train: 35.93% | Validation: 35.82% | Best Validation (Ep. 006): 42.12% Time elapsed: 6.32 min Epoch: 008/150 | Batch 0000/0351 | Loss: 1.6980 Epoch: 008/150 | Batch 0100/0351 | Loss: 1.4443 Epoch: 008/150 | Batch 0200/0351 | Loss: 1.4670 Epoch: 008/150 | Batch 0300/0351 | Loss: 1.2220 Epoch: 008/150 | Train: 48.56% | Validation: 47.88% | Best Validation (Ep. 008): 47.88% Time elapsed: 7.24 min Epoch: 009/150 | Batch 0000/0351 | Loss: 1.3836 Epoch: 009/150 | Batch 0100/0351 | Loss: 1.3566 Epoch: 009/150 | Batch 0200/0351 | Loss: 1.3001 Epoch: 009/150 | Batch 0300/0351 | Loss: 1.4588 Epoch: 009/150 | Train: 45.02% | Validation: 44.60% | Best Validation (Ep. 008): 47.88% Time elapsed: 8.12 min Epoch: 010/150 | Batch 0000/0351 | Loss: 1.3840 Epoch: 010/150 | Batch 0100/0351 | Loss: 1.1890 Epoch: 010/150 | Batch 0200/0351 | Loss: 1.1205 Epoch: 010/150 | Batch 0300/0351 | Loss: 1.3021 Epoch: 010/150 | Train: 53.25% | Validation: 53.34% | Best Validation (Ep. 010): 53.34% Time elapsed: 9.05 min Epoch: 011/150 | Batch 0000/0351 | Loss: 1.1971 Epoch: 011/150 | Batch 0100/0351 | Loss: 1.6346 Epoch: 011/150 | Batch 0200/0351 | Loss: 1.3366 Epoch: 011/150 | Batch 0300/0351 | Loss: 1.5381 Epoch: 011/150 | Train: 41.50% | Validation: 42.68% | Best Validation (Ep. 010): 53.34% Time elapsed: 9.94 min Epoch: 012/150 | Batch 0000/0351 | Loss: 1.1988 Epoch: 012/150 | Batch 0100/0351 | Loss: 1.3623 Epoch: 012/150 | Batch 0200/0351 | Loss: 1.2338 Epoch: 012/150 | Batch 0300/0351 | Loss: 1.3024 Epoch: 012/150 | Train: 49.14% | Validation: 49.96% | Best Validation (Ep. 
010): 53.34% Time elapsed: 10.82 min Epoch: 013/150 | Batch 0000/0351 | Loss: 1.1994 Epoch: 013/150 | Batch 0100/0351 | Loss: 1.2604 Epoch: 013/150 | Batch 0200/0351 | Loss: 1.2285 Epoch: 013/150 | Batch 0300/0351 | Loss: 1.2587 Epoch: 013/150 | Train: 39.40% | Validation: 39.78% | Best Validation (Ep. 010): 53.34% Time elapsed: 11.72 min Epoch: 014/150 | Batch 0000/0351 | Loss: 1.4605 Epoch: 014/150 | Batch 0100/0351 | Loss: 1.2551 Epoch: 014/150 | Batch 0200/0351 | Loss: 1.0527 Epoch: 014/150 | Batch 0300/0351 | Loss: 1.4169 Epoch: 014/150 | Train: 58.45% | Validation: 58.64% | Best Validation (Ep. 014): 58.64% Time elapsed: 12.62 min Epoch: 015/150 | Batch 0000/0351 | Loss: 1.0974 Epoch: 015/150 | Batch 0100/0351 | Loss: 1.2195 Epoch: 015/150 | Batch 0200/0351 | Loss: 1.1839 Epoch: 015/150 | Batch 0300/0351 | Loss: 1.3045 Epoch: 015/150 | Train: 26.79% | Validation: 27.96% | Best Validation (Ep. 014): 58.64% Time elapsed: 13.53 min Epoch: 016/150 | Batch 0000/0351 | Loss: 1.5734 Epoch: 016/150 | Batch 0100/0351 | Loss: 1.3538 Epoch: 016/150 | Batch 0200/0351 | Loss: 1.2023 Epoch: 016/150 | Batch 0300/0351 | Loss: 1.2136 Epoch: 016/150 | Train: 54.88% | Validation: 55.14% | Best Validation (Ep. 014): 58.64% Time elapsed: 14.44 min Epoch: 017/150 | Batch 0000/0351 | Loss: 1.1840 Epoch: 017/150 | Batch 0100/0351 | Loss: 0.9324 Epoch: 017/150 | Batch 0200/0351 | Loss: 0.9516 Epoch: 017/150 | Batch 0300/0351 | Loss: 0.9169 Epoch: 017/150 | Train: 62.53% | Validation: 61.14% | Best Validation (Ep. 017): 61.14% Time elapsed: 15.35 min Epoch: 018/150 | Batch 0000/0351 | Loss: 0.8861 Epoch: 018/150 | Batch 0100/0351 | Loss: 0.8667 Epoch: 018/150 | Batch 0200/0351 | Loss: 1.0753 Epoch: 018/150 | Batch 0300/0351 | Loss: 0.9594 Epoch: 018/150 | Train: 62.97% | Validation: 62.28% | Best Validation (Ep. 
018): 62.28% Time elapsed: 16.24 min Epoch: 019/150 | Batch 0000/0351 | Loss: 1.0962 Epoch: 019/150 | Batch 0100/0351 | Loss: 0.9598 Epoch: 019/150 | Batch 0200/0351 | Loss: 0.8322 Epoch: 019/150 | Batch 0300/0351 | Loss: 0.8892 Epoch: 019/150 | Train: 67.85% | Validation: 67.78% | Best Validation (Ep. 019): 67.78% Time elapsed: 17.15 min Epoch: 020/150 | Batch 0000/0351 | Loss: 0.8361 Epoch: 020/150 | Batch 0100/0351 | Loss: 0.9562 Epoch: 020/150 | Batch 0200/0351 | Loss: 1.1306 Epoch: 020/150 | Batch 0300/0351 | Loss: 0.8919 Epoch: 020/150 | Train: 63.12% | Validation: 62.72% | Best Validation (Ep. 019): 67.78% Time elapsed: 18.05 min Epoch: 021/150 | Batch 0000/0351 | Loss: 0.8295 Epoch: 021/150 | Batch 0100/0351 | Loss: 0.9982 Epoch: 021/150 | Batch 0200/0351 | Loss: 0.8351 Epoch: 021/150 | Batch 0300/0351 | Loss: 0.9628 Epoch: 021/150 | Train: 68.69% | Validation: 67.74% | Best Validation (Ep. 019): 67.78% Time elapsed: 18.96 min Epoch: 022/150 | Batch 0000/0351 | Loss: 0.8451 Epoch: 022/150 | Batch 0100/0351 | Loss: 0.9687 Epoch: 022/150 | Batch 0200/0351 | Loss: 1.0117 Epoch: 022/150 | Batch 0300/0351 | Loss: 1.0060 Epoch: 022/150 | Train: 71.30% | Validation: 69.70% | Best Validation (Ep. 022): 69.70% Time elapsed: 19.87 min Epoch: 023/150 | Batch 0000/0351 | Loss: 0.6442 Epoch: 023/150 | Batch 0100/0351 | Loss: 0.7843 Epoch: 023/150 | Batch 0200/0351 | Loss: 0.7825 Epoch: 023/150 | Batch 0300/0351 | Loss: 0.7985 Epoch: 023/150 | Train: 71.67% | Validation: 70.06% | Best Validation (Ep. 023): 70.06% Time elapsed: 20.78 min Epoch: 024/150 | Batch 0000/0351 | Loss: 0.9101 Epoch: 024/150 | Batch 0100/0351 | Loss: 0.7812 Epoch: 024/150 | Batch 0200/0351 | Loss: 0.9105 Epoch: 024/150 | Batch 0300/0351 | Loss: 0.8026 Epoch: 024/150 | Train: 72.08% | Validation: 68.82% | Best Validation (Ep. 
023): 70.06% Time elapsed: 21.69 min Epoch: 025/150 | Batch 0000/0351 | Loss: 0.5602 Epoch: 025/150 | Batch 0100/0351 | Loss: 0.6070 Epoch: 025/150 | Batch 0200/0351 | Loss: 0.6325 Epoch: 025/150 | Batch 0300/0351 | Loss: 0.6928 Epoch: 025/150 | Train: 70.51% | Validation: 67.60% | Best Validation (Ep. 023): 70.06% Time elapsed: 22.60 min Epoch: 026/150 | Batch 0000/0351 | Loss: 0.5367 Epoch: 026/150 | Batch 0100/0351 | Loss: 0.8989 Epoch: 026/150 | Batch 0200/0351 | Loss: 0.5212 Epoch: 026/150 | Batch 0300/0351 | Loss: 0.7125 Epoch: 026/150 | Train: 75.36% | Validation: 73.14% | Best Validation (Ep. 026): 73.14% Time elapsed: 23.53 min Epoch: 027/150 | Batch 0000/0351 | Loss: 0.8193 Epoch: 027/150 | Batch 0100/0351 | Loss: 0.8982 Epoch: 027/150 | Batch 0200/0351 | Loss: 0.8852 Epoch: 027/150 | Batch 0300/0351 | Loss: 0.4962 Epoch: 027/150 | Train: 77.19% | Validation: 73.48% | Best Validation (Ep. 027): 73.48% Time elapsed: 24.44 min Epoch: 028/150 | Batch 0000/0351 | Loss: 0.5279 Epoch: 028/150 | Batch 0100/0351 | Loss: 0.6709 Epoch: 028/150 | Batch 0200/0351 | Loss: 0.5862 Epoch: 028/150 | Batch 0300/0351 | Loss: 0.6416 Epoch: 028/150 | Train: 75.25% | Validation: 72.28% | Best Validation (Ep. 027): 73.48% Time elapsed: 25.36 min Epoch: 029/150 | Batch 0000/0351 | Loss: 0.4502 Epoch: 029/150 | Batch 0100/0351 | Loss: 0.5605 Epoch: 029/150 | Batch 0200/0351 | Loss: 0.6147 Epoch: 029/150 | Batch 0300/0351 | Loss: 0.5646 Epoch: 029/150 | Train: 78.96% | Validation: 74.88% | Best Validation (Ep. 029): 74.88% Time elapsed: 26.25 min Epoch: 030/150 | Batch 0000/0351 | Loss: 0.5205 Epoch: 030/150 | Batch 0100/0351 | Loss: 0.7171 Epoch: 030/150 | Batch 0200/0351 | Loss: 0.7186 Epoch: 030/150 | Batch 0300/0351 | Loss: 0.5880 Epoch: 030/150 | Train: 67.65% | Validation: 64.82% | Best Validation (Ep. 
029): 74.88% Time elapsed: 27.15 min Epoch: 031/150 | Batch 0000/0351 | Loss: 0.4787 Epoch: 031/150 | Batch 0100/0351 | Loss: 0.4902 Epoch: 031/150 | Batch 0200/0351 | Loss: 0.5266 Epoch: 031/150 | Batch 0300/0351 | Loss: 0.5851 Epoch: 031/150 | Train: 74.92% | Validation: 72.12% | Best Validation (Ep. 029): 74.88% Time elapsed: 28.05 min Epoch: 032/150 | Batch 0000/0351 | Loss: 0.4245 Epoch: 032/150 | Batch 0100/0351 | Loss: 0.6588 Epoch: 032/150 | Batch 0200/0351 | Loss: 0.4831 Epoch: 032/150 | Batch 0300/0351 | Loss: 0.5680 Epoch: 032/150 | Train: 81.41% | Validation: 77.40% | Best Validation (Ep. 032): 77.40% Time elapsed: 28.98 min Epoch: 033/150 | Batch 0000/0351 | Loss: 0.5653 Epoch: 033/150 | Batch 0100/0351 | Loss: 0.4830 Epoch: 033/150 | Batch 0200/0351 | Loss: 0.5547 Epoch: 033/150 | Batch 0300/0351 | Loss: 0.5965 Epoch: 033/150 | Train: 78.42% | Validation: 74.70% | Best Validation (Ep. 032): 77.40% Time elapsed: 29.88 min Epoch: 034/150 | Batch 0000/0351 | Loss: 0.3917 Epoch: 034/150 | Batch 0100/0351 | Loss: 0.5057 Epoch: 034/150 | Batch 0200/0351 | Loss: 0.5771 Epoch: 034/150 | Batch 0300/0351 | Loss: 0.3643 Epoch: 034/150 | Train: 81.28% | Validation: 76.88% | Best Validation (Ep. 032): 77.40% Time elapsed: 30.77 min Epoch: 035/150 | Batch 0000/0351 | Loss: 0.4525 Epoch: 035/150 | Batch 0100/0351 | Loss: 0.4910 Epoch: 035/150 | Batch 0200/0351 | Loss: 0.4328 Epoch: 035/150 | Batch 0300/0351 | Loss: 0.6234 Epoch: 035/150 | Train: 79.15% | Validation: 74.82% | Best Validation (Ep. 032): 77.40% Time elapsed: 31.67 min Epoch: 036/150 | Batch 0000/0351 | Loss: 0.4805 Epoch: 036/150 | Batch 0100/0351 | Loss: 0.3122 Epoch: 036/150 | Batch 0200/0351 | Loss: 0.5138 Epoch: 036/150 | Batch 0300/0351 | Loss: 0.3494 Epoch: 036/150 | Train: 82.85% | Validation: 78.42% | Best Validation (Ep. 
036): 78.42% Time elapsed: 32.57 min Epoch: 037/150 | Batch 0000/0351 | Loss: 0.5090 Epoch: 037/150 | Batch 0100/0351 | Loss: 0.3107 Epoch: 037/150 | Batch 0200/0351 | Loss: 0.4884 Epoch: 037/150 | Batch 0300/0351 | Loss: 0.4592 Epoch: 037/150 | Train: 83.35% | Validation: 78.04% | Best Validation (Ep. 036): 78.42% Time elapsed: 33.47 min Epoch: 038/150 | Batch 0000/0351 | Loss: 0.3824 Epoch: 038/150 | Batch 0100/0351 | Loss: 0.4477 Epoch: 038/150 | Batch 0200/0351 | Loss: 0.3396 Epoch: 038/150 | Batch 0300/0351 | Loss: 0.5993 Epoch: 038/150 | Train: 78.26% | Validation: 72.98% | Best Validation (Ep. 036): 78.42% Time elapsed: 34.36 min Epoch: 039/150 | Batch 0000/0351 | Loss: 0.5105 Epoch: 039/150 | Batch 0100/0351 | Loss: 0.6280 Epoch: 039/150 | Batch 0200/0351 | Loss: 0.4458 Epoch: 039/150 | Batch 0300/0351 | Loss: 0.3871 Epoch: 039/150 | Train: 86.15% | Validation: 79.12% | Best Validation (Ep. 039): 79.12% Time elapsed: 35.28 min Epoch: 040/150 | Batch 0000/0351 | Loss: 0.4583 Epoch: 040/150 | Batch 0100/0351 | Loss: 0.4033 Epoch: 040/150 | Batch 0200/0351 | Loss: 0.4741 Epoch: 040/150 | Batch 0300/0351 | Loss: 0.4504 Epoch: 040/150 | Train: 83.08% | Validation: 77.12% | Best Validation (Ep. 039): 79.12% Time elapsed: 36.18 min Epoch: 041/150 | Batch 0000/0351 | Loss: 0.5335 Epoch: 041/150 | Batch 0100/0351 | Loss: 0.6043 Epoch: 041/150 | Batch 0200/0351 | Loss: 0.5576 Epoch: 041/150 | Batch 0300/0351 | Loss: 0.6202 Epoch: 041/150 | Train: 84.40% | Validation: 77.56% | Best Validation (Ep. 039): 79.12% Time elapsed: 37.08 min Epoch: 042/150 | Batch 0000/0351 | Loss: 0.4168 Epoch: 042/150 | Batch 0100/0351 | Loss: 0.3675 Epoch: 042/150 | Batch 0200/0351 | Loss: 0.3857 Epoch: 042/150 | Batch 0300/0351 | Loss: 0.3439 Epoch: 042/150 | Train: 81.62% | Validation: 75.82% | Best Validation (Ep. 
039): 79.12% Time elapsed: 38.00 min Epoch: 043/150 | Batch 0000/0351 | Loss: 0.3558 Epoch: 043/150 | Batch 0100/0351 | Loss: 0.4043 Epoch: 043/150 | Batch 0200/0351 | Loss: 0.3385 Epoch: 043/150 | Batch 0300/0351 | Loss: 0.3462 Epoch: 043/150 | Train: 85.72% | Validation: 78.50% | Best Validation (Ep. 039): 79.12% Time elapsed: 38.89 min Epoch: 044/150 | Batch 0000/0351 | Loss: 0.3680 Epoch: 044/150 | Batch 0100/0351 | Loss: 0.4422 Epoch: 044/150 | Batch 0200/0351 | Loss: 0.3275 Epoch: 044/150 | Batch 0300/0351 | Loss: 0.4145 Epoch: 044/150 | Train: 86.24% | Validation: 79.18% | Best Validation (Ep. 044): 79.18% Time elapsed: 39.80 min Epoch: 045/150 | Batch 0000/0351 | Loss: 0.4378 Epoch: 045/150 | Batch 0100/0351 | Loss: 0.3889 Epoch: 045/150 | Batch 0200/0351 | Loss: 0.3415 Epoch: 045/150 | Batch 0300/0351 | Loss: 0.3599 Epoch: 045/150 | Train: 86.67% | Validation: 79.36% | Best Validation (Ep. 045): 79.36% Time elapsed: 40.69 min Epoch: 046/150 | Batch 0000/0351 | Loss: 0.2219 Epoch: 046/150 | Batch 0100/0351 | Loss: 0.2548 Epoch: 046/150 | Batch 0200/0351 | Loss: 0.4629 Epoch: 046/150 | Batch 0300/0351 | Loss: 0.2503 Epoch: 046/150 | Train: 84.20% | Validation: 77.34% | Best Validation (Ep. 045): 79.36% Time elapsed: 41.60 min Epoch: 047/150 | Batch 0000/0351 | Loss: 0.3045 Epoch: 047/150 | Batch 0100/0351 | Loss: 0.3014 Epoch: 047/150 | Batch 0200/0351 | Loss: 0.3372 Epoch: 047/150 | Batch 0300/0351 | Loss: 0.3284 Epoch: 047/150 | Train: 88.60% | Validation: 79.78% | Best Validation (Ep. 047): 79.78% Time elapsed: 42.51 min Epoch: 048/150 | Batch 0000/0351 | Loss: 0.2839 Epoch: 048/150 | Batch 0100/0351 | Loss: 0.2149 Epoch: 048/150 | Batch 0200/0351 | Loss: 0.2752 Epoch: 048/150 | Batch 0300/0351 | Loss: 0.4552 Epoch: 048/150 | Train: 78.18% | Validation: 71.58% | Best Validation (Ep. 
047): 79.78% Time elapsed: 43.40 min Epoch: 049/150 | Batch 0000/0351 | Loss: 0.3395 Epoch: 049/150 | Batch 0100/0351 | Loss: 0.2281 Epoch: 049/150 | Batch 0200/0351 | Loss: 0.4258 Epoch: 049/150 | Batch 0300/0351 | Loss: 0.4567 Epoch: 049/150 | Train: 86.77% | Validation: 78.48% | Best Validation (Ep. 047): 79.78% Time elapsed: 44.31 min Epoch: 050/150 | Batch 0000/0351 | Loss: 0.2738 Epoch: 050/150 | Batch 0100/0351 | Loss: 0.2489 Epoch: 050/150 | Batch 0200/0351 | Loss: 0.2980 Epoch: 050/150 | Batch 0300/0351 | Loss: 0.3466 Epoch: 050/150 | Train: 84.23% | Validation: 77.16% | Best Validation (Ep. 047): 79.78% Time elapsed: 45.22 min Epoch: 051/150 | Batch 0000/0351 | Loss: 0.2577 Epoch: 051/150 | Batch 0100/0351 | Loss: 0.3573 Epoch: 051/150 | Batch 0200/0351 | Loss: 0.3086 Epoch: 051/150 | Batch 0300/0351 | Loss: 0.2813 Epoch: 051/150 | Train: 88.37% | Validation: 79.82% | Best Validation (Ep. 051): 79.82% Time elapsed: 46.13 min Epoch: 052/150 | Batch 0000/0351 | Loss: 0.2546 Epoch: 052/150 | Batch 0100/0351 | Loss: 0.3225 Epoch: 052/150 | Batch 0200/0351 | Loss: 0.3223 Epoch: 052/150 | Batch 0300/0351 | Loss: 0.1522 Epoch: 052/150 | Train: 87.78% | Validation: 78.52% | Best Validation (Ep. 051): 79.82% Time elapsed: 47.05 min Epoch: 053/150 | Batch 0000/0351 | Loss: 0.2394 Epoch: 053/150 | Batch 0100/0351 | Loss: 0.1964 Epoch: 053/150 | Batch 0200/0351 | Loss: 0.4107 Epoch: 053/150 | Batch 0300/0351 | Loss: 0.3266 Epoch: 053/150 | Train: 88.44% | Validation: 79.08% | Best Validation (Ep. 051): 79.82% Time elapsed: 47.97 min Epoch: 054/150 | Batch 0000/0351 | Loss: 0.2149 Epoch: 054/150 | Batch 0100/0351 | Loss: 0.1428 Epoch: 054/150 | Batch 0200/0351 | Loss: 0.4115 Epoch: 054/150 | Batch 0300/0351 | Loss: 0.2823 Epoch: 054/150 | Train: 87.15% | Validation: 78.50% | Best Validation (Ep. 
051): 79.82% Time elapsed: 48.87 min Epoch: 055/150 | Batch 0000/0351 | Loss: 0.1850 Epoch: 055/150 | Batch 0100/0351 | Loss: 0.3259 Epoch: 055/150 | Batch 0200/0351 | Loss: 0.1379 Epoch: 055/150 | Batch 0300/0351 | Loss: 0.4955 Epoch: 055/150 | Train: 88.93% | Validation: 79.82% | Best Validation (Ep. 051): 79.82% Time elapsed: 49.78 min Epoch: 056/150 | Batch 0000/0351 | Loss: 0.1273 Epoch: 056/150 | Batch 0100/0351 | Loss: 0.2633 Epoch: 056/150 | Batch 0200/0351 | Loss: 0.3219 Epoch: 056/150 | Batch 0300/0351 | Loss: 0.2709 Epoch: 056/150 | Train: 90.50% | Validation: 80.94% | Best Validation (Ep. 056): 80.94% Time elapsed: 50.66 min Epoch: 057/150 | Batch 0000/0351 | Loss: 0.2265 Epoch: 057/150 | Batch 0100/0351 | Loss: 0.1132 Epoch: 057/150 | Batch 0200/0351 | Loss: 0.1808 Epoch: 057/150 | Batch 0300/0351 | Loss: 0.3208 Epoch: 057/150 | Train: 86.74% | Validation: 77.62% | Best Validation (Ep. 056): 80.94% Time elapsed: 51.57 min Epoch: 058/150 | Batch 0000/0351 | Loss: 0.2893 Epoch: 058/150 | Batch 0100/0351 | Loss: 0.0826 Epoch: 058/150 | Batch 0200/0351 | Loss: 0.2187 Epoch: 058/150 | Batch 0300/0351 | Loss: 0.1858 Epoch: 058/150 | Train: 84.23% | Validation: 77.10% | Best Validation (Ep. 056): 80.94% Time elapsed: 52.50 min Epoch: 059/150 | Batch 0000/0351 | Loss: 0.1600 Epoch: 059/150 | Batch 0100/0351 | Loss: 0.1777 Epoch: 059/150 | Batch 0200/0351 | Loss: 0.2077 Epoch: 059/150 | Batch 0300/0351 | Loss: 0.2975 Epoch: 059/150 | Train: 91.84% | Validation: 82.00% | Best Validation (Ep. 059): 82.00% Time elapsed: 53.42 min Epoch: 060/150 | Batch 0000/0351 | Loss: 0.2777 Epoch: 060/150 | Batch 0100/0351 | Loss: 0.1595 Epoch: 060/150 | Batch 0200/0351 | Loss: 0.3060 Epoch: 060/150 | Batch 0300/0351 | Loss: 0.2316 Epoch: 060/150 | Train: 92.38% | Validation: 81.60% | Best Validation (Ep. 
059): 82.00% Time elapsed: 54.32 min Epoch: 061/150 | Batch 0000/0351 | Loss: 0.3001 Epoch: 061/150 | Batch 0100/0351 | Loss: 0.1128 Epoch: 061/150 | Batch 0200/0351 | Loss: 0.2172 Epoch: 061/150 | Batch 0300/0351 | Loss: 0.2087 Epoch: 061/150 | Train: 89.69% | Validation: 79.06% | Best Validation (Ep. 059): 82.00% Time elapsed: 55.22 min Epoch: 062/150 | Batch 0000/0351 | Loss: 0.1707 Epoch: 062/150 | Batch 0100/0351 | Loss: 0.2166 Epoch: 062/150 | Batch 0200/0351 | Loss: 0.1431 Epoch: 062/150 | Batch 0300/0351 | Loss: 0.3247 Epoch: 062/150 | Train: 89.79% | Validation: 80.16% | Best Validation (Ep. 059): 82.00% Time elapsed: 56.13 min Epoch: 063/150 | Batch 0000/0351 | Loss: 0.0691 Epoch: 063/150 | Batch 0100/0351 | Loss: 0.2311 Epoch: 063/150 | Batch 0200/0351 | Loss: 0.1659 Epoch: 063/150 | Batch 0300/0351 | Loss: 0.1838 Epoch: 063/150 | Train: 91.47% | Validation: 81.24% | Best Validation (Ep. 059): 82.00% Time elapsed: 57.02 min Epoch: 064/150 | Batch 0000/0351 | Loss: 0.0946 Epoch: 064/150 | Batch 0100/0351 | Loss: 0.3341 Epoch: 064/150 | Batch 0200/0351 | Loss: 0.2076 Epoch: 064/150 | Batch 0300/0351 | Loss: 0.3085 Epoch: 064/150 | Train: 91.74% | Validation: 81.30% | Best Validation (Ep. 059): 82.00% Time elapsed: 57.93 min Epoch: 065/150 | Batch 0000/0351 | Loss: 0.1718 Epoch: 065/150 | Batch 0100/0351 | Loss: 0.1101 Epoch: 065/150 | Batch 0200/0351 | Loss: 0.1696 Epoch: 065/150 | Batch 0300/0351 | Loss: 0.2618 Epoch: 065/150 | Train: 88.16% | Validation: 78.00% | Best Validation (Ep. 059): 82.00% Time elapsed: 58.84 min Epoch: 066/150 | Batch 0000/0351 | Loss: 0.1535 Epoch: 066/150 | Batch 0100/0351 | Loss: 0.1903 Epoch: 066/150 | Batch 0200/0351 | Loss: 0.1092 Epoch: 066/150 | Batch 0300/0351 | Loss: 0.1489 Epoch: 066/150 | Train: 93.51% | Validation: 82.04% | Best Validation (Ep. 
066): 82.04% Time elapsed: 59.75 min Epoch: 067/150 | Batch 0000/0351 | Loss: 0.1505 Epoch: 067/150 | Batch 0100/0351 | Loss: 0.1159 Epoch: 067/150 | Batch 0200/0351 | Loss: 0.1865 Epoch: 067/150 | Batch 0300/0351 | Loss: 0.3210 Epoch: 067/150 | Train: 91.89% | Validation: 80.84% | Best Validation (Ep. 066): 82.04% Time elapsed: 60.65 min Epoch: 068/150 | Batch 0000/0351 | Loss: 0.2929 Epoch: 068/150 | Batch 0100/0351 | Loss: 0.2161 Epoch: 068/150 | Batch 0200/0351 | Loss: 0.0927 Epoch: 068/150 | Batch 0300/0351 | Loss: 0.0946 Epoch: 068/150 | Train: 92.23% | Validation: 81.72% | Best Validation (Ep. 066): 82.04% Time elapsed: 61.53 min Epoch: 069/150 | Batch 0000/0351 | Loss: 0.2569 Epoch: 069/150 | Batch 0100/0351 | Loss: 0.2241 Epoch: 069/150 | Batch 0200/0351 | Loss: 0.2921 Epoch: 069/150 | Batch 0300/0351 | Loss: 0.1926 Epoch: 069/150 | Train: 90.67% | Validation: 79.34% | Best Validation (Ep. 066): 82.04% Time elapsed: 62.44 min Epoch: 070/150 | Batch 0000/0351 | Loss: 0.2141 Epoch: 070/150 | Batch 0100/0351 | Loss: 0.0739 Epoch: 070/150 | Batch 0200/0351 | Loss: 0.1878 Epoch: 070/150 | Batch 0300/0351 | Loss: 0.3598 Epoch: 070/150 | Train: 90.87% | Validation: 79.50% | Best Validation (Ep. 066): 82.04% Time elapsed: 63.33 min Epoch: 071/150 | Batch 0000/0351 | Loss: 0.0840 Epoch: 071/150 | Batch 0100/0351 | Loss: 0.1757 Epoch: 071/150 | Batch 0200/0351 | Loss: 0.1921 Epoch: 071/150 | Batch 0300/0351 | Loss: 0.1726 Epoch: 071/150 | Train: 92.80% | Validation: 81.06% | Best Validation (Ep. 066): 82.04% Time elapsed: 64.25 min Epoch: 072/150 | Batch 0000/0351 | Loss: 0.2505 Epoch: 072/150 | Batch 0100/0351 | Loss: 0.2130 Epoch: 072/150 | Batch 0200/0351 | Loss: 0.3150 Epoch: 072/150 | Batch 0300/0351 | Loss: 0.2344 Epoch: 072/150 | Train: 92.71% | Validation: 81.62% | Best Validation (Ep. 
066): 82.04% Time elapsed: 65.15 min Epoch: 073/150 | Batch 0000/0351 | Loss: 0.1604 Epoch: 073/150 | Batch 0100/0351 | Loss: 0.0675 Epoch: 073/150 | Batch 0200/0351 | Loss: 0.1666 Epoch: 073/150 | Batch 0300/0351 | Loss: 0.2114 Epoch: 073/150 | Train: 91.10% | Validation: 80.38% | Best Validation (Ep. 066): 82.04% Time elapsed: 66.06 min Epoch: 074/150 | Batch 0000/0351 | Loss: 0.1462 Epoch: 074/150 | Batch 0100/0351 | Loss: 0.1404 Epoch: 074/150 | Batch 0200/0351 | Loss: 0.2590 Epoch: 074/150 | Batch 0300/0351 | Loss: 0.3946 Epoch: 074/150 | Train: 90.84% | Validation: 79.96% | Best Validation (Ep. 066): 82.04% Time elapsed: 66.99 min Epoch: 075/150 | Batch 0000/0351 | Loss: 0.1017 Epoch: 075/150 | Batch 0100/0351 | Loss: 0.1028 Epoch: 075/150 | Batch 0200/0351 | Loss: 0.1586 Epoch: 075/150 | Batch 0300/0351 | Loss: 0.2607 Epoch: 075/150 | Train: 91.96% | Validation: 80.58% | Best Validation (Ep. 066): 82.04% Time elapsed: 67.91 min Epoch: 076/150 | Batch 0000/0351 | Loss: 0.2107 Epoch: 076/150 | Batch 0100/0351 | Loss: 0.1301 Epoch: 076/150 | Batch 0200/0351 | Loss: 0.2444 Epoch: 076/150 | Batch 0300/0351 | Loss: 0.2098 Epoch: 076/150 | Train: 94.56% | Validation: 82.38% | Best Validation (Ep. 076): 82.38% Time elapsed: 68.83 min Epoch: 077/150 | Batch 0000/0351 | Loss: 0.1109 Epoch: 077/150 | Batch 0100/0351 | Loss: 0.2356 Epoch: 077/150 | Batch 0200/0351 | Loss: 0.1619 Epoch: 077/150 | Batch 0300/0351 | Loss: 0.1171 Epoch: 077/150 | Train: 92.38% | Validation: 81.24% | Best Validation (Ep. 076): 82.38% Time elapsed: 69.75 min Epoch: 078/150 | Batch 0000/0351 | Loss: 0.0935 Epoch: 078/150 | Batch 0100/0351 | Loss: 0.1236 Epoch: 078/150 | Batch 0200/0351 | Loss: 0.2984 Epoch: 078/150 | Batch 0300/0351 | Loss: 0.1144 Epoch: 078/150 | Train: 91.93% | Validation: 80.52% | Best Validation (Ep. 
076): 82.38% Time elapsed: 70.65 min Epoch: 079/150 | Batch 0000/0351 | Loss: 0.0982 Epoch: 079/150 | Batch 0100/0351 | Loss: 0.3034 Epoch: 079/150 | Batch 0200/0351 | Loss: 0.3995 Epoch: 079/150 | Batch 0300/0351 | Loss: 0.1883 Epoch: 079/150 | Train: 93.56% | Validation: 81.10% | Best Validation (Ep. 076): 82.38% Time elapsed: 71.54 min Epoch: 080/150 | Batch 0000/0351 | Loss: 0.1905 Epoch: 080/150 | Batch 0100/0351 | Loss: 0.1965 Epoch: 080/150 | Batch 0200/0351 | Loss: 0.2067 Epoch: 080/150 | Batch 0300/0351 | Loss: 0.1541 Epoch: 080/150 | Train: 93.10% | Validation: 81.54% | Best Validation (Ep. 076): 82.38% Time elapsed: 72.45 min Epoch: 081/150 | Batch 0000/0351 | Loss: 0.1718 Epoch: 081/150 | Batch 0100/0351 | Loss: 0.1761 Epoch: 081/150 | Batch 0200/0351 | Loss: 0.1662 Epoch: 081/150 | Batch 0300/0351 | Loss: 0.1602 Epoch: 081/150 | Train: 94.04% | Validation: 81.80% | Best Validation (Ep. 076): 82.38% Time elapsed: 73.35 min Epoch: 082/150 | Batch 0000/0351 | Loss: 0.0817 Epoch: 082/150 | Batch 0100/0351 | Loss: 0.1749 Epoch: 082/150 | Batch 0200/0351 | Loss: 0.1817 Epoch: 082/150 | Batch 0300/0351 | Loss: 0.0904 Epoch: 082/150 | Train: 94.31% | Validation: 82.28% | Best Validation (Ep. 076): 82.38% Time elapsed: 74.25 min Epoch: 083/150 | Batch 0000/0351 | Loss: 0.0920 Epoch: 083/150 | Batch 0100/0351 | Loss: 0.2125 Epoch: 083/150 | Batch 0200/0351 | Loss: 0.2214 Epoch: 083/150 | Batch 0300/0351 | Loss: 0.1629 Epoch: 083/150 | Train: 92.48% | Validation: 80.70% | Best Validation (Ep. 076): 82.38% Time elapsed: 75.15 min Epoch: 084/150 | Batch 0000/0351 | Loss: 0.0891 Epoch: 084/150 | Batch 0100/0351 | Loss: 0.1995 Epoch: 084/150 | Batch 0200/0351 | Loss: 0.0829 Epoch: 084/150 | Batch 0300/0351 | Loss: 0.1156 Epoch: 084/150 | Train: 92.87% | Validation: 81.30% | Best Validation (Ep. 
076): 82.38% Time elapsed: 76.06 min Epoch: 085/150 | Batch 0000/0351 | Loss: 0.1182 Epoch: 085/150 | Batch 0100/0351 | Loss: 0.0884 Epoch: 085/150 | Batch 0200/0351 | Loss: 0.1645 Epoch: 085/150 | Batch 0300/0351 | Loss: 0.1504 Epoch: 085/150 | Train: 93.82% | Validation: 81.24% | Best Validation (Ep. 076): 82.38% Time elapsed: 76.96 min Epoch: 086/150 | Batch 0000/0351 | Loss: 0.2146 Epoch: 086/150 | Batch 0100/0351 | Loss: 0.0712 Epoch: 086/150 | Batch 0200/0351 | Loss: 0.1569 Epoch: 086/150 | Batch 0300/0351 | Loss: 0.1016 Epoch: 086/150 | Train: 91.57% | Validation: 80.14% | Best Validation (Ep. 076): 82.38% Time elapsed: 77.86 min Epoch: 087/150 | Batch 0000/0351 | Loss: 0.0810 Epoch: 087/150 | Batch 0100/0351 | Loss: 0.1386 Epoch: 087/150 | Batch 0200/0351 | Loss: 0.1217 Epoch: 087/150 | Batch 0300/0351 | Loss: 0.1185 Epoch: 087/150 | Train: 92.52% | Validation: 80.64% | Best Validation (Ep. 076): 82.38% Time elapsed: 78.75 min Epoch: 088/150 | Batch 0000/0351 | Loss: 0.1278 Epoch: 088/150 | Batch 0100/0351 | Loss: 0.1622 Epoch: 088/150 | Batch 0200/0351 | Loss: 0.1776 Epoch: 088/150 | Batch 0300/0351 | Loss: 0.2380 Epoch: 088/150 | Train: 94.33% | Validation: 81.62% | Best Validation (Ep. 076): 82.38% Time elapsed: 79.67 min Epoch: 089/150 | Batch 0000/0351 | Loss: 0.1982 Epoch: 089/150 | Batch 0100/0351 | Loss: 0.0417 Epoch: 089/150 | Batch 0200/0351 | Loss: 0.1964 Epoch: 089/150 | Batch 0300/0351 | Loss: 0.2290 Epoch: 089/150 | Train: 94.04% | Validation: 81.44% | Best Validation (Ep. 076): 82.38% Time elapsed: 80.56 min Epoch: 090/150 | Batch 0000/0351 | Loss: 0.1250 Epoch: 090/150 | Batch 0100/0351 | Loss: 0.0682 Epoch: 090/150 | Batch 0200/0351 | Loss: 0.1032 Epoch: 090/150 | Batch 0300/0351 | Loss: 0.2191 Epoch: 090/150 | Train: 94.79% | Validation: 81.62% | Best Validation (Ep. 
076): 82.38% Time elapsed: 81.49 min Epoch: 091/150 | Batch 0000/0351 | Loss: 0.0404 Epoch: 091/150 | Batch 0100/0351 | Loss: 0.0564 Epoch: 091/150 | Batch 0200/0351 | Loss: 0.0966 Epoch: 091/150 | Batch 0300/0351 | Loss: 0.1710 Epoch: 091/150 | Train: 91.47% | Validation: 79.66% | Best Validation (Ep. 076): 82.38% Time elapsed: 82.38 min Epoch: 092/150 | Batch 0000/0351 | Loss: 0.0731 Epoch: 092/150 | Batch 0100/0351 | Loss: 0.1221 Epoch: 092/150 | Batch 0200/0351 | Loss: 0.2794 Epoch: 092/150 | Batch 0300/0351 | Loss: 0.1209 Epoch: 092/150 | Train: 94.92% | Validation: 82.64% | Best Validation (Ep. 092): 82.64% Time elapsed: 83.30 min Epoch: 093/150 | Batch 0000/0351 | Loss: 0.2372 Epoch: 093/150 | Batch 0100/0351 | Loss: 0.1445 Epoch: 093/150 | Batch 0200/0351 | Loss: 0.1921 Epoch: 093/150 | Batch 0300/0351 | Loss: 0.0236 Epoch: 093/150 | Train: 95.14% | Validation: 82.30% | Best Validation (Ep. 092): 82.64% Time elapsed: 84.20 min Epoch: 094/150 | Batch 0000/0351 | Loss: 0.0785 Epoch: 094/150 | Batch 0100/0351 | Loss: 0.1909 Epoch: 094/150 | Batch 0200/0351 | Loss: 0.2125 Epoch: 094/150 | Batch 0300/0351 | Loss: 0.1625 Epoch: 094/150 | Train: 93.03% | Validation: 81.32% | Best Validation (Ep. 092): 82.64% Time elapsed: 85.09 min Epoch: 095/150 | Batch 0000/0351 | Loss: 0.0902 Epoch: 095/150 | Batch 0100/0351 | Loss: 0.2112 Epoch: 095/150 | Batch 0200/0351 | Loss: 0.0962 Epoch: 095/150 | Batch 0300/0351 | Loss: 0.1209 Epoch: 095/150 | Train: 95.00% | Validation: 81.70% | Best Validation (Ep. 092): 82.64% Time elapsed: 85.97 min Epoch: 096/150 | Batch 0000/0351 | Loss: 0.1120 Epoch: 096/150 | Batch 0100/0351 | Loss: 0.0436 Epoch: 096/150 | Batch 0200/0351 | Loss: 0.1590 Epoch: 096/150 | Batch 0300/0351 | Loss: 0.2382 Epoch: 096/150 | Train: 93.98% | Validation: 80.34% | Best Validation (Ep. 
092): 82.64% Time elapsed: 86.87 min Epoch: 097/150 | Batch 0000/0351 | Loss: 0.1589 Epoch: 097/150 | Batch 0100/0351 | Loss: 0.1190 Epoch: 097/150 | Batch 0200/0351 | Loss: 0.1504 Epoch: 097/150 | Batch 0300/0351 | Loss: 0.1645 Epoch: 097/150 | Train: 95.33% | Validation: 82.10% | Best Validation (Ep. 092): 82.64% Time elapsed: 87.77 min Epoch: 098/150 | Batch 0000/0351 | Loss: 0.1420 Epoch: 098/150 | Batch 0100/0351 | Loss: 0.0377 Epoch: 098/150 | Batch 0200/0351 | Loss: 0.0996 Epoch: 098/150 | Batch 0300/0351 | Loss: 0.1032 Epoch: 098/150 | Train: 94.62% | Validation: 81.36% | Best Validation (Ep. 092): 82.64% Time elapsed: 88.67 min Epoch: 099/150 | Batch 0000/0351 | Loss: 0.0391 Epoch: 099/150 | Batch 0100/0351 | Loss: 0.1606 Epoch: 099/150 | Batch 0200/0351 | Loss: 0.2073 Epoch: 099/150 | Batch 0300/0351 | Loss: 0.2402 Epoch: 099/150 | Train: 92.88% | Validation: 80.32% | Best Validation (Ep. 092): 82.64% Time elapsed: 89.57 min Epoch: 100/150 | Batch 0000/0351 | Loss: 0.0726 Epoch: 100/150 | Batch 0100/0351 | Loss: 0.1524 Epoch: 100/150 | Batch 0200/0351 | Loss: 0.1746 Epoch: 100/150 | Batch 0300/0351 | Loss: 0.1906 Epoch: 100/150 | Train: 95.93% | Validation: 82.74% | Best Validation (Ep. 100): 82.74% Time elapsed: 90.50 min Epoch: 101/150 | Batch 0000/0351 | Loss: 0.0858 Epoch: 101/150 | Batch 0100/0351 | Loss: 0.1001 Epoch: 101/150 | Batch 0200/0351 | Loss: 0.1650 Epoch: 101/150 | Batch 0300/0351 | Loss: 0.1597 Epoch: 101/150 | Train: 93.37% | Validation: 80.10% | Best Validation (Ep. 100): 82.74% Time elapsed: 91.41 min Epoch: 102/150 | Batch 0000/0351 | Loss: 0.0797 Epoch: 102/150 | Batch 0100/0351 | Loss: 0.1231 Epoch: 102/150 | Batch 0200/0351 | Loss: 0.1798 Epoch: 102/150 | Batch 0300/0351 | Loss: 0.1443 Epoch: 102/150 | Train: 95.90% | Validation: 82.84% | Best Validation (Ep. 
102): 82.84% Time elapsed: 92.31 min Epoch: 103/150 | Batch 0000/0351 | Loss: 0.0936 Epoch: 103/150 | Batch 0100/0351 | Loss: 0.1001 Epoch: 103/150 | Batch 0200/0351 | Loss: 0.1355 Epoch: 103/150 | Batch 0300/0351 | Loss: 0.0678 Epoch: 103/150 | Train: 94.93% | Validation: 82.00% | Best Validation (Ep. 102): 82.84% Time elapsed: 93.23 min Epoch: 104/150 | Batch 0000/0351 | Loss: 0.1362 Epoch: 104/150 | Batch 0100/0351 | Loss: 0.1208 Epoch: 104/150 | Batch 0200/0351 | Loss: 0.1131 Epoch: 104/150 | Batch 0300/0351 | Loss: 0.1619 Epoch: 104/150 | Train: 93.76% | Validation: 81.62% | Best Validation (Ep. 102): 82.84% Time elapsed: 94.15 min Epoch: 105/150 | Batch 0000/0351 | Loss: 0.2997 Epoch: 105/150 | Batch 0100/0351 | Loss: 0.1736 Epoch: 105/150 | Batch 0200/0351 | Loss: 0.0378 Epoch: 105/150 | Batch 0300/0351 | Loss: 0.1024 Epoch: 105/150 | Train: 94.19% | Validation: 81.70% | Best Validation (Ep. 102): 82.84% Time elapsed: 95.06 min Epoch: 106/150 | Batch 0000/0351 | Loss: 0.0546 Epoch: 106/150 | Batch 0100/0351 | Loss: 0.1106 Epoch: 106/150 | Batch 0200/0351 | Loss: 0.2450 Epoch: 106/150 | Batch 0300/0351 | Loss: 0.1042 Epoch: 106/150 | Train: 92.87% | Validation: 80.20% | Best Validation (Ep. 102): 82.84% Time elapsed: 95.95 min Epoch: 107/150 | Batch 0000/0351 | Loss: 0.1666 Epoch: 107/150 | Batch 0100/0351 | Loss: 0.0926 Epoch: 107/150 | Batch 0200/0351 | Loss: 0.0725 Epoch: 107/150 | Batch 0300/0351 | Loss: 0.2038 Epoch: 107/150 | Train: 95.00% | Validation: 82.18% | Best Validation (Ep. 102): 82.84% Time elapsed: 96.87 min Epoch: 108/150 | Batch 0000/0351 | Loss: 0.0915 Epoch: 108/150 | Batch 0100/0351 | Loss: 0.1389 Epoch: 108/150 | Batch 0200/0351 | Loss: 0.1174 Epoch: 108/150 | Batch 0300/0351 | Loss: 0.1151 Epoch: 108/150 | Train: 96.44% | Validation: 82.80% | Best Validation (Ep. 
102): 82.84% Time elapsed: 97.79 min Epoch: 109/150 | Batch 0000/0351 | Loss: 0.0689 Epoch: 109/150 | Batch 0100/0351 | Loss: 0.0420 Epoch: 109/150 | Batch 0200/0351 | Loss: 0.0289 Epoch: 109/150 | Batch 0300/0351 | Loss: 0.1547 Epoch: 109/150 | Train: 93.97% | Validation: 81.72% | Best Validation (Ep. 102): 82.84% Time elapsed: 98.70 min Epoch: 110/150 | Batch 0000/0351 | Loss: 0.0867 Epoch: 110/150 | Batch 0100/0351 | Loss: 0.0985 Epoch: 110/150 | Batch 0200/0351 | Loss: 0.0578 Epoch: 110/150 | Batch 0300/0351 | Loss: 0.1379 Epoch: 110/150 | Train: 94.69% | Validation: 81.26% | Best Validation (Ep. 102): 82.84% Time elapsed: 99.59 min Epoch: 111/150 | Batch 0000/0351 | Loss: 0.1124 Epoch: 111/150 | Batch 0100/0351 | Loss: 0.1078 Epoch: 111/150 | Batch 0200/0351 | Loss: 0.1815 Epoch: 111/150 | Batch 0300/0351 | Loss: 0.0390 Epoch: 111/150 | Train: 93.29% | Validation: 80.84% | Best Validation (Ep. 102): 82.84% Time elapsed: 100.48 min Epoch: 112/150 | Batch 0000/0351 | Loss: 0.1794 Epoch: 112/150 | Batch 0100/0351 | Loss: 0.1271 Epoch: 112/150 | Batch 0200/0351 | Loss: 0.0735 Epoch: 112/150 | Batch 0300/0351 | Loss: 0.1526 Epoch: 112/150 | Train: 93.34% | Validation: 80.12% | Best Validation (Ep. 102): 82.84% Time elapsed: 101.39 min Epoch: 113/150 | Batch 0000/0351 | Loss: 0.0799 Epoch: 113/150 | Batch 0100/0351 | Loss: 0.1476 Epoch: 113/150 | Batch 0200/0351 | Loss: 0.1530 Epoch: 113/150 | Batch 0300/0351 | Loss: 0.3245 Epoch: 113/150 | Train: 87.54% | Validation: 77.78% | Best Validation (Ep. 102): 82.84% Time elapsed: 102.28 min Epoch: 114/150 | Batch 0000/0351 | Loss: 0.1217 Epoch: 114/150 | Batch 0100/0351 | Loss: 0.1496 Epoch: 114/150 | Batch 0200/0351 | Loss: 0.1991 Epoch: 114/150 | Batch 0300/0351 | Loss: 0.1456 Epoch: 114/150 | Train: 95.37% | Validation: 81.98% | Best Validation (Ep. 
102): 82.84% Time elapsed: 103.18 min Epoch: 115/150 | Batch 0000/0351 | Loss: 0.1294 Epoch: 115/150 | Batch 0100/0351 | Loss: 0.0696 Epoch: 115/150 | Batch 0200/0351 | Loss: 0.1204 Epoch: 115/150 | Batch 0300/0351 | Loss: 0.1319 Epoch: 115/150 | Train: 96.50% | Validation: 83.10% | Best Validation (Ep. 115): 83.10% Time elapsed: 104.08 min Epoch: 116/150 | Batch 0000/0351 | Loss: 0.0304 Epoch: 116/150 | Batch 0100/0351 | Loss: 0.1355 Epoch: 116/150 | Batch 0200/0351 | Loss: 0.0292 Epoch: 116/150 | Batch 0300/0351 | Loss: 0.2512 Epoch: 116/150 | Train: 95.46% | Validation: 81.92% | Best Validation (Ep. 115): 83.10% Time elapsed: 105.00 min Epoch: 117/150 | Batch 0000/0351 | Loss: 0.0663 Epoch: 117/150 | Batch 0100/0351 | Loss: 0.1144 Epoch: 117/150 | Batch 0200/0351 | Loss: 0.0905 Epoch: 117/150 | Batch 0300/0351 | Loss: 0.0798 Epoch: 117/150 | Train: 94.79% | Validation: 81.90% | Best Validation (Ep. 115): 83.10% Time elapsed: 105.91 min Epoch: 118/150 | Batch 0000/0351 | Loss: 0.1058 Epoch: 118/150 | Batch 0100/0351 | Loss: 0.0485 Epoch: 118/150 | Batch 0200/0351 | Loss: 0.2135 Epoch: 118/150 | Batch 0300/0351 | Loss: 0.0855 Epoch: 118/150 | Train: 94.41% | Validation: 81.80% | Best Validation (Ep. 115): 83.10% Time elapsed: 106.82 min Epoch: 119/150 | Batch 0000/0351 | Loss: 0.1377 Epoch: 119/150 | Batch 0100/0351 | Loss: 0.0731 Epoch: 119/150 | Batch 0200/0351 | Loss: 0.1700 Epoch: 119/150 | Batch 0300/0351 | Loss: 0.1481 Epoch: 119/150 | Train: 86.87% | Validation: 76.78% | Best Validation (Ep. 115): 83.10% Time elapsed: 107.73 min Epoch: 120/150 | Batch 0000/0351 | Loss: 0.2492 Epoch: 120/150 | Batch 0100/0351 | Loss: 0.0990 Epoch: 120/150 | Batch 0200/0351 | Loss: 0.1329 Epoch: 120/150 | Batch 0300/0351 | Loss: 0.2055 Epoch: 120/150 | Train: 96.79% | Validation: 83.14% | Best Validation (Ep. 
120): 83.14% Time elapsed: 108.63 min Epoch: 121/150 | Batch 0000/0351 | Loss: 0.1659 Epoch: 121/150 | Batch 0100/0351 | Loss: 0.0806 Epoch: 121/150 | Batch 0200/0351 | Loss: 0.0725 Epoch: 121/150 | Batch 0300/0351 | Loss: 0.0458 Epoch: 121/150 | Train: 95.18% | Validation: 81.30% | Best Validation (Ep. 120): 83.14% Time elapsed: 109.55 min Epoch: 122/150 | Batch 0000/0351 | Loss: 0.0255 Epoch: 122/150 | Batch 0100/0351 | Loss: 0.0505 Epoch: 122/150 | Batch 0200/0351 | Loss: 0.0403 Epoch: 122/150 | Batch 0300/0351 | Loss: 0.1100 Epoch: 122/150 | Train: 96.72% | Validation: 82.98% | Best Validation (Ep. 120): 83.14% Time elapsed: 110.47 min Epoch: 123/150 | Batch 0000/0351 | Loss: 0.0391 Epoch: 123/150 | Batch 0100/0351 | Loss: 0.0547 Epoch: 123/150 | Batch 0200/0351 | Loss: 0.1412 Epoch: 123/150 | Batch 0300/0351 | Loss: 0.0297 Epoch: 123/150 | Train: 96.69% | Validation: 82.80% | Best Validation (Ep. 120): 83.14% Time elapsed: 111.37 min Epoch: 124/150 | Batch 0000/0351 | Loss: 0.0750 Epoch: 124/150 | Batch 0100/0351 | Loss: 0.0069 Epoch: 124/150 | Batch 0200/0351 | Loss: 0.0485 Epoch: 124/150 | Batch 0300/0351 | Loss: 0.0901 Epoch: 124/150 | Train: 94.90% | Validation: 82.04% | Best Validation (Ep. 120): 83.14% Time elapsed: 112.28 min Epoch: 125/150 | Batch 0000/0351 | Loss: 0.0982 Epoch: 125/150 | Batch 0100/0351 | Loss: 0.2261 Epoch: 125/150 | Batch 0200/0351 | Loss: 0.1558 Epoch: 125/150 | Batch 0300/0351 | Loss: 0.1389 Epoch: 125/150 | Train: 95.00% | Validation: 81.74% | Best Validation (Ep. 120): 83.14% Time elapsed: 113.19 min Epoch: 126/150 | Batch 0000/0351 | Loss: 0.0657 Epoch: 126/150 | Batch 0100/0351 | Loss: 0.0491 Epoch: 126/150 | Batch 0200/0351 | Loss: 0.0675 Epoch: 126/150 | Batch 0300/0351 | Loss: 0.0909 Epoch: 126/150 | Train: 95.15% | Validation: 81.96% | Best Validation (Ep. 
120): 83.14% Time elapsed: 114.12 min Epoch: 127/150 | Batch 0000/0351 | Loss: 0.1183 Epoch: 127/150 | Batch 0100/0351 | Loss: 0.0802 Epoch: 127/150 | Batch 0200/0351 | Loss: 0.0481 Epoch: 127/150 | Batch 0300/0351 | Loss: 0.1392 Epoch: 127/150 | Train: 93.40% | Validation: 80.72% | Best Validation (Ep. 120): 83.14% Time elapsed: 115.02 min Epoch: 128/150 | Batch 0000/0351 | Loss: 0.0932 Epoch: 128/150 | Batch 0100/0351 | Loss: 0.0984 Epoch: 128/150 | Batch 0200/0351 | Loss: 0.1158 Epoch: 128/150 | Batch 0300/0351 | Loss: 0.0775 Epoch: 128/150 | Train: 96.91% | Validation: 83.68% | Best Validation (Ep. 128): 83.68% Time elapsed: 115.93 min Epoch: 129/150 | Batch 0000/0351 | Loss: 0.0557 Epoch: 129/150 | Batch 0100/0351 | Loss: 0.1715 Epoch: 129/150 | Batch 0200/0351 | Loss: 0.1268 Epoch: 129/150 | Batch 0300/0351 | Loss: 0.1197 Epoch: 129/150 | Train: 95.85% | Validation: 82.80% | Best Validation (Ep. 128): 83.68% Time elapsed: 116.84 min Epoch: 130/150 | Batch 0000/0351 | Loss: 0.0283 Epoch: 130/150 | Batch 0100/0351 | Loss: 0.0346 Epoch: 130/150 | Batch 0200/0351 | Loss: 0.1274 Epoch: 130/150 | Batch 0300/0351 | Loss: 0.1212 Epoch: 130/150 | Train: 96.33% | Validation: 82.72% | Best Validation (Ep. 128): 83.68% Time elapsed: 117.74 min Epoch: 131/150 | Batch 0000/0351 | Loss: 0.0414 Epoch: 131/150 | Batch 0100/0351 | Loss: 0.0656 Epoch: 131/150 | Batch 0200/0351 | Loss: 0.0915 Epoch: 131/150 | Batch 0300/0351 | Loss: 0.0546 Epoch: 131/150 | Train: 96.20% | Validation: 82.58% | Best Validation (Ep. 128): 83.68% Time elapsed: 118.66 min Epoch: 132/150 | Batch 0000/0351 | Loss: 0.1191 Epoch: 132/150 | Batch 0100/0351 | Loss: 0.0572 Epoch: 132/150 | Batch 0200/0351 | Loss: 0.1927 Epoch: 132/150 | Batch 0300/0351 | Loss: 0.2336 Epoch: 132/150 | Train: 93.28% | Validation: 81.10% | Best Validation (Ep. 
128): 83.68% Time elapsed: 119.56 min Epoch: 133/150 | Batch 0000/0351 | Loss: 0.0891 Epoch: 133/150 | Batch 0100/0351 | Loss: 0.2360 Epoch: 133/150 | Batch 0200/0351 | Loss: 0.1204 Epoch: 133/150 | Batch 0300/0351 | Loss: 0.0379 Epoch: 133/150 | Train: 96.25% | Validation: 81.84% | Best Validation (Ep. 128): 83.68% Time elapsed: 120.47 min Epoch: 134/150 | Batch 0000/0351 | Loss: 0.1655 Epoch: 134/150 | Batch 0100/0351 | Loss: 0.0333 Epoch: 134/150 | Batch 0200/0351 | Loss: 0.1526 Epoch: 134/150 | Batch 0300/0351 | Loss: 0.0803 Epoch: 134/150 | Train: 97.48% | Validation: 82.52% | Best Validation (Ep. 128): 83.68% Time elapsed: 121.37 min Epoch: 135/150 | Batch 0000/0351 | Loss: 0.0601 Epoch: 135/150 | Batch 0100/0351 | Loss: 0.0403 Epoch: 135/150 | Batch 0200/0351 | Loss: 0.0317 Epoch: 135/150 | Batch 0300/0351 | Loss: 0.1724 Epoch: 135/150 | Train: 96.09% | Validation: 83.20% | Best Validation (Ep. 128): 83.68% Time elapsed: 122.28 min Epoch: 136/150 | Batch 0000/0351 | Loss: 0.1389 Epoch: 136/150 | Batch 0100/0351 | Loss: 0.0485 Epoch: 136/150 | Batch 0200/0351 | Loss: 0.0424 Epoch: 136/150 | Batch 0300/0351 | Loss: 0.0850 Epoch: 136/150 | Train: 95.39% | Validation: 81.46% | Best Validation (Ep. 128): 83.68% Time elapsed: 123.19 min Epoch: 137/150 | Batch 0000/0351 | Loss: 0.0660 Epoch: 137/150 | Batch 0100/0351 | Loss: 0.0195 Epoch: 137/150 | Batch 0200/0351 | Loss: 0.0687 Epoch: 137/150 | Batch 0300/0351 | Loss: 0.1307 Epoch: 137/150 | Train: 95.47% | Validation: 81.46% | Best Validation (Ep. 128): 83.68% Time elapsed: 124.10 min Epoch: 138/150 | Batch 0000/0351 | Loss: 0.0563 Epoch: 138/150 | Batch 0100/0351 | Loss: 0.0946 Epoch: 138/150 | Batch 0200/0351 | Loss: 0.1287 Epoch: 138/150 | Batch 0300/0351 | Loss: 0.1434 Epoch: 138/150 | Train: 92.14% | Validation: 79.82% | Best Validation (Ep. 
128): 83.68% Time elapsed: 125.00 min Epoch: 139/150 | Batch 0000/0351 | Loss: 0.1940 Epoch: 139/150 | Batch 0100/0351 | Loss: 0.0611 Epoch: 139/150 | Batch 0200/0351 | Loss: 0.0174 Epoch: 139/150 | Batch 0300/0351 | Loss: 0.1371 Epoch: 139/150 | Train: 97.69% | Validation: 83.16% | Best Validation (Ep. 128): 83.68% Time elapsed: 125.90 min Epoch: 140/150 | Batch 0000/0351 | Loss: 0.0710 Epoch: 140/150 | Batch 0100/0351 | Loss: 0.1977 Epoch: 140/150 | Batch 0200/0351 | Loss: 0.0838 Epoch: 140/150 | Batch 0300/0351 | Loss: 0.0385 Epoch: 140/150 | Train: 96.69% | Validation: 82.38% | Best Validation (Ep. 128): 83.68% Time elapsed: 126.81 min Epoch: 141/150 | Batch 0000/0351 | Loss: 0.0406 Epoch: 141/150 | Batch 0100/0351 | Loss: 0.0262 Epoch: 141/150 | Batch 0200/0351 | Loss: 0.0543 Epoch: 141/150 | Batch 0300/0351 | Loss: 0.0654 Epoch: 141/150 | Train: 97.80% | Validation: 84.10% | Best Validation (Ep. 141): 84.10% Time elapsed: 127.71 min Epoch: 142/150 | Batch 0000/0351 | Loss: 0.0626 Epoch: 142/150 | Batch 0100/0351 | Loss: 0.1202 Epoch: 142/150 | Batch 0200/0351 | Loss: 0.1296 Epoch: 142/150 | Batch 0300/0351 | Loss: 0.1729 Epoch: 142/150 | Train: 95.42% | Validation: 82.64% | Best Validation (Ep. 141): 84.10% Time elapsed: 128.61 min Epoch: 143/150 | Batch 0000/0351 | Loss: 0.0762 Epoch: 143/150 | Batch 0100/0351 | Loss: 0.1659 Epoch: 143/150 | Batch 0200/0351 | Loss: 0.1049 Epoch: 143/150 | Batch 0300/0351 | Loss: 0.1900 Epoch: 143/150 | Train: 95.98% | Validation: 82.36% | Best Validation (Ep. 141): 84.10% Time elapsed: 129.51 min Epoch: 144/150 | Batch 0000/0351 | Loss: 0.0576 Epoch: 144/150 | Batch 0100/0351 | Loss: 0.1375 Epoch: 144/150 | Batch 0200/0351 | Loss: 0.0754 Epoch: 144/150 | Batch 0300/0351 | Loss: 0.2755 Epoch: 144/150 | Train: 97.39% | Validation: 83.20% | Best Validation (Ep. 
141): 84.10% Time elapsed: 130.43 min Epoch: 145/150 | Batch 0000/0351 | Loss: 0.0937 Epoch: 145/150 | Batch 0100/0351 | Loss: 0.0298 Epoch: 145/150 | Batch 0200/0351 | Loss: 0.1405 Epoch: 145/150 | Batch 0300/0351 | Loss: 0.1688 Epoch: 145/150 | Train: 97.08% | Validation: 83.08% | Best Validation (Ep. 141): 84.10% Time elapsed: 131.34 min Epoch: 146/150 | Batch 0000/0351 | Loss: 0.0034 Epoch: 146/150 | Batch 0100/0351 | Loss: 0.0978 Epoch: 146/150 | Batch 0200/0351 | Loss: 0.0811 Epoch: 146/150 | Batch 0300/0351 | Loss: 0.0107 Epoch: 146/150 | Train: 95.05% | Validation: 81.90% | Best Validation (Ep. 141): 84.10% Time elapsed: 132.25 min Epoch: 147/150 | Batch 0000/0351 | Loss: 0.0606 Epoch: 147/150 | Batch 0100/0351 | Loss: 0.0597 Epoch: 147/150 | Batch 0200/0351 | Loss: 0.1139 Epoch: 147/150 | Batch 0300/0351 | Loss: 0.1187 Epoch: 147/150 | Train: 94.78% | Validation: 81.88% | Best Validation (Ep. 141): 84.10% Time elapsed: 133.16 min Epoch: 148/150 | Batch 0000/0351 | Loss: 0.0951 Epoch: 148/150 | Batch 0100/0351 | Loss: 0.1681 Epoch: 148/150 | Batch 0200/0351 | Loss: 0.1229 Epoch: 148/150 | Batch 0300/0351 | Loss: 0.2133 Epoch: 148/150 | Train: 96.54% | Validation: 82.70% | Best Validation (Ep. 141): 84.10% Time elapsed: 134.07 min Epoch: 149/150 | Batch 0000/0351 | Loss: 0.0888 Epoch: 149/150 | Batch 0100/0351 | Loss: 0.1669 Epoch: 149/150 | Batch 0200/0351 | Loss: 0.0747 Epoch: 149/150 | Batch 0300/0351 | Loss: 0.1029 Epoch: 149/150 | Train: 94.90% | Validation: 81.38% | Best Validation (Ep. 141): 84.10% Time elapsed: 134.97 min Epoch: 150/150 | Batch 0000/0351 | Loss: 0.1235 Epoch: 150/150 | Batch 0100/0351 | Loss: 0.1805 Epoch: 150/150 | Batch 0200/0351 | Loss: 0.1566 Epoch: 150/150 | Batch 0300/0351 | Loss: 0.2097 Epoch: 150/150 | Train: 95.37% | Validation: 81.92% | Best Validation (Ep. 141): 84.10% Time elapsed: 135.89 min Total Training Time: 135.89 min Test accuracy 81.59%
# Restore the best checkpoint (saved at the highest validation accuracy)
# and evaluate it on the held-out test set.
# map_location=DEVICE makes the load robust: without it, a checkpoint
# saved from 'cuda:3' fails to load on a machine where that GPU
# does not exist (e.g. CPU-only inference).
model.load_state_dict(torch.load('mobilenet-v2-best-1.pt', map_location=DEVICE))
model.eval()  # inference mode: disable dropout, use batchnorm running stats
test_acc = compute_accuracy(model, test_loader, device=DEVICE)
print(f'Test accuracy: {test_acc:.2f}%')
Test accuracy: 82.86%
# Move the trained model to the CPU for plotting, and prepare the
# helpers needed to display raw CIFAR-10 images with readable labels.
model.cpu()

# Undo the (0.5, 0.5, 0.5) normalization applied by the test transforms
# so images render with their original colors.
unnormalizer = UnNormalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))

# Map class indices 0-9 to the canonical CIFAR-10 category names.
class_dict = dict(enumerate([
    'airplane', 'automobile', 'bird', 'cat', 'deer',
    'dog', 'frog', 'horse', 'ship', 'truck',
]))
# Display a grid of test images with the model's predicted labels.
show_examples(model=model, data_loader=test_loader, unnormalizer=unnormalizer, class_dict=class_dict)
# Compute the 10x10 confusion matrix on the CPU copy of the model,
# then render it with the class names as axis labels.
cpu_device = torch.device('cpu')
mat = compute_confusion_matrix(model=model,
                               data_loader=test_loader,
                               device=cpu_device)
plot_confusion_matrix(mat, class_names=class_dict.values())
plt.show()