Deep Learning Models -- A collection of various deep learning architectures, models, and tips for TensorFlow and PyTorch in Jupyter Notebooks.
%load_ext watermark
%watermark -a 'Sebastian Raschka' -v -p torch
Sebastian Raschka

CPython 3.6.7
IPython 7.1.1

torch 0.4.1
There are more sophisticated ways to plot the performance of a model during training than using matplotlib. However, sometimes we just want to keep it simple, and this notebook shows how we can create a simple plotting class with matplotlib that updates after every epoch (or, as in the training example further below, after every minibatch iteration).
A simple example of this is shown below (of course, we could extend it to show, e.g., a grid of multiple plots):
import numpy as np
from matplotlib import pyplot as plt
class LivePerformanceplot(object):
    def __init__(self, labels, xlim='auto', ylim='auto',
                 xlabel='Epoch', ylabel='Performance'):
        self.xlim = xlim
        self.ylim = ylim
        fig, ax = plt.subplots(1, 1)
        self.ax = ax
        self.fig = fig
        self.labels = labels
        # create one (initially empty) line per label
        for label in self.labels:
            self.ax.plot([0], label=label)
        if xlim != 'auto':
            self.ax.set_xlim(self.xlim)
        if ylim != 'auto':
            self.ax.set_ylim(self.ylim)
        self.ax.set_xlabel(xlabel)
        self.ax.set_ylabel(ylabel)
        self.ax.legend()
        # running min/max for automatic y-axis rescaling
        self.max = -np.inf
        self.min = np.inf
        self.num_iter = 0

    def update(self, data_dict):
        self.num_iter += 1
        if self.xlim == 'auto':
            self.ax.set_xlim(0, self.num_iter)
        # track whether the running min/max changed for *any* label
        changed = False
        for i, label in enumerate(self.labels):
            line = self.ax.lines[i]
            line.set_xdata(data_dict[label][0])
            line.set_ydata(data_dict[label][1])
            recent_y_value = data_dict[label][1][-1]
            if self.ylim == 'auto':
                if recent_y_value > self.max:
                    self.max = recent_y_value
                    changed = True
                if recent_y_value < self.min:
                    self.min = recent_y_value
                    changed = True
        # rescale the y-axis (with a 5% margin) only if min/max changed;
        # abs() keeps the margin correct for negative values
        if changed:
            self.ax.set_ylim(self.min - abs(self.min)*0.05,
                             self.max + abs(self.max)*0.05)
        self.fig.canvas.draw()
########## Testing
import time
%matplotlib notebook
plot = LivePerformanceplot(labels=['train acc.', 'valid acc.'])
x1 = []
y1 = []
x2 = []
y2 = []
for i in range(10):
    time.sleep(2)
    x1.append(i)
    y1.append(i+1)
    x2.append(i)
    y2.append(i-1)
    data_dict = {'train acc.': [x1, y1],
                 'valid acc.': [x2, y2]}
    plot.update(data_dict=data_dict)
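Note that the `%matplotlib notebook` magic only works in the classic Jupyter Notebook interface; in JupyterLab, the `ipympl`-based `%matplotlib widget` magic plays the same role. If no interactive backend is available, a similar live-plot effect can be emulated by clearing the cell output and redrawing the figure on each update; a minimal sketch (the helper name is made up for illustration and is not part of the class above):

from IPython.display import clear_output
from matplotlib import pyplot as plt


def redraw_plot(data_dict, xlabel='Epoch', ylabel='Performance'):
    # hypothetical helper: clear the previous cell output and
    # redraw the whole figure from scratch on every call
    clear_output(wait=True)
    fig, ax = plt.subplots(1, 1)
    for label, (x, y) in data_dict.items():
        ax.plot(x, y, label=label)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    ax.legend()
    plt.show()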
import numpy as np
import torch
import torch.nn.functional as F
from torchvision import datasets
from torchvision import transforms
from torch.utils.data import DataLoader
##########################
### SETTINGS
##########################
# Device
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Hyperparameters
random_seed = 1
learning_rate = 0.05
num_epochs = 10
batch_size = 128
# Architecture
num_classes = 10
##########################
### MNIST DATASET
##########################
# Note: transforms.ToTensor() scales input images
# to the [0, 1] range

train_dataset = datasets.MNIST(root='data',
                               train=True,
                               transform=transforms.ToTensor(),
                               download=True)

test_dataset = datasets.MNIST(root='data',
                              train=False,
                              transform=transforms.ToTensor())

train_loader = DataLoader(dataset=train_dataset,
                          batch_size=batch_size,
                          shuffle=True)

test_loader = DataLoader(dataset=test_dataset,
                         batch_size=batch_size,
                         shuffle=False)
# Checking the dataset
for images, labels in train_loader:
    print('Image batch dimensions:', images.shape)
    print('Image label dimensions:', labels.shape)
    break
Image batch dimensions: torch.Size([128, 1, 28, 28])
Image label dimensions: torch.Size([128])
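As a quick sanity check, we could also visualize one of the images from the batch fetched above; a minimal sketch (it assumes the `images` and `labels` tensors from the loop above are still in scope):

from matplotlib import pyplot as plt

# plot the first image of the last fetched batch;
# squeeze() drops the channel dimension: 1x28x28 -> 28x28
plt.imshow(images[0].squeeze().numpy(), cmap='gray')
plt.title('Label: %d' % labels[0].item())
plt.show()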
##########################
### MODEL
##########################
class ConvNet(torch.nn.Module):

    def __init__(self, num_classes):
        super(ConvNet, self).__init__()

        # calculate same padding:
        # (w - k + 2*p)/s + 1 = o
        # => p = (s(o-1) - w + k)/2

        # 28x28x1 => 28x28x4
        self.conv_1 = torch.nn.Conv2d(in_channels=1,
                                      out_channels=4,
                                      kernel_size=(3, 3),
                                      stride=(1, 1),
                                      padding=1)  # (1(28-1) - 28 + 3) / 2 = 1
        # 28x28x4 => 14x14x4
        self.pool_1 = torch.nn.MaxPool2d(kernel_size=(2, 2),
                                         stride=(2, 2),
                                         padding=0)  # (2(14-1) - 28 + 2) / 2 = 0
        # 14x14x4 => 14x14x8
        self.conv_2 = torch.nn.Conv2d(in_channels=4,
                                      out_channels=8,
                                      kernel_size=(3, 3),
                                      stride=(1, 1),
                                      padding=1)  # (1(14-1) - 14 + 3) / 2 = 1
        # 14x14x8 => 7x7x8
        self.pool_2 = torch.nn.MaxPool2d(kernel_size=(2, 2),
                                         stride=(2, 2),
                                         padding=0)  # (2(7-1) - 14 + 2) / 2 = 0

        self.linear_1 = torch.nn.Linear(7*7*8, num_classes)

    def forward(self, x):
        out = self.conv_1(x)
        out = F.relu(out)
        out = self.pool_1(out)

        out = self.conv_2(out)
        out = F.relu(out)
        out = self.pool_2(out)

        logits = self.linear_1(out.view(-1, 7*7*8))
        probas = F.softmax(logits, dim=1)
        return logits, probas
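The "same padding" arithmetic in the comments above can be double-checked with a small helper function (hypothetical, added here only for illustration; not part of the original model code):

def same_padding(w, k, s):
    # solve (w - k + 2*p)/s + 1 = o for p, with o = ceil(w/s)
    o = -(-w // s)  # ceiling division via integer arithmetic
    return (s*(o - 1) - w + k) // 2

print(same_padding(w=28, k=3, s=1))  # 1 -> conv_1
print(same_padding(w=28, k=2, s=2))  # 0 -> pool_1
print(same_padding(w=14, k=3, s=1))  # 1 -> conv_2
print(same_padding(w=14, k=2, s=2))  # 0 -> pool_2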
torch.manual_seed(random_seed)
model = ConvNet(num_classes=num_classes)
model = model.to(device)
##########################
### COST AND OPTIMIZER
##########################
cost_fn = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
def compute_accuracy(model, data_loader):
    correct_pred, num_examples = 0, 0
    # disable gradient tracking during evaluation to save memory
    with torch.no_grad():
        for features, targets in data_loader:
            features = features.to(device)
            targets = targets.to(device)
            logits, probas = model(features)
            _, predicted_labels = torch.max(probas, 1)
            num_examples += targets.size(0)
            correct_pred += (predicted_labels == targets).sum()
    return correct_pred.float()/num_examples * 100
minibatch_costs = []
%matplotlib notebook
plot = LivePerformanceplot(labels=['Minibatch Cost'],
                           xlabel='Iteration')

for epoch in range(num_epochs):
    model = model.train()
    for batch_idx, (features, targets) in enumerate(train_loader):

        features = features.to(device)
        targets = targets.to(device)

        ### FORWARD AND BACK PROP
        logits, probas = model(features)
        cost = cost_fn(logits, targets)
        minibatch_costs.append(cost.detach().cpu().numpy())

        optimizer.zero_grad()
        cost.backward()

        ### UPDATE MODEL PARAMETERS
        optimizer.step()

        ### UPDATE PLOT
        data_dict = {'Minibatch Cost': (range(len(minibatch_costs)),
                                        minibatch_costs)}
        plot.update(data_dict=data_dict)

        ### LOGGING
        if not batch_idx % 50:
            print('Epoch: %03d/%03d | Batch %03d/%03d | Cost: %.4f'
                  % (epoch+1, num_epochs, batch_idx,
                     len(train_loader), cost))

    model = model.eval()
    print('Epoch: %03d/%03d training accuracy: %.2f%%' % (
          epoch+1, num_epochs,
          compute_accuracy(model, train_loader)))
Epoch: 001/010 | Batch 000/469 | Cost: 2.3016
Epoch: 001/010 | Batch 050/469 | Cost: 2.2714
Epoch: 001/010 | Batch 100/469 | Cost: 1.6118
Epoch: 001/010 | Batch 150/469 | Cost: 0.8000
Epoch: 001/010 | Batch 200/469 | Cost: 0.5079
Epoch: 001/010 | Batch 250/469 | Cost: 0.3221
Epoch: 001/010 | Batch 300/469 | Cost: 0.2851
Epoch: 001/010 | Batch 350/469 | Cost: 0.3117
Epoch: 001/010 | Batch 400/469 | Cost: 0.2836
Epoch: 001/010 | Batch 450/469 | Cost: 0.3169
Epoch: 001/010 training accuracy: 92.72%
Epoch: 002/010 | Batch 000/469 | Cost: 0.2469
Epoch: 002/010 | Batch 050/469 | Cost: 0.2342
Epoch: 002/010 | Batch 100/469 | Cost: 0.2883
Epoch: 002/010 | Batch 150/469 | Cost: 0.2920
Epoch: 002/010 | Batch 200/469 | Cost: 0.1797
Epoch: 002/010 | Batch 250/469 | Cost: 0.2277
Epoch: 002/010 | Batch 300/469 | Cost: 0.1746
Epoch: 002/010 | Batch 350/469 | Cost: 0.2430
Epoch: 002/010 | Batch 400/469 | Cost: 0.1579
Epoch: 002/010 | Batch 450/469 | Cost: 0.1279
Epoch: 002/010 training accuracy: 95.07%
Epoch: 003/010 | Batch 000/469 | Cost: 0.1224
Epoch: 003/010 | Batch 050/469 | Cost: 0.1998
Epoch: 003/010 | Batch 100/469 | Cost: 0.2211
Epoch: 003/010 | Batch 150/469 | Cost: 0.0906
Epoch: 003/010 | Batch 200/469 | Cost: 0.1502
Epoch: 003/010 | Batch 250/469 | Cost: 0.2392
Epoch: 003/010 | Batch 300/469 | Cost: 0.1108
Epoch: 003/010 | Batch 350/469 | Cost: 0.1736
Epoch: 003/010 | Batch 400/469 | Cost: 0.1428
Epoch: 003/010 | Batch 450/469 | Cost: 0.1253
Epoch: 003/010 training accuracy: 96.22%
Epoch: 004/010 | Batch 000/469 | Cost: 0.1369
Epoch: 004/010 | Batch 050/469 | Cost: 0.1984
Epoch: 004/010 | Batch 100/469 | Cost: 0.1297
Epoch: 004/010 | Batch 150/469 | Cost: 0.1437
Epoch: 004/010 | Batch 200/469 | Cost: 0.1140
Epoch: 004/010 | Batch 250/469 | Cost: 0.0566
Epoch: 004/010 | Batch 300/469 | Cost: 0.1120
Epoch: 004/010 | Batch 350/469 | Cost: 0.1777
Epoch: 004/010 | Batch 400/469 | Cost: 0.2209
Epoch: 004/010 | Batch 450/469 | Cost: 0.1390
Epoch: 004/010 training accuracy: 96.77%
Epoch: 005/010 | Batch 000/469 | Cost: 0.1306
Epoch: 005/010 | Batch 050/469 | Cost: 0.0445
Epoch: 005/010 | Batch 100/469 | Cost: 0.1327
Epoch: 005/010 | Batch 150/469 | Cost: 0.0846
Epoch: 005/010 | Batch 200/469 | Cost: 0.0759
Epoch: 005/010 | Batch 250/469 | Cost: 0.0796
Epoch: 005/010 | Batch 300/469 | Cost: 0.1364
Epoch: 005/010 | Batch 350/469 | Cost: 0.1421
Epoch: 005/010 | Batch 400/469 | Cost: 0.0904
Epoch: 005/010 | Batch 450/469 | Cost: 0.0598
Epoch: 005/010 training accuracy: 97.15%
Epoch: 006/010 | Batch 000/469 | Cost: 0.0723
Epoch: 006/010 | Batch 050/469 | Cost: 0.0481
Epoch: 006/010 | Batch 100/469 | Cost: 0.0386
Epoch: 006/010 | Batch 150/469 | Cost: 0.0420
Epoch: 006/010 | Batch 200/469 | Cost: 0.1176
Epoch: 006/010 | Batch 250/469 | Cost: 0.0718
Epoch: 006/010 | Batch 300/469 | Cost: 0.0537
Epoch: 006/010 | Batch 350/469 | Cost: 0.0231
Epoch: 006/010 | Batch 400/469 | Cost: 0.0939
Epoch: 006/010 | Batch 450/469 | Cost: 0.0848
Epoch: 006/010 training accuracy: 97.43%
Epoch: 007/010 | Batch 000/469 | Cost: 0.1984
Epoch: 007/010 | Batch 050/469 | Cost: 0.0445
Epoch: 007/010 | Batch 100/469 | Cost: 0.0525
Epoch: 007/010 | Batch 150/469 | Cost: 0.0640
Epoch: 007/010 | Batch 200/469 | Cost: 0.0669
Epoch: 007/010 | Batch 250/469 | Cost: 0.0952
Epoch: 007/010 | Batch 300/469 | Cost: 0.0293
Epoch: 007/010 | Batch 350/469 | Cost: 0.0972
Epoch: 007/010 | Batch 400/469 | Cost: 0.1133
Epoch: 007/010 | Batch 450/469 | Cost: 0.0554
Epoch: 007/010 training accuracy: 97.77%
Epoch: 008/010 | Batch 000/469 | Cost: 0.1194
Epoch: 008/010 | Batch 050/469 | Cost: 0.1556
Epoch: 008/010 | Batch 100/469 | Cost: 0.0913
Epoch: 008/010 | Batch 150/469 | Cost: 0.0400
Epoch: 008/010 | Batch 200/469 | Cost: 0.0833
Epoch: 008/010 | Batch 250/469 | Cost: 0.0418
Epoch: 008/010 | Batch 300/469 | Cost: 0.0885
Epoch: 008/010 | Batch 350/469 | Cost: 0.0844
Epoch: 008/010 | Batch 400/469 | Cost: 0.0675
Epoch: 008/010 | Batch 450/469 | Cost: 0.1387
Epoch: 008/010 training accuracy: 97.56%
Epoch: 009/010 | Batch 000/469 | Cost: 0.0827
Epoch: 009/010 | Batch 050/469 | Cost: 0.1027
Epoch: 009/010 | Batch 100/469 | Cost: 0.1812
Epoch: 009/010 | Batch 150/469 | Cost: 0.0660
Epoch: 009/010 | Batch 200/469 | Cost: 0.0881
Epoch: 009/010 | Batch 250/469 | Cost: 0.1576
Epoch: 009/010 | Batch 300/469 | Cost: 0.0478
Epoch: 009/010 | Batch 350/469 | Cost: 0.0779
Epoch: 009/010 | Batch 400/469 | Cost: 0.0407
Epoch: 009/010 | Batch 450/469 | Cost: 0.0236
Epoch: 009/010 training accuracy: 97.83%
Epoch: 010/010 | Batch 000/469 | Cost: 0.0182
Epoch: 010/010 | Batch 050/469 | Cost: 0.0742
Epoch: 010/010 | Batch 100/469 | Cost: 0.0425
Epoch: 010/010 | Batch 150/469 | Cost: 0.0332
Epoch: 010/010 | Batch 200/469 | Cost: 0.0795
Epoch: 010/010 | Batch 250/469 | Cost: 0.0571
Epoch: 010/010 | Batch 300/469 | Cost: 0.1068
Epoch: 010/010 | Batch 350/469 | Cost: 0.1661
Epoch: 010/010 | Batch 400/469 | Cost: 0.0202
Epoch: 010/010 | Batch 450/469 | Cost: 0.0613
Epoch: 010/010 training accuracy: 97.77%
print('Test accuracy: %.2f%%' % (compute_accuracy(model, test_loader)))
Test accuracy: 97.76%
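Finally, the trained model can be used for inference on individual test images; a minimal sketch reusing the objects defined above (the choice of the first batch element is arbitrary and only illustrative):

model = model.eval()

with torch.no_grad():
    # fetch one test minibatch and predict its first image
    features, targets = next(iter(test_loader))
    logits, probas = model(features.to(device))

print('True label: %d | Predicted label: %d'
      % (targets[0].item(), torch.argmax(probas[0]).item()))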