Deep Learning Models -- A collection of various deep learning architectures, models, and tips for TensorFlow and PyTorch in Jupyter Notebooks.
%load_ext watermark
%watermark -a 'Sebastian Raschka' -v -p torch
Sebastian Raschka
CPython 3.6.8
IPython 7.2.0
torch 1.0.0
import time
import numpy as np
from torchvision import datasets
from torchvision import transforms
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch
if torch.cuda.is_available():
    torch.backends.cudnn.deterministic = True
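Setting torch.backends.cudnn.deterministic only addresses one source of randomness. For fully reproducible runs it usually also helps to seed the Python, NumPy, and PyTorch random number generators and to disable cuDNN benchmarking. A minimal sketch (the helper name set_deterministic is an assumption, not part of the original notebook):

import random

def set_deterministic(seed=1):
    # Seed Python, NumPy, and PyTorch (CPU and all visible GPUs)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
        # Trade speed for reproducibility in cuDNN
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False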
##########################
### SETTINGS
##########################
# Device
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Hyperparameters
random_seed = 1
learning_rate = 0.1
num_epochs = 10
batch_size = 64
dropout_prob = 0.5
# Architecture
num_features = 784
num_hidden_1 = 128
num_hidden_2 = 256
num_classes = 10
##########################
### MNIST DATASET
##########################
# Note transforms.ToTensor() scales input images
# to 0-1 range
train_dataset = datasets.MNIST(root='data',
                               train=True,
                               transform=transforms.ToTensor(),
                               download=True)

test_dataset = datasets.MNIST(root='data',
                              train=False,
                              transform=transforms.ToTensor())

train_loader = DataLoader(dataset=train_dataset,
                          batch_size=batch_size,
                          shuffle=True)

test_loader = DataLoader(dataset=test_dataset,
                         batch_size=batch_size,
                         shuffle=False)
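Since transforms.ToTensor() only scales pixels to the [0, 1] range, a zero-centered alternative is to compose it with transforms.Normalize. A sketch (the mean/std values 0.1307 and 0.3081 are the commonly quoted MNIST training-set statistics, not values computed in this notebook):

# Optional: standardize inputs on top of the 0-1 scaling from ToTensor()
custom_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=(0.1307,), std=(0.3081,))
])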
# Checking the dataset
for images, labels in train_loader:
    print('Image batch dimensions:', images.shape)
    print('Image label dimensions:', labels.shape)
    break
Image batch dimensions: torch.Size([64, 1, 28, 28])
Image label dimensions: torch.Size([64])
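Because the model below is a plain multilayer perceptron, each 1x28x28 image batch must be flattened into 784-dimensional row vectors before it enters the first linear layer (this is what the .view(-1, 28*28) calls in the training code do). A quick sanity check on the batch from above:

# Flatten the 4D image batch (N, 1, 28, 28) into a 2D matrix (N, 784)
flattened = images.view(-1, 28*28)
print('Flattened batch dimensions:', flattened.shape)  # torch.Size([64, 784])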
##########################
### MODEL
##########################
class MultilayerPerceptron(torch.nn.Module):

    def __init__(self, num_features, num_classes):
        super(MultilayerPerceptron, self).__init__()

        ### 1st hidden layer
        self.linear_1 = torch.nn.Linear(num_features, num_hidden_1)
        # The following two lines are not necessary, but used here
        # to demonstrate how to access the weights and use a
        # different weight initialization. (By default, PyTorch
        # initializes linear layers with a Kaiming/He-style scheme,
        # which is usually a sensible default.)
        self.linear_1.weight.detach().normal_(0.0, 0.1)
        self.linear_1.bias.detach().zero_()

        ### 2nd hidden layer
        self.linear_2 = torch.nn.Linear(num_hidden_1, num_hidden_2)
        self.linear_2.weight.detach().normal_(0.0, 0.1)
        self.linear_2.bias.detach().zero_()

        ### Output layer
        self.linear_out = torch.nn.Linear(num_hidden_2, num_classes)
        self.linear_out.weight.detach().normal_(0.0, 0.1)
        self.linear_out.bias.detach().zero_()

    def forward(self, x):
        out = self.linear_1(x)
        out = F.relu(out)
        out = F.dropout(out, p=dropout_prob, training=self.training)
        out = self.linear_2(out)
        out = F.relu(out)
        out = F.dropout(out, p=dropout_prob, training=self.training)
        logits = self.linear_out(out)
        probas = F.softmax(logits, dim=1)
        return logits, probas
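For reference, the same architecture could also be expressed more compactly with torch.nn.Sequential, using nn.Dropout modules instead of the functional dropout calls; nn.Dropout is switched off automatically by model.eval(), so no training= flag is needed. A sketch (this variant returns only the logits and is not the model trained below):

sequential_model = torch.nn.Sequential(
    torch.nn.Linear(num_features, num_hidden_1),
    torch.nn.ReLU(),
    torch.nn.Dropout(p=dropout_prob),
    torch.nn.Linear(num_hidden_1, num_hidden_2),
    torch.nn.ReLU(),
    torch.nn.Dropout(p=dropout_prob),
    torch.nn.Linear(num_hidden_2, num_classes)  # emits logits
)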
torch.manual_seed(random_seed)
model = MultilayerPerceptron(num_features=num_features,
                             num_classes=num_classes)
model = model.to(device)
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
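The in-place .detach().normal_() calls in __init__ above are one way to override the defaults; a common alternative is the torch.nn.init module combined with Module.apply, which visits every submodule. A sketch of the same N(0, 0.1) scheme (init_weights is a hypothetical helper, not part of the original notebook):

def init_weights(m):
    # (Re-)initialize every linear layer: N(0, 0.1) weights, zero biases
    if isinstance(m, torch.nn.Linear):
        torch.nn.init.normal_(m.weight, mean=0.0, std=0.1)
        if m.bias is not None:
            torch.nn.init.zeros_(m.bias)

# model.apply(init_weights)  # would re-initialize all layers recursively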
def compute_accuracy(net, data_loader):
    net.eval()
    correct_pred, num_examples = 0, 0
    with torch.no_grad():
        for features, targets in data_loader:
            features = features.view(-1, 28*28).to(device)
            targets = targets.to(device)
            logits, probas = net(features)
            _, predicted_labels = torch.max(probas, 1)
            num_examples += targets.size(0)
            correct_pred += (predicted_labels == targets).sum()
    return correct_pred.float()/num_examples * 100
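Note that softmax is a strictly monotonic transformation, so taking the argmax over the raw logits yields exactly the same predicted labels as taking it over the probabilities; computing probas is only needed when actual probability estimates are of interest. A small check:

# Softmax preserves the ranking of the logits, so both argmax calls agree
example_logits = torch.tensor([[2.0, -1.0, 0.5]])
example_probas = F.softmax(example_logits, dim=1)
assert torch.argmax(example_logits, 1) == torch.argmax(example_probas, 1)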
start_time = time.time()
for epoch in range(num_epochs):
    model.train()
    for batch_idx, (features, targets) in enumerate(train_loader):

        features = features.view(-1, 28*28).to(device)
        targets = targets.to(device)

        ### FORWARD AND BACK PROP
        logits, probas = model(features)
        cost = F.cross_entropy(logits, targets)
        optimizer.zero_grad()

        cost.backward()

        ### UPDATE MODEL PARAMETERS
        optimizer.step()

        ### LOGGING
        if not batch_idx % 50:
            print('Epoch: %03d/%03d | Batch %03d/%03d | Cost: %.4f'
                  % (epoch+1, num_epochs, batch_idx,
                     len(train_loader), cost))

    print('Epoch: %03d/%03d training accuracy: %.2f%%' % (
          epoch+1, num_epochs,
          compute_accuracy(model, train_loader)))

    print('Time elapsed: %.2f min' % ((time.time() - start_time)/60))

print('Total Training Time: %.2f min' % ((time.time() - start_time)/60))
Epoch: 001/010 | Batch 000/938 | Cost: 3.1761
Epoch: 001/010 | Batch 050/938 | Cost: 1.2749
Epoch: 001/010 | Batch 100/938 | Cost: 0.8759
Epoch: 001/010 | Batch 150/938 | Cost: 0.9843
Epoch: 001/010 | Batch 200/938 | Cost: 0.8911
Epoch: 001/010 | Batch 250/938 | Cost: 0.6245
Epoch: 001/010 | Batch 300/938 | Cost: 0.7050
Epoch: 001/010 | Batch 350/938 | Cost: 0.6426
Epoch: 001/010 | Batch 400/938 | Cost: 0.4462
Epoch: 001/010 | Batch 450/938 | Cost: 0.5854
Epoch: 001/010 | Batch 500/938 | Cost: 0.5844
Epoch: 001/010 | Batch 550/938 | Cost: 0.4228
Epoch: 001/010 | Batch 600/938 | Cost: 0.4705
Epoch: 001/010 | Batch 650/938 | Cost: 0.7149
Epoch: 001/010 | Batch 700/938 | Cost: 0.4342
Epoch: 001/010 | Batch 750/938 | Cost: 0.5987
Epoch: 001/010 | Batch 800/938 | Cost: 0.2601
Epoch: 001/010 | Batch 850/938 | Cost: 0.2195
Epoch: 001/010 | Batch 900/938 | Cost: 0.4569
Epoch: 001/010 training accuracy: 93.04%
Time elapsed: 0.22 min
Epoch: 002/010 | Batch 000/938 | Cost: 0.6818
Epoch: 002/010 | Batch 050/938 | Cost: 0.4469
Epoch: 002/010 | Batch 100/938 | Cost: 0.4394
Epoch: 002/010 | Batch 150/938 | Cost: 0.4237
Epoch: 002/010 | Batch 200/938 | Cost: 0.4906
Epoch: 002/010 | Batch 250/938 | Cost: 0.3429
Epoch: 002/010 | Batch 300/938 | Cost: 0.2792
Epoch: 002/010 | Batch 350/938 | Cost: 0.3293
Epoch: 002/010 | Batch 400/938 | Cost: 0.3887
Epoch: 002/010 | Batch 450/938 | Cost: 0.3144
Epoch: 002/010 | Batch 500/938 | Cost: 0.4899
Epoch: 002/010 | Batch 550/938 | Cost: 0.4949
Epoch: 002/010 | Batch 600/938 | Cost: 0.4052
Epoch: 002/010 | Batch 650/938 | Cost: 0.4248
Epoch: 002/010 | Batch 700/938 | Cost: 0.4013
Epoch: 002/010 | Batch 750/938 | Cost: 0.3184
Epoch: 002/010 | Batch 800/938 | Cost: 0.5368
Epoch: 002/010 | Batch 850/938 | Cost: 0.2178
Epoch: 002/010 | Batch 900/938 | Cost: 0.2532
Epoch: 002/010 training accuracy: 94.53%
Time elapsed: 0.44 min
Epoch: 003/010 | Batch 000/938 | Cost: 0.2330
Epoch: 003/010 | Batch 050/938 | Cost: 0.2030
Epoch: 003/010 | Batch 100/938 | Cost: 0.3366
Epoch: 003/010 | Batch 150/938 | Cost: 0.4300
Epoch: 003/010 | Batch 200/938 | Cost: 0.3449
Epoch: 003/010 | Batch 250/938 | Cost: 0.5312
Epoch: 003/010 | Batch 300/938 | Cost: 0.2596
Epoch: 003/010 | Batch 350/938 | Cost: 0.2119
Epoch: 003/010 | Batch 400/938 | Cost: 0.1706
Epoch: 003/010 | Batch 450/938 | Cost: 0.1963
Epoch: 003/010 | Batch 500/938 | Cost: 0.1826
Epoch: 003/010 | Batch 550/938 | Cost: 0.1639
Epoch: 003/010 | Batch 600/938 | Cost: 0.3906
Epoch: 003/010 | Batch 650/938 | Cost: 0.2251
Epoch: 003/010 | Batch 700/938 | Cost: 0.5097
Epoch: 003/010 | Batch 750/938 | Cost: 0.1816
Epoch: 003/010 | Batch 800/938 | Cost: 0.2478
Epoch: 003/010 | Batch 850/938 | Cost: 0.0872
Epoch: 003/010 | Batch 900/938 | Cost: 0.2131
Epoch: 003/010 training accuracy: 95.74%
Time elapsed: 0.66 min
Epoch: 004/010 | Batch 000/938 | Cost: 0.0537
Epoch: 004/010 | Batch 050/938 | Cost: 0.2216
Epoch: 004/010 | Batch 100/938 | Cost: 0.2560
Epoch: 004/010 | Batch 150/938 | Cost: 0.3367
Epoch: 004/010 | Batch 200/938 | Cost: 0.2161
Epoch: 004/010 | Batch 250/938 | Cost: 0.3530
Epoch: 004/010 | Batch 300/938 | Cost: 0.4150
Epoch: 004/010 | Batch 350/938 | Cost: 0.1628
Epoch: 004/010 | Batch 400/938 | Cost: 0.3844
Epoch: 004/010 | Batch 450/938 | Cost: 0.3700
Epoch: 004/010 | Batch 500/938 | Cost: 0.3258
Epoch: 004/010 | Batch 550/938 | Cost: 0.1491
Epoch: 004/010 | Batch 600/938 | Cost: 0.4124
Epoch: 004/010 | Batch 650/938 | Cost: 0.1568
Epoch: 004/010 | Batch 700/938 | Cost: 0.2867
Epoch: 004/010 | Batch 750/938 | Cost: 0.3083
Epoch: 004/010 | Batch 800/938 | Cost: 0.2953
Epoch: 004/010 | Batch 850/938 | Cost: 0.2130
Epoch: 004/010 | Batch 900/938 | Cost: 0.1325
Epoch: 004/010 training accuracy: 95.93%
Time elapsed: 0.88 min
Epoch: 005/010 | Batch 000/938 | Cost: 0.1164
Epoch: 005/010 | Batch 050/938 | Cost: 0.2033
Epoch: 005/010 | Batch 100/938 | Cost: 0.4225
Epoch: 005/010 | Batch 150/938 | Cost: 0.2332
Epoch: 005/010 | Batch 200/938 | Cost: 0.1807
Epoch: 005/010 | Batch 250/938 | Cost: 0.2724
Epoch: 005/010 | Batch 300/938 | Cost: 0.2070
Epoch: 005/010 | Batch 350/938 | Cost: 0.3846
Epoch: 005/010 | Batch 400/938 | Cost: 0.1403
Epoch: 005/010 | Batch 450/938 | Cost: 0.1435
Epoch: 005/010 | Batch 500/938 | Cost: 0.1864
Epoch: 005/010 | Batch 550/938 | Cost: 0.4659
Epoch: 005/010 | Batch 600/938 | Cost: 0.2498
Epoch: 005/010 | Batch 650/938 | Cost: 0.1097
Epoch: 005/010 | Batch 700/938 | Cost: 0.1233
Epoch: 005/010 | Batch 750/938 | Cost: 0.1797
Epoch: 005/010 | Batch 800/938 | Cost: 0.2743
Epoch: 005/010 | Batch 850/938 | Cost: 0.4755
Epoch: 005/010 | Batch 900/938 | Cost: 0.1791
Epoch: 005/010 training accuracy: 96.62%
Time elapsed: 1.10 min
Epoch: 006/010 | Batch 000/938 | Cost: 0.2512
Epoch: 006/010 | Batch 050/938 | Cost: 0.2439
Epoch: 006/010 | Batch 100/938 | Cost: 0.2688
Epoch: 006/010 | Batch 150/938 | Cost: 0.2428
Epoch: 006/010 | Batch 200/938 | Cost: 0.1508
Epoch: 006/010 | Batch 250/938 | Cost: 0.2942
Epoch: 006/010 | Batch 300/938 | Cost: 0.3477
Epoch: 006/010 | Batch 350/938 | Cost: 0.2686
Epoch: 006/010 | Batch 400/938 | Cost: 0.1796
Epoch: 006/010 | Batch 450/938 | Cost: 0.3615
Epoch: 006/010 | Batch 500/938 | Cost: 0.1728
Epoch: 006/010 | Batch 550/938 | Cost: 0.2942
Epoch: 006/010 | Batch 600/938 | Cost: 0.2126
Epoch: 006/010 | Batch 650/938 | Cost: 0.1768
Epoch: 006/010 | Batch 700/938 | Cost: 0.3725
Epoch: 006/010 | Batch 750/938 | Cost: 0.4141
Epoch: 006/010 | Batch 800/938 | Cost: 0.0981
Epoch: 006/010 | Batch 850/938 | Cost: 0.2725
Epoch: 006/010 | Batch 900/938 | Cost: 0.3742
Epoch: 006/010 training accuracy: 96.80%
Time elapsed: 1.33 min
Epoch: 007/010 | Batch 000/938 | Cost: 0.0982
Epoch: 007/010 | Batch 050/938 | Cost: 0.3788
Epoch: 007/010 | Batch 100/938 | Cost: 0.2841
Epoch: 007/010 | Batch 150/938 | Cost: 0.2822
Epoch: 007/010 | Batch 200/938 | Cost: 0.2435
Epoch: 007/010 | Batch 250/938 | Cost: 0.1331
Epoch: 007/010 | Batch 300/938 | Cost: 0.3305
Epoch: 007/010 | Batch 350/938 | Cost: 0.3543
Epoch: 007/010 | Batch 400/938 | Cost: 0.1692
Epoch: 007/010 | Batch 450/938 | Cost: 0.2723
Epoch: 007/010 | Batch 500/938 | Cost: 0.2608
Epoch: 007/010 | Batch 550/938 | Cost: 0.2191
Epoch: 007/010 | Batch 600/938 | Cost: 0.3432
Epoch: 007/010 | Batch 650/938 | Cost: 0.2180
Epoch: 007/010 | Batch 700/938 | Cost: 0.2242
Epoch: 007/010 | Batch 750/938 | Cost: 0.2166
Epoch: 007/010 | Batch 800/938 | Cost: 0.1156
Epoch: 007/010 | Batch 850/938 | Cost: 0.1677
Epoch: 007/010 | Batch 900/938 | Cost: 0.2352
Epoch: 007/010 training accuracy: 97.08%
Time elapsed: 1.55 min
Epoch: 008/010 | Batch 000/938 | Cost: 0.2279
Epoch: 008/010 | Batch 050/938 | Cost: 0.1192
Epoch: 008/010 | Batch 100/938 | Cost: 0.3367
Epoch: 008/010 | Batch 150/938 | Cost: 0.2009
Epoch: 008/010 | Batch 200/938 | Cost: 0.1724
Epoch: 008/010 | Batch 250/938 | Cost: 0.3747
Epoch: 008/010 | Batch 300/938 | Cost: 0.3699
Epoch: 008/010 | Batch 350/938 | Cost: 0.2708
Epoch: 008/010 | Batch 400/938 | Cost: 0.1173
Epoch: 008/010 | Batch 450/938 | Cost: 0.3007
Epoch: 008/010 | Batch 500/938 | Cost: 0.1174
Epoch: 008/010 | Batch 550/938 | Cost: 0.1924
Epoch: 008/010 | Batch 600/938 | Cost: 0.0708
Epoch: 008/010 | Batch 650/938 | Cost: 0.0882
Epoch: 008/010 | Batch 700/938 | Cost: 0.1822
Epoch: 008/010 | Batch 750/938 | Cost: 0.1415
Epoch: 008/010 | Batch 800/938 | Cost: 0.1324
Epoch: 008/010 | Batch 850/938 | Cost: 0.1612
Epoch: 008/010 | Batch 900/938 | Cost: 0.2157
Epoch: 008/010 training accuracy: 97.30%
Time elapsed: 1.77 min
Epoch: 009/010 | Batch 000/938 | Cost: 0.2361
Epoch: 009/010 | Batch 050/938 | Cost: 0.2223
Epoch: 009/010 | Batch 100/938 | Cost: 0.2047
Epoch: 009/010 | Batch 150/938 | Cost: 0.0970
Epoch: 009/010 | Batch 200/938 | Cost: 0.2133
Epoch: 009/010 | Batch 250/938 | Cost: 0.0939
Epoch: 009/010 | Batch 300/938 | Cost: 0.1779
Epoch: 009/010 | Batch 350/938 | Cost: 0.0470
Epoch: 009/010 | Batch 400/938 | Cost: 0.4539
Epoch: 009/010 | Batch 450/938 | Cost: 0.1450
Epoch: 009/010 | Batch 500/938 | Cost: 0.1942
Epoch: 009/010 | Batch 550/938 | Cost: 0.2646
Epoch: 009/010 | Batch 600/938 | Cost: 0.3475
Epoch: 009/010 | Batch 650/938 | Cost: 0.1753
Epoch: 009/010 | Batch 700/938 | Cost: 0.3570
Epoch: 009/010 | Batch 750/938 | Cost: 0.2693
Epoch: 009/010 | Batch 800/938 | Cost: 0.1132
Epoch: 009/010 | Batch 850/938 | Cost: 0.4668
Epoch: 009/010 | Batch 900/938 | Cost: 0.1920
Epoch: 009/010 training accuracy: 97.38%
Time elapsed: 1.99 min
Epoch: 010/010 | Batch 000/938 | Cost: 0.1652
Epoch: 010/010 | Batch 050/938 | Cost: 0.2654
Epoch: 010/010 | Batch 100/938 | Cost: 0.1164
Epoch: 010/010 | Batch 150/938 | Cost: 0.1916
Epoch: 010/010 | Batch 200/938 | Cost: 0.1833
Epoch: 010/010 | Batch 250/938 | Cost: 0.1914
Epoch: 010/010 | Batch 300/938 | Cost: 0.1332
Epoch: 010/010 | Batch 350/938 | Cost: 0.1535
Epoch: 010/010 | Batch 400/938 | Cost: 0.0945
Epoch: 010/010 | Batch 450/938 | Cost: 0.1842
Epoch: 010/010 | Batch 500/938 | Cost: 0.2954
Epoch: 010/010 | Batch 550/938 | Cost: 0.0577
Epoch: 010/010 | Batch 600/938 | Cost: 0.1223
Epoch: 010/010 | Batch 650/938 | Cost: 0.2175
Epoch: 010/010 | Batch 700/938 | Cost: 0.2758
Epoch: 010/010 | Batch 750/938 | Cost: 0.0905
Epoch: 010/010 | Batch 800/938 | Cost: 0.1565
Epoch: 010/010 | Batch 850/938 | Cost: 0.2303
Epoch: 010/010 | Batch 900/938 | Cost: 0.1794
Epoch: 010/010 training accuracy: 97.52%
Time elapsed: 2.20 min
Total Training Time: 2.20 min
print('Test accuracy: %.2f%%' % (compute_accuracy(model, test_loader)))
Test accuracy: 96.71%
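As a final note on the dropout behavior: PyTorch implements inverted dropout, i.e., during training the surviving activations are scaled by 1/(1-p) (a factor of 2 for p=0.5) so that the expected activation matches inference time, where dropout is disabled entirely; this is why compute_accuracy calls net.eval(). A small demonstration:

x = torch.ones(1, 8)
print(F.dropout(x, p=0.5, training=True))   # random mix of 0s and 2s
print(F.dropout(x, p=0.5, training=False))  # identity: all ones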
%watermark -iv
numpy 1.15.4
torch 1.0.0