Deep Learning Models -- A collection of various deep learning architectures, models, and tips for TensorFlow and PyTorch in Jupyter Notebooks.
%load_ext watermark
%watermark -a 'Sebastian Raschka' -v -p torch
Sebastian Raschka

CPython 3.6.8
IPython 7.2.0

torch 1.0.1.post2
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import datasets
from torchvision import transforms
from torch.utils.data import DataLoader
if torch.cuda.is_available():
    torch.backends.cudnn.deterministic = True
##########################
### SETTINGS
##########################
# Device
DEVICE = torch.device("cuda:3" if torch.cuda.is_available() else "cpu")
print('Device:', DEVICE)
# Hyperparameters
random_seed = 1
learning_rate = 0.001
num_epochs = 10
batch_size = 128
# Architecture
num_features = 3 * 32 * 32  # CIFAR-10 images are 3x32x32 (flattened size; not used by the conv layers)
num_classes = 10
##########################
### CIFAR-10 DATASET
##########################
# Note transforms.ToTensor() scales input images
# to 0-1 range
train_dataset = datasets.CIFAR10(root='data',
                                 train=True,
                                 transform=transforms.ToTensor(),
                                 download=True)

test_dataset = datasets.CIFAR10(root='data',
                                train=False,
                                transform=transforms.ToTensor())

train_loader = DataLoader(dataset=train_dataset,
                          batch_size=batch_size,
                          shuffle=True)

test_loader = DataLoader(dataset=test_dataset,
                         batch_size=batch_size,
                         shuffle=False)

# Checking the dataset
for images, labels in train_loader:
    print('Image batch dimensions:', images.shape)
    print('Image label dimensions:', labels.shape)
    break
Device: cuda:3
Files already downloaded and verified
Image batch dimensions: torch.Size([128, 3, 32, 32])
Image label dimensions: torch.Size([128])
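As a quick sanity check of the note above about transforms.ToTensor(), the optional snippet below (not part of the original notebook) verifies that the pixel values have indeed been scaled from the 0-255 uint8 range into [0, 1]:

# Optional sanity check: ToTensor() converts HxWxC uint8 arrays in [0, 255]
# to CxHxW float tensors in [0.0, 1.0]
images, labels = next(iter(train_loader))
print('Min pixel value:', images.min().item())  # expected: close to 0.0
print('Max pixel value:', images.max().item())  # expected: close to 1.0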
##########################
### MODEL
##########################
class VGG16(torch.nn.Module):

    def __init__(self, num_features, num_classes):
        super(VGG16, self).__init__()

        # calculate same padding:
        # (w - k + 2*p)/s + 1 = o
        # => p = (s(o-1) - w + k)/2
        # (a shape check illustrating this arithmetic follows the class definition)

        # block 1: 3 -> 64 channels, 32x32 -> 16x16
        self.block_1 = nn.Sequential(
            nn.Conv2d(in_channels=3,
                      out_channels=64,
                      kernel_size=(3, 3),
                      stride=(1, 1),
                      # (1(32-1) - 32 + 3)/2 = 1
                      padding=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=64,
                      out_channels=64,
                      kernel_size=(3, 3),
                      stride=(1, 1),
                      padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=(2, 2),
                         stride=(2, 2))
        )

        # block 2: 64 -> 128 channels, 16x16 -> 8x8
        self.block_2 = nn.Sequential(
            nn.Conv2d(in_channels=64,
                      out_channels=128,
                      kernel_size=(3, 3),
                      stride=(1, 1),
                      padding=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=128,
                      out_channels=128,
                      kernel_size=(3, 3),
                      stride=(1, 1),
                      padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=(2, 2),
                         stride=(2, 2))
        )

        # block 3: 128 -> 256 channels, 8x8 -> 4x4
        self.block_3 = nn.Sequential(
            nn.Conv2d(in_channels=128,
                      out_channels=256,
                      kernel_size=(3, 3),
                      stride=(1, 1),
                      padding=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=256,
                      out_channels=256,
                      kernel_size=(3, 3),
                      stride=(1, 1),
                      padding=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=256,
                      out_channels=256,
                      kernel_size=(3, 3),
                      stride=(1, 1),
                      padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=(2, 2),
                         stride=(2, 2))
        )

        # block 4: 256 -> 512 channels, 4x4 -> 2x2
        self.block_4 = nn.Sequential(
            nn.Conv2d(in_channels=256,
                      out_channels=512,
                      kernel_size=(3, 3),
                      stride=(1, 1),
                      padding=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=512,
                      out_channels=512,
                      kernel_size=(3, 3),
                      stride=(1, 1),
                      padding=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=512,
                      out_channels=512,
                      kernel_size=(3, 3),
                      stride=(1, 1),
                      padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=(2, 2),
                         stride=(2, 2))
        )

        # block 5: 512 -> 512 channels, 2x2 -> 1x1
        self.block_5 = nn.Sequential(
            nn.Conv2d(in_channels=512,
                      out_channels=512,
                      kernel_size=(3, 3),
                      stride=(1, 1),
                      padding=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=512,
                      out_channels=512,
                      kernel_size=(3, 3),
                      stride=(1, 1),
                      padding=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=512,
                      out_channels=512,
                      kernel_size=(3, 3),
                      stride=(1, 1),
                      padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=(2, 2),
                         stride=(2, 2))
        )

        # classifier: the 512 x 1 x 1 feature map is flattened into 512 features
        self.classifier = nn.Sequential(
            nn.Linear(512, 4096),
            nn.ReLU(True),
            #nn.Dropout(p=0.5),
            nn.Linear(4096, 4096),
            nn.ReLU(True),
            #nn.Dropout(p=0.5),
            nn.Linear(4096, num_classes),
        )

        # He/Kaiming initialization for all conv and linear layers
        for m in self.modules():
            if isinstance(m, torch.nn.Conv2d) or isinstance(m, torch.nn.Linear):
                nn.init.kaiming_uniform_(m.weight, mode='fan_in', nonlinearity='relu')
                if m.bias is not None:
                    m.bias.detach().zero_()

        #self.avgpool = nn.AdaptiveAvgPool2d((7, 7))

    def forward(self, x):
        x = self.block_1(x)
        x = self.block_2(x)
        x = self.block_3(x)
        x = self.block_4(x)
        x = self.block_5(x)
        #x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        logits = self.classifier(x)
        probas = F.softmax(logits, dim=1)
        return logits, probas
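The "same padding" arithmetic in the comments above can be verified with a dummy forward pass. The block below is an optional addition (not part of the original notebook): with a 3x3 kernel, stride 1, and padding 1 every convolution preserves the spatial resolution, and each 2x2 max-pooling halves it, so a 32x32 CIFAR-10 image shrinks to 1x1 over the five blocks and the classifier receives 512*1*1 = 512 flattened features (hence nn.Linear(512, 4096)).

# Optional shape check on a dummy batch (runs fine on the CPU)
_check_model = VGG16(num_features=num_features, num_classes=num_classes)
_x = torch.rand(2, 3, 32, 32)  # (batch, channels, height, width)
with torch.no_grad():
    for _block in (_check_model.block_1, _check_model.block_2, _check_model.block_3,
                   _check_model.block_4, _check_model.block_5):
        _x = _block(_x)
        print(_x.shape)  # 64x16x16 -> 128x8x8 -> 256x4x4 -> 512x2x2 -> 512x1x1
    _logits, _probas = _check_model(torch.rand(2, 3, 32, 32))
print(_logits.shape, _probas.shape)  # both: torch.Size([2, 10])
del _check_model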
torch.manual_seed(random_seed)
model = VGG16(num_features=num_features,
              num_classes=num_classes)
model = model.to(DEVICE)
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
def compute_accuracy(model, data_loader):
    model.eval()
    correct_pred, num_examples = 0, 0
    for i, (features, targets) in enumerate(data_loader):
        features = features.to(DEVICE)
        targets = targets.to(DEVICE)
        logits, probas = model(features)
        _, predicted_labels = torch.max(probas, 1)
        num_examples += targets.size(0)
        correct_pred += (predicted_labels == targets).sum()
    return correct_pred.float()/num_examples * 100
def compute_epoch_loss(model, data_loader):
    model.eval()
    curr_loss, num_examples = 0., 0
    with torch.no_grad():
        for features, targets in data_loader:
            features = features.to(DEVICE)
            targets = targets.to(DEVICE)
            logits, probas = model(features)
            loss = F.cross_entropy(logits, targets, reduction='sum')
            num_examples += targets.size(0)
            curr_loss += loss
        curr_loss = curr_loss / num_examples
        return curr_loss
start_time = time.time()
for epoch in range(num_epochs):

    model.train()
    for batch_idx, (features, targets) in enumerate(train_loader):

        features = features.to(DEVICE)
        targets = targets.to(DEVICE)

        ### FORWARD AND BACK PROP
        logits, probas = model(features)
        cost = F.cross_entropy(logits, targets)
        optimizer.zero_grad()

        cost.backward()

        ### UPDATE MODEL PARAMETERS
        optimizer.step()

        ### LOGGING
        if not batch_idx % 50:
            print('Epoch: %03d/%03d | Batch %04d/%04d | Cost: %.4f'
                  % (epoch+1, num_epochs, batch_idx,
                     len(train_loader), cost))

    model.eval()
    with torch.set_grad_enabled(False):  # save memory during inference
        print('Epoch: %03d/%03d | Train: %.3f%% | Loss: %.3f' % (
              epoch+1, num_epochs,
              compute_accuracy(model, train_loader),
              compute_epoch_loss(model, train_loader)))

    print('Time elapsed: %.2f min' % ((time.time() - start_time)/60))

print('Total Training Time: %.2f min' % ((time.time() - start_time)/60))
Epoch: 001/010 | Batch 0000/0391 | Cost: 2.4443
Epoch: 001/010 | Batch 0050/0391 | Cost: 2.3136
Epoch: 001/010 | Batch 0100/0391 | Cost: 2.1749
Epoch: 001/010 | Batch 0150/0391 | Cost: 1.9730
Epoch: 001/010 | Batch 0200/0391 | Cost: 1.9690
Epoch: 001/010 | Batch 0250/0391 | Cost: 1.8496
Epoch: 001/010 | Batch 0300/0391 | Cost: 1.7281
Epoch: 001/010 | Batch 0350/0391 | Cost: 1.7167
Epoch: 001/010 | Train: 27.968% | Loss: 1.767
Time elapsed: 0.58 min
Epoch: 002/010 | Batch 0000/0391 | Cost: 1.8197
Epoch: 002/010 | Batch 0050/0391 | Cost: 1.8475
Epoch: 002/010 | Batch 0100/0391 | Cost: 1.6369
Epoch: 002/010 | Batch 0150/0391 | Cost: 1.5247
Epoch: 002/010 | Batch 0200/0391 | Cost: 1.3244
Epoch: 002/010 | Batch 0250/0391 | Cost: 1.4992
Epoch: 002/010 | Batch 0300/0391 | Cost: 1.3846
Epoch: 002/010 | Batch 0350/0391 | Cost: 1.3030
Epoch: 002/010 | Train: 50.406% | Loss: 1.338
Time elapsed: 1.15 min
Epoch: 003/010 | Batch 0000/0391 | Cost: 1.3428
Epoch: 003/010 | Batch 0050/0391 | Cost: 1.2384
Epoch: 003/010 | Batch 0100/0391 | Cost: 1.3957
Epoch: 003/010 | Batch 0150/0391 | Cost: 1.2707
Epoch: 003/010 | Batch 0200/0391 | Cost: 1.1567
Epoch: 003/010 | Batch 0250/0391 | Cost: 1.1354
Epoch: 003/010 | Batch 0300/0391 | Cost: 1.2811
Epoch: 003/010 | Batch 0350/0391 | Cost: 1.0373
Epoch: 003/010 | Train: 63.638% | Loss: 1.023
Time elapsed: 1.74 min
Epoch: 004/010 | Batch 0000/0391 | Cost: 0.9725
Epoch: 004/010 | Batch 0050/0391 | Cost: 1.0079
Epoch: 004/010 | Batch 0100/0391 | Cost: 1.0854
Epoch: 004/010 | Batch 0150/0391 | Cost: 0.9969
Epoch: 004/010 | Batch 0200/0391 | Cost: 1.0539
Epoch: 004/010 | Batch 0250/0391 | Cost: 1.0105
Epoch: 004/010 | Batch 0300/0391 | Cost: 0.9463
Epoch: 004/010 | Batch 0350/0391 | Cost: 0.9690
Epoch: 004/010 | Train: 70.548% | Loss: 0.848
Time elapsed: 2.34 min
Epoch: 005/010 | Batch 0000/0391 | Cost: 0.8491
Epoch: 005/010 | Batch 0050/0391 | Cost: 0.7533
Epoch: 005/010 | Batch 0100/0391 | Cost: 0.9738
Epoch: 005/010 | Batch 0150/0391 | Cost: 0.7582
Epoch: 005/010 | Batch 0200/0391 | Cost: 0.8555
Epoch: 005/010 | Batch 0250/0391 | Cost: 0.6898
Epoch: 005/010 | Batch 0300/0391 | Cost: 0.6234
Epoch: 005/010 | Batch 0350/0391 | Cost: 0.6334
Epoch: 005/010 | Train: 70.334% | Loss: 0.859
Time elapsed: 2.95 min
Epoch: 006/010 | Batch 0000/0391 | Cost: 0.7332
Epoch: 006/010 | Batch 0050/0391 | Cost: 0.6921
Epoch: 006/010 | Batch 0100/0391 | Cost: 0.7888
Epoch: 006/010 | Batch 0150/0391 | Cost: 0.5622
Epoch: 006/010 | Batch 0200/0391 | Cost: 0.5644
Epoch: 006/010 | Batch 0250/0391 | Cost: 0.7180
Epoch: 006/010 | Batch 0300/0391 | Cost: 0.9148
Epoch: 006/010 | Batch 0350/0391 | Cost: 0.6259
Epoch: 006/010 | Train: 80.834% | Loss: 0.571
Time elapsed: 3.56 min
Epoch: 007/010 | Batch 0000/0391 | Cost: 0.5934
Epoch: 007/010 | Batch 0050/0391 | Cost: 0.6523
Epoch: 007/010 | Batch 0100/0391 | Cost: 0.7737
Epoch: 007/010 | Batch 0150/0391 | Cost: 0.5710
Epoch: 007/010 | Batch 0200/0391 | Cost: 0.5144
Epoch: 007/010 | Batch 0250/0391 | Cost: 0.5684
Epoch: 007/010 | Batch 0300/0391 | Cost: 0.6559
Epoch: 007/010 | Batch 0350/0391 | Cost: 0.7232
Epoch: 007/010 | Train: 83.330% | Loss: 0.513
Time elapsed: 4.16 min
Epoch: 008/010 | Batch 0000/0391 | Cost: 0.4899
Epoch: 008/010 | Batch 0050/0391 | Cost: 0.4403
Epoch: 008/010 | Batch 0100/0391 | Cost: 0.4438
Epoch: 008/010 | Batch 0150/0391 | Cost: 0.5328
Epoch: 008/010 | Batch 0200/0391 | Cost: 0.5851
Epoch: 008/010 | Batch 0250/0391 | Cost: 0.4817
Epoch: 008/010 | Batch 0300/0391 | Cost: 0.5693
Epoch: 008/010 | Batch 0350/0391 | Cost: 0.5650
Epoch: 008/010 | Train: 85.476% | Loss: 0.427
Time elapsed: 4.77 min
Epoch: 009/010 | Batch 0000/0391 | Cost: 0.4778
Epoch: 009/010 | Batch 0050/0391 | Cost: 0.3859
Epoch: 009/010 | Batch 0100/0391 | Cost: 0.4461
Epoch: 009/010 | Batch 0150/0391 | Cost: 0.4367
Epoch: 009/010 | Batch 0200/0391 | Cost: 0.3941
Epoch: 009/010 | Batch 0250/0391 | Cost: 0.4247
Epoch: 009/010 | Batch 0300/0391 | Cost: 0.4079
Epoch: 009/010 | Batch 0350/0391 | Cost: 0.3797
Epoch: 009/010 | Train: 88.140% | Loss: 0.359
Time elapsed: 5.37 min
Epoch: 010/010 | Batch 0000/0391 | Cost: 0.2155
Epoch: 010/010 | Batch 0050/0391 | Cost: 0.3470
Epoch: 010/010 | Batch 0100/0391 | Cost: 0.2810
Epoch: 010/010 | Batch 0150/0391 | Cost: 0.3658
Epoch: 010/010 | Batch 0200/0391 | Cost: 0.3964
Epoch: 010/010 | Batch 0250/0391 | Cost: 0.3705
Epoch: 010/010 | Batch 0300/0391 | Cost: 0.4255
Epoch: 010/010 | Batch 0350/0391 | Cost: 0.4562
Epoch: 010/010 | Train: 87.974% | Loss: 0.366
Time elapsed: 5.98 min
Total Training Time: 5.98 min
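One detail worth noting in the training loop above: F.cross_entropy expects raw logits (it applies log-softmax internally), which is why the cost is computed from logits rather than from the softmax probabilities the model also returns. The optional snippet below (not part of the original notebook; it reuses the model, train_loader, and DEVICE defined above) illustrates the equivalence:

# Optional: cross-entropy on logits equals the negative log-likelihood of the
# softmax probabilities; passing logits is simply the numerically stable route.
features, targets = next(iter(train_loader))
features, targets = features.to(DEVICE), targets.to(DEVICE)
with torch.no_grad():
    logits, probas = model(features)
    loss_from_logits = F.cross_entropy(logits, targets)
    loss_from_probas = F.nll_loss(torch.log(probas), targets)
print(loss_from_logits.item(), loss_from_probas.item())  # expected to agree up to floating-point round-off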
with torch.set_grad_enabled(False):  # save memory during inference
    print('Test accuracy: %.2f%%' % (compute_accuracy(model, test_loader)))
Test accuracy: 75.43%
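As a final illustration (an optional addition, not part of the original notebook), the snippet below runs the trained model on a single test image and reads the predicted class index off the softmax output, showing how the (logits, probas) return values are used at inference time:

# Optional: predict the class of one test image (CIFAR-10 class indices 0-9)
model.eval()
image, label = test_dataset[0]
with torch.no_grad():
    _, probas = model(image.unsqueeze(0).to(DEVICE))  # add a batch dimension
predicted_class = torch.argmax(probas, dim=1).item()
print('Predicted class index: %d | True class index: %d' % (predicted_class, label))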
%watermark -iv
numpy 1.15.4
torch 1.0.1.post2