Deep Learning Models -- A collection of various deep learning architectures, models, and tips for TensorFlow and PyTorch in Jupyter Notebooks.

In [1]:
%load_ext watermark
%watermark -a 'Sebastian Raschka' -v -p torch
Sebastian Raschka 

CPython 3.7.3
IPython 7.6.1

torch 1.1.0

AlexNet CIFAR-10 Classifier

Network Architecture

References

Imports

In [2]:
import os
import time

import numpy as np
import pandas as pd

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch.utils.data.dataset import Subset

from torchvision import datasets
from torchvision import transforms

import matplotlib.pyplot as plt
from PIL import Image


# cuDNN auto-selects convolution algorithms that can be non-deterministic;
# force the deterministic implementations so repeated GPU runs are reproducible.
if torch.cuda.is_available():
    torch.backends.cudnn.deterministic = True

Model Settings

In [3]:
##########################
### SETTINGS
##########################

# Hyperparameters
RANDOM_SEED = 1        # seed for torch.manual_seed below (reproducible init)
LEARNING_RATE = 0.0001
BATCH_SIZE = 256
NUM_EPOCHS = 40

# Architecture
NUM_CLASSES = 10       # CIFAR-10 has 10 classes

# Other
# Fall back to CPU when no GPU is available so the notebook still runs
# end-to-end on CPU-only machines (previously hard-coded to "cuda:0").
# torch.device is accepted everywhere the old string was used (.to(DEVICE)).
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

Dataset

In [4]:
# Hold out the last 2,000 of the 50,000 CIFAR-10 training images for validation.
train_indices = torch.arange(0, 48000)
valid_indices = torch.arange(48000, 50000)


# Training-time augmentation: upscale to 70x70, then take a random 64x64 crop.
train_transform = transforms.Compose([transforms.Resize((70, 70)),
                                      transforms.RandomCrop((64, 64)),
                                      transforms.ToTensor()])

# Evaluation uses a deterministic center crop instead of a random one.
test_transform = transforms.Compose([transforms.Resize((70, 70)),
                                     transforms.CenterCrop((64, 64)),
                                     transforms.ToTensor()])

train_and_valid = datasets.CIFAR10(root='data', 
                                   train=True, 
                                   transform=train_transform,
                                   download=True)

# NOTE(review): both Subsets share train_and_valid, so the validation split
# also goes through the *random-crop training* transform — consider a second
# CIFAR10 instance with test_transform for the validation indices.
train_dataset = Subset(train_and_valid, train_indices)
valid_dataset = Subset(train_and_valid, valid_indices)
test_dataset = datasets.CIFAR10(root='data', 
                                train=False, 
                                transform=test_transform,
                                download=False)




# Only the training loader shuffles; evaluation order does not matter for
# accuracy but keeping it fixed makes runs comparable.
train_loader = DataLoader(dataset=train_dataset, 
                          batch_size=BATCH_SIZE,
                          num_workers=4,
                          shuffle=True)

valid_loader = DataLoader(dataset=valid_dataset, 
                          batch_size=BATCH_SIZE,
                          num_workers=4,
                          shuffle=False)

test_loader = DataLoader(dataset=test_dataset, 
                         batch_size=BATCH_SIZE,
                         num_workers=4,
                         shuffle=False)
Files already downloaded and verified
In [5]:
def print_first_batch_dims(loader):
    """Print the image/label tensor dimensions of the first batch of `loader`."""
    for images, labels in loader:
        print('Image batch dimensions:', images.size())
        print('Image label dimensions:', labels.size())
        break


# Checking the dataset
print('Training Set:\n')
print_first_batch_dims(train_loader)

print('\nValidation Set:')
print_first_batch_dims(valid_loader)

# BUG FIX: this section previously iterated train_loader, so the dimensions
# shown under "Testing Set" actually came from the training set.
print('\nTesting Set:')
print_first_batch_dims(test_loader)
Training Set:

Image batch dimensions: torch.Size([256, 3, 64, 64])
Image label dimensions: torch.Size([256])

Validation Set:
Image batch dimensions: torch.Size([256, 3, 64, 64])
Image label dimensions: torch.Size([256])

Testing Set:
Image batch dimensions: torch.Size([256, 3, 64, 64])
Image label dimensions: torch.Size([256])

Model

In [6]:
##########################
### MODEL
##########################

class AlexNet(nn.Module):
    """AlexNet-style CNN for 64x64 RGB inputs.

    ``forward`` returns a tuple ``(logits, probas)`` where ``probas`` is the
    softmax of the logits over the class dimension.
    """

    def __init__(self, num_classes):
        super(AlexNet, self).__init__()
        # Feature extractor: five convolutions with three interleaved
        # max-pooling stages, as in the classic AlexNet layout.
        feature_layers = [
            nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(64, 192, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(192, 384, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
        ]
        self.features = nn.Sequential(*feature_layers)

        # Pool to a fixed 6x6 spatial size so the classifier input width is
        # independent of the exact feature-map resolution.
        self.avgpool = nn.AdaptiveAvgPool2d((6, 6))

        # Two dropout-regularized hidden layers followed by the class logits.
        classifier_layers = [
            nn.Dropout(0.5),
            nn.Linear(256 * 6 * 6, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(0.5),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Linear(4096, num_classes),
        ]
        self.classifier = nn.Sequential(*classifier_layers)

    def forward(self, x):
        feature_maps = self.features(x)
        pooled = self.avgpool(feature_maps)
        flattened = pooled.view(pooled.size(0), 256 * 6 * 6)
        logits = self.classifier(flattened)
        probas = F.softmax(logits, dim=1)
        return logits, probas
In [7]:
# Seed before constructing the model so the weight initialization is reproducible.
torch.manual_seed(RANDOM_SEED)

model = AlexNet(NUM_CLASSES)
model.to(DEVICE)

optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)  

Training

In [8]:
def compute_acc(model, data_loader, device):
    """Compute the classification accuracy (in percent) of `model` on `data_loader`.

    Parameters
    ----------
    model : nn.Module whose forward returns ``(logits, probas)``.
    data_loader : iterable yielding ``(features, targets)`` batches.
    device : device to move batches to before the forward pass.

    Returns a 0-dim float tensor with the accuracy in percent.

    Side effect: puts `model` into eval mode (callers re-enable train mode).
    """
    correct_pred, num_examples = 0, 0
    model.eval()
    # FIX: disable gradient tracking here so evaluation never builds autograd
    # graphs, even when the caller forgets to wrap this call in no_grad.
    with torch.no_grad():
        for features, targets in data_loader:

            features = features.to(device)
            targets = targets.to(device)

            logits, probas = model(features)
            # softmax is monotonic, so argmax over probas == argmax over logits
            _, predicted_labels = torch.max(probas, 1)
            num_examples += targets.size(0)
            assert predicted_labels.size() == targets.size()
            correct_pred += (predicted_labels == targets).sum()
    return correct_pred.float()/num_examples * 100
    

start_time = time.time()

# Per-iteration minibatch costs and per-epoch accuracies, kept for plotting.
cost_list = []
train_acc_list, valid_acc_list = [], []


for epoch in range(NUM_EPOCHS):
    
    # Re-enable train mode each epoch (compute_acc switches to eval mode).
    model.train()
    for batch_idx, (features, targets) in enumerate(train_loader):
        
        features = features.to(DEVICE)
        targets = targets.to(DEVICE)
            
        ### FORWARD AND BACK PROP
        # cross_entropy consumes raw logits; the softmax output is unused here.
        logits, probas = model(features)
        cost = F.cross_entropy(logits, targets)
        optimizer.zero_grad()
        
        cost.backward()
        
        ### UPDATE MODEL PARAMETERS
        optimizer.step()
        
        #################################################
        ### CODE ONLY FOR LOGGING BEYOND THIS POINT
        ################################################
        cost_list.append(cost.item())
        # Log every 150 minibatches (including batch 0 of each epoch).
        if not batch_idx % 150:
            print (f'Epoch: {epoch+1:03d}/{NUM_EPOCHS:03d} | '
                   f'Batch {batch_idx:03d}/{len(train_loader):03d} |' 
                   f' Cost: {cost:.4f}')

        

    model.eval()
    with torch.set_grad_enabled(False): # save memory during inference
        
        train_acc = compute_acc(model, train_loader, device=DEVICE)
        valid_acc = compute_acc(model, valid_loader, device=DEVICE)
        
        print(f'Epoch: {epoch+1:03d}/{NUM_EPOCHS:03d}\n'
              f'Train ACC: {train_acc:.2f} | Validation ACC: {valid_acc:.2f}')
        
        # NOTE(review): these append 0-dim (possibly GPU) tensors, not Python
        # floats; .item() would be safer for the plotting cells below — confirm.
        train_acc_list.append(train_acc)
        valid_acc_list.append(valid_acc)
        
    elapsed = (time.time() - start_time)/60
    print(f'Time elapsed: {elapsed:.2f} min')
  
elapsed = (time.time() - start_time)/60
print(f'Total Training Time: {elapsed:.2f} min')
Epoch: 001/040 | Batch 000/188 | Cost: 2.3029
Epoch: 001/040 | Batch 150/188 | Cost: 1.7090
Epoch: 001/040
Train ACC: 31.92 | Validation ACC: 31.05
Time elapsed: 0.22 min
Epoch: 002/040 | Batch 000/188 | Cost: 1.7312
Epoch: 002/040 | Batch 150/188 | Cost: 1.6115
Epoch: 002/040
Train ACC: 43.78 | Validation ACC: 44.35
Time elapsed: 0.43 min
Epoch: 003/040 | Batch 000/188 | Cost: 1.5096
Epoch: 003/040 | Batch 150/188 | Cost: 1.4324
Epoch: 003/040
Train ACC: 53.03 | Validation ACC: 52.30
Time elapsed: 0.64 min
Epoch: 004/040 | Batch 000/188 | Cost: 1.3731
Epoch: 004/040 | Batch 150/188 | Cost: 1.2505
Epoch: 004/040
Train ACC: 56.87 | Validation ACC: 57.30
Time elapsed: 0.85 min
Epoch: 005/040 | Batch 000/188 | Cost: 1.0734
Epoch: 005/040 | Batch 150/188 | Cost: 1.1652
Epoch: 005/040
Train ACC: 60.97 | Validation ACC: 60.30
Time elapsed: 1.07 min
Epoch: 006/040 | Batch 000/188 | Cost: 1.0730
Epoch: 006/040 | Batch 150/188 | Cost: 1.1333
Epoch: 006/040
Train ACC: 62.87 | Validation ACC: 60.90
Time elapsed: 1.28 min
Epoch: 007/040 | Batch 000/188 | Cost: 1.0317
Epoch: 007/040 | Batch 150/188 | Cost: 1.0182
Epoch: 007/040
Train ACC: 67.03 | Validation ACC: 64.35
Time elapsed: 1.50 min
Epoch: 008/040 | Batch 000/188 | Cost: 1.0245
Epoch: 008/040 | Batch 150/188 | Cost: 0.9324
Epoch: 008/040
Train ACC: 64.87 | Validation ACC: 64.55
Time elapsed: 1.71 min
Epoch: 009/040 | Batch 000/188 | Cost: 1.0012
Epoch: 009/040 | Batch 150/188 | Cost: 0.8525
Epoch: 009/040
Train ACC: 70.84 | Validation ACC: 67.30
Time elapsed: 1.93 min
Epoch: 010/040 | Batch 000/188 | Cost: 0.7442
Epoch: 010/040 | Batch 150/188 | Cost: 0.7908
Epoch: 010/040
Train ACC: 70.95 | Validation ACC: 67.10
Time elapsed: 2.14 min
Epoch: 011/040 | Batch 000/188 | Cost: 0.8389
Epoch: 011/040 | Batch 150/188 | Cost: 0.8383
Epoch: 011/040
Train ACC: 74.18 | Validation ACC: 69.95
Time elapsed: 2.36 min
Epoch: 012/040 | Batch 000/188 | Cost: 0.7037
Epoch: 012/040 | Batch 150/188 | Cost: 0.9285
Epoch: 012/040
Train ACC: 74.23 | Validation ACC: 66.70
Time elapsed: 2.57 min
Epoch: 013/040 | Batch 000/188 | Cost: 0.7205
Epoch: 013/040 | Batch 150/188 | Cost: 0.7099
Epoch: 013/040
Train ACC: 76.88 | Validation ACC: 70.00
Time elapsed: 2.78 min
Epoch: 014/040 | Batch 000/188 | Cost: 0.6575
Epoch: 014/040 | Batch 150/188 | Cost: 0.6311
Epoch: 014/040
Train ACC: 76.69 | Validation ACC: 70.00
Time elapsed: 3.00 min
Epoch: 015/040 | Batch 000/188 | Cost: 0.6724
Epoch: 015/040 | Batch 150/188 | Cost: 0.8899
Epoch: 015/040
Train ACC: 80.01 | Validation ACC: 71.80
Time elapsed: 3.22 min
Epoch: 016/040 | Batch 000/188 | Cost: 0.6895
Epoch: 016/040 | Batch 150/188 | Cost: 0.5913
Epoch: 016/040
Train ACC: 79.64 | Validation ACC: 70.75
Time elapsed: 3.43 min
Epoch: 017/040 | Batch 000/188 | Cost: 0.6096
Epoch: 017/040 | Batch 150/188 | Cost: 0.5401
Epoch: 017/040
Train ACC: 82.48 | Validation ACC: 72.20
Time elapsed: 3.65 min
Epoch: 018/040 | Batch 000/188 | Cost: 0.5421
Epoch: 018/040 | Batch 150/188 | Cost: 0.4187
Epoch: 018/040
Train ACC: 84.17 | Validation ACC: 73.60
Time elapsed: 3.86 min
Epoch: 019/040 | Batch 000/188 | Cost: 0.4490
Epoch: 019/040 | Batch 150/188 | Cost: 0.4658
Epoch: 019/040
Train ACC: 84.06 | Validation ACC: 72.65
Time elapsed: 4.08 min
Epoch: 020/040 | Batch 000/188 | Cost: 0.4837
Epoch: 020/040 | Batch 150/188 | Cost: 0.4519
Epoch: 020/040
Train ACC: 86.10 | Validation ACC: 72.90
Time elapsed: 4.29 min
Epoch: 021/040 | Batch 000/188 | Cost: 0.4615
Epoch: 021/040 | Batch 150/188 | Cost: 0.5283
Epoch: 021/040
Train ACC: 85.61 | Validation ACC: 72.10
Time elapsed: 4.51 min
Epoch: 022/040 | Batch 000/188 | Cost: 0.4693
Epoch: 022/040 | Batch 150/188 | Cost: 0.4589
Epoch: 022/040
Train ACC: 88.70 | Validation ACC: 73.90
Time elapsed: 4.72 min
Epoch: 023/040 | Batch 000/188 | Cost: 0.2818
Epoch: 023/040 | Batch 150/188 | Cost: 0.4123
Epoch: 023/040
Train ACC: 89.58 | Validation ACC: 73.45
Time elapsed: 4.94 min
Epoch: 024/040 | Batch 000/188 | Cost: 0.3030
Epoch: 024/040 | Batch 150/188 | Cost: 0.3685
Epoch: 024/040
Train ACC: 90.44 | Validation ACC: 73.60
Time elapsed: 5.15 min
Epoch: 025/040 | Batch 000/188 | Cost: 0.2399
Epoch: 025/040 | Batch 150/188 | Cost: 0.3384
Epoch: 025/040
Train ACC: 90.85 | Validation ACC: 73.35
Time elapsed: 5.37 min
Epoch: 026/040 | Batch 000/188 | Cost: 0.2333
Epoch: 026/040 | Batch 150/188 | Cost: 0.2852
Epoch: 026/040
Train ACC: 92.25 | Validation ACC: 72.20
Time elapsed: 5.58 min
Epoch: 027/040 | Batch 000/188 | Cost: 0.2728
Epoch: 027/040 | Batch 150/188 | Cost: 0.3350
Epoch: 027/040
Train ACC: 91.94 | Validation ACC: 73.85
Time elapsed: 5.80 min
Epoch: 028/040 | Batch 000/188 | Cost: 0.2277
Epoch: 028/040 | Batch 150/188 | Cost: 0.2987
Epoch: 028/040
Train ACC: 92.84 | Validation ACC: 72.85
Time elapsed: 6.02 min
Epoch: 029/040 | Batch 000/188 | Cost: 0.2115
Epoch: 029/040 | Batch 150/188 | Cost: 0.2038
Epoch: 029/040
Train ACC: 92.28 | Validation ACC: 72.50
Time elapsed: 6.23 min
Epoch: 030/040 | Batch 000/188 | Cost: 0.1841
Epoch: 030/040 | Batch 150/188 | Cost: 0.2074
Epoch: 030/040
Train ACC: 94.63 | Validation ACC: 74.00
Time elapsed: 6.44 min
Epoch: 031/040 | Batch 000/188 | Cost: 0.1490
Epoch: 031/040 | Batch 150/188 | Cost: 0.2191
Epoch: 031/040
Train ACC: 94.67 | Validation ACC: 72.60
Time elapsed: 6.66 min
Epoch: 032/040 | Batch 000/188 | Cost: 0.1719
Epoch: 032/040 | Batch 150/188 | Cost: 0.1990
Epoch: 032/040
Train ACC: 93.93 | Validation ACC: 71.60
Time elapsed: 6.87 min
Epoch: 033/040 | Batch 000/188 | Cost: 0.1839
Epoch: 033/040 | Batch 150/188 | Cost: 0.1939
Epoch: 033/040
Train ACC: 95.61 | Validation ACC: 73.75
Time elapsed: 7.09 min
Epoch: 034/040 | Batch 000/188 | Cost: 0.0995
Epoch: 034/040 | Batch 150/188 | Cost: 0.1726
Epoch: 034/040
Train ACC: 95.35 | Validation ACC: 73.85
Time elapsed: 7.30 min
Epoch: 035/040 | Batch 000/188 | Cost: 0.1451
Epoch: 035/040 | Batch 150/188 | Cost: 0.1414
Epoch: 035/040
Train ACC: 95.90 | Validation ACC: 73.55
Time elapsed: 7.52 min
Epoch: 036/040 | Batch 000/188 | Cost: 0.0551
Epoch: 036/040 | Batch 150/188 | Cost: 0.1009
Epoch: 036/040
Train ACC: 95.40 | Validation ACC: 72.45
Time elapsed: 7.73 min
Epoch: 037/040 | Batch 000/188 | Cost: 0.1616
Epoch: 037/040 | Batch 150/188 | Cost: 0.1102
Epoch: 037/040
Train ACC: 96.08 | Validation ACC: 72.55
Time elapsed: 7.95 min
Epoch: 038/040 | Batch 000/188 | Cost: 0.1409
Epoch: 038/040 | Batch 150/188 | Cost: 0.1090
Epoch: 038/040
Train ACC: 96.69 | Validation ACC: 73.85
Time elapsed: 8.16 min
Epoch: 039/040 | Batch 000/188 | Cost: 0.1088
Epoch: 039/040 | Batch 150/188 | Cost: 0.1309
Epoch: 039/040
Train ACC: 96.11 | Validation ACC: 72.15
Time elapsed: 8.38 min
Epoch: 040/040 | Batch 000/188 | Cost: 0.1418
Epoch: 040/040 | Batch 150/188 | Cost: 0.1745
Epoch: 040/040
Train ACC: 97.45 | Validation ACC: 74.00
Time elapsed: 8.59 min
Total Training Time: 8.59 min

Evaluation

In [9]:
# NOTE(review): pyplot is already imported in the imports cell at the top;
# this re-import is redundant but harmless.
import matplotlib.pyplot as plt
%matplotlib inline
In [10]:
# Per-minibatch training loss with a 200-iteration moving average overlaid.
window = 200
smoothing_kernel = np.ones(window) / window
running_avg = np.convolve(cost_list, smoothing_kernel, mode='valid')

plt.plot(cost_list, label='Minibatch cost')
plt.plot(running_avg, label='Running average')
plt.xlabel('Iteration')
plt.ylabel('Cross Entropy')
plt.legend()
plt.show()
In [11]:
# Training vs. validation accuracy per epoch.
epochs = np.arange(1, NUM_EPOCHS + 1)

plt.plot(epochs, train_acc_list, label='Training')
plt.plot(epochs, valid_acc_list, label='Validation')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
In [12]:
# Final accuracy on the held-out splits; inference only, so gradient
# tracking is disabled for the whole block.
with torch.no_grad():
    valid_acc = compute_acc(model=model,
                            data_loader=valid_loader,
                            device=DEVICE)

    test_acc = compute_acc(model=model,
                           data_loader=test_loader,
                           device=DEVICE)


print(f'Validation ACC: {valid_acc:.2f}%')
print(f'Test ACC: {test_acc:.2f}%')
Validation ACC: 74.55%
Test ACC: 73.68%
In [13]:
%watermark -iv
matplotlib  3.1.0
pandas      0.24.2
torch       1.1.0
numpy       1.16.4
PIL.Image   6.0.0
torchvision 0.3.0