#!/usr/bin/env python
# coding: utf-8

# Deep Learning Models -- A collection of various deep learning architectures,
# models, and tips for TensorFlow and PyTorch in Jupyter Notebooks.
# - Author: Sebastian Raschka
# - GitHub Repository: https://github.com/rasbt/deeplearning-models

# In[1]:


get_ipython().run_line_magic('load_ext', 'watermark')
get_ipython().run_line_magic('watermark', "-a 'Sebastian Raschka' -v -p torch")


# # Network in Network CIFAR-10 Classifier
#
# based on
#
# - Lin, Min, Qiang Chen, and Shuicheng Yan. "Network in network." arXiv preprint arXiv:1312.4400 (2013).

# ## Imports

# In[2]:


import os
import time

import numpy as np
import pandas as pd

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader

from torchvision import datasets
from torchvision import transforms

import matplotlib.pyplot as plt
from PIL import Image


if torch.cuda.is_available():
    torch.backends.cudnn.deterministic = True


# ## Model Settings

# In[3]:


##########################
### SETTINGS
##########################

# Hyperparameters
RANDOM_SEED = 1
LEARNING_RATE = 0.001
BATCH_SIZE = 256
NUM_EPOCHS = 10

# Architecture
NUM_CLASSES = 10

# Other
DEVICE = "cuda:0"
GRAYSCALE = False


# The following code cells set up the CIFAR-10 dataset and implement the
# NiN (Network in Network) architecture described in the paper referenced above.

# In[5]:


##########################
### CIFAR-10 Dataset
##########################

# Note transforms.ToTensor() scales input images
# to 0-1 range
train_dataset = datasets.CIFAR10(root='data',
                                 train=True,
                                 transform=transforms.ToTensor(),
                                 download=True)

test_dataset = datasets.CIFAR10(root='data',
                                train=False,
                                transform=transforms.ToTensor())

train_loader = DataLoader(dataset=train_dataset,
                          batch_size=BATCH_SIZE,
                          num_workers=8,
                          shuffle=True)

test_loader = DataLoader(dataset=test_dataset,
                         batch_size=BATCH_SIZE,
                         num_workers=8,
                         shuffle=False)

# Checking the dataset
for images, labels in train_loader:
    print('Image batch dimensions:', images.shape)
    print('Image label dimensions:', labels.shape)
    break


# In[4]:


##########################
### MODEL
##########################

class NiN(nn.Module):
    def __init__(self, num_classes):
        super(NiN, self).__init__()
        self.num_classes = num_classes
        self.classifier = nn.Sequential(
            nn.Conv2d(3, 192, kernel_size=5, stride=1, padding=2),
            nn.ReLU(inplace=True),
            nn.Conv2d(192, 160, kernel_size=1, stride=1, padding=0),
            nn.ReLU(inplace=True),
            nn.Conv2d(160, 96, kernel_size=1, stride=1, padding=0),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
            nn.Dropout(0.5),

            nn.Conv2d(96, 192, kernel_size=5, stride=1, padding=2),
            nn.ReLU(inplace=True),
            nn.Conv2d(192, 192, kernel_size=1, stride=1, padding=0),
            nn.ReLU(inplace=True),
            nn.Conv2d(192, 192, kernel_size=1, stride=1, padding=0),
            nn.ReLU(inplace=True),
            nn.AvgPool2d(kernel_size=3, stride=2, padding=1),
            nn.Dropout(0.5),

            nn.Conv2d(192, 192, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(192, 192, kernel_size=1, stride=1, padding=0),
            nn.ReLU(inplace=True),
            nn.Conv2d(192, num_classes, kernel_size=1, stride=1, padding=0),
            nn.ReLU(inplace=True),
            # global average pooling over the remaining 8x8 feature map
            nn.AvgPool2d(kernel_size=8, stride=1, padding=0),
        )

    def forward(self, x):
        x = self.classifier(x)
        logits = x.view(x.size(0), self.num_classes)
        probas = torch.softmax(logits, dim=1)
        return logits, probas
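# As a quick sanity check (a minimal sketch added here, not part of the
# original notebook), we can pass a random CIFAR-10-sized batch through an
# untrained NiN instance on the CPU and confirm the output shapes:


sanity_model = NiN(NUM_CLASSES)
sanity_batch = torch.randn(4, 3, 32, 32)  # 4 RGB images, 32x32 pixels
sanity_logits, sanity_probas = sanity_model(sanity_batch)
print(sanity_logits.shape)   # expected: torch.Size([4, 10])
print(sanity_probas.shape)   # expected: torch.Size([4, 10])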
# ## Training without Pinned Memory

# In[6]:


torch.manual_seed(RANDOM_SEED)

model = NiN(NUM_CLASSES)
model.to(DEVICE)

optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)


# In[7]:


def compute_accuracy(model, data_loader, device):
    correct_pred, num_examples = 0, 0
    for i, (features, targets) in enumerate(data_loader):
        features = features.to(device)
        targets = targets.to(device)
        logits, probas = model(features)
        _, predicted_labels = torch.max(probas, 1)
        num_examples += targets.size(0)
        correct_pred += (predicted_labels == targets).sum()
    return correct_pred.float()/num_examples * 100


start_time = time.time()
for epoch in range(NUM_EPOCHS):

    model.train()
    for batch_idx, (features, targets) in enumerate(train_loader):

        features = features.to(DEVICE)
        targets = targets.to(DEVICE)

        ### FORWARD AND BACK PROP
        logits, probas = model(features)
        cost = F.cross_entropy(logits, targets)
        optimizer.zero_grad()

        cost.backward()

        ### UPDATE MODEL PARAMETERS
        optimizer.step()

        ### LOGGING
        if not batch_idx % 150:
            print('Epoch: %03d/%03d | Batch %04d/%04d | Cost: %.4f'
                  % (epoch+1, NUM_EPOCHS, batch_idx,
                     len(train_loader), cost))

    model.eval()
    with torch.set_grad_enabled(False):  # save memory during inference
        print('Epoch: %03d/%03d | Train: %.3f%%' % (
              epoch+1, NUM_EPOCHS,
              compute_accuracy(model, train_loader, device=DEVICE)))

    print('Time elapsed: %.2f min' % ((time.time() - start_time)/60))

print('Total Training Time: %.2f min' % ((time.time() - start_time)/60))

with torch.set_grad_enabled(False):  # save memory during inference
    print('Test accuracy: %.2f%%' % (compute_accuracy(model, test_loader, device=DEVICE)))

print('Total Time: %.2f min' % ((time.time() - start_time)/60))


# ## Training with Pinned Memory

# In[8]:


##########################
### CIFAR-10 Dataset
##########################

# Note transforms.ToTensor() scales input images
# to 0-1 range
train_dataset = datasets.CIFAR10(root='data',
                                 train=True,
                                 transform=transforms.ToTensor(),
                                 download=True)

test_dataset = datasets.CIFAR10(root='data',
                                train=False,
                                transform=transforms.ToTensor())

# Same loaders as above, but with pin_memory=True so that batches are
# staged in page-locked host memory before the transfer to the GPU
train_loader = DataLoader(dataset=train_dataset,
                          batch_size=BATCH_SIZE,
                          num_workers=8,
                          pin_memory=True,
                          shuffle=True)

test_loader = DataLoader(dataset=test_dataset,
                         batch_size=BATCH_SIZE,
                         num_workers=8,
                         pin_memory=True,
                         shuffle=False)

# Checking the dataset
for images, labels in train_loader:
    print('Image batch dimensions:', images.shape)
    print('Image label dimensions:', labels.shape)
    break
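# Pinned host memory mainly pays off when the host-to-device copy is issued
# asynchronously via non_blocking=True, so it can overlap with GPU compute.
# The training loop below keeps the original blocking .to(DEVICE) calls; as a
# minimal sketch (not part of the original notebook), an asynchronous
# transfer would look like this:


if torch.cuda.is_available():
    staged_batch = torch.randn(BATCH_SIZE, 3, 32, 32).pin_memory()
    # the copy is queued on the CUDA stream and can overlap with computation
    staged_batch = staged_batch.to(DEVICE, non_blocking=True)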
# In[9]:


torch.manual_seed(RANDOM_SEED)

model = NiN(NUM_CLASSES)
model.to(DEVICE)

optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)


# In[10]:


# (compute_accuracy as defined above is reused here)

start_time = time.time()
for epoch in range(NUM_EPOCHS):

    model.train()
    for batch_idx, (features, targets) in enumerate(train_loader):

        features = features.to(DEVICE)
        targets = targets.to(DEVICE)

        ### FORWARD AND BACK PROP
        logits, probas = model(features)
        cost = F.cross_entropy(logits, targets)
        optimizer.zero_grad()

        cost.backward()

        ### UPDATE MODEL PARAMETERS
        optimizer.step()

        ### LOGGING
        if not batch_idx % 150:
            print('Epoch: %03d/%03d | Batch %04d/%04d | Cost: %.4f'
                  % (epoch+1, NUM_EPOCHS, batch_idx,
                     len(train_loader), cost))

    model.eval()
    with torch.set_grad_enabled(False):  # save memory during inference
        print('Epoch: %03d/%03d | Train: %.3f%%' % (
              epoch+1, NUM_EPOCHS,
              compute_accuracy(model, train_loader, device=DEVICE)))

    print('Time elapsed: %.2f min' % ((time.time() - start_time)/60))

print('Total Training Time: %.2f min' % ((time.time() - start_time)/60))

with torch.set_grad_enabled(False):  # save memory during inference
    print('Test accuracy: %.2f%%' % (compute_accuracy(model, test_loader, device=DEVICE)))

print('Total Time: %.2f min' % ((time.time() - start_time)/60))


# In[11]:


get_ipython().run_line_magic('watermark', '-iv')
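# To isolate how much pin_memory affects the input pipeline itself (a minimal
# sketch, not part of the original notebook; timings will vary by machine),
# one could time a bare pass over a loader, without any model computation:


def time_loader(loader, device):
    start = time.time()
    for features, targets in loader:
        features = features.to(device, non_blocking=True)
        targets = targets.to(device, non_blocking=True)
    if torch.cuda.is_available():
        torch.cuda.synchronize()  # wait for any outstanding async copies
    return time.time() - start


print('Loader pass: %.2f sec' % time_loader(train_loader, DEVICE))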