In :
%matplotlib inline
import torch
import torch.nn as nn

In :
# Warmup:

# We will explore how nn.Linear layers work by passing it a
# random tensor as input (tensor with random values)

# Predicting the size of the output is a useful exercise to see
# if we really understand what the layer is doing

In :
# Fully-connected (affine) layer mapping 100-dim inputs to 30-dim outputs.
layer = nn.Linear(100, 30)

In :
x = torch.randn((1, 100))  # a single sample: 1 row of 100 standard-normal features

In :
x.shape  # (batch, features) — expect torch.Size([1, 100])

Out:
torch.Size([1, 100])
In :


In :
# Restored: the cell defining y was lost in export (the empty cell above);
# y must be computed before its shape can be inspected.
y = layer(x)
y.shape # [1, 30]

Out:
torch.Size([1, 30])
In :
x = torch.randn(5, 100) # changed 1 => 5: a batch of 5 samples, each 100-dim
y = layer(x)  # nn.Linear acts on the last dimension; the batch dimension passes through
y.shape  # expect torch.Size([5, 30])

Out:
torch.Size([5, 30])
In :
x = torch.randn(5, 90) # changed 100 => 90: no longer matches in_features=100
y = layer(x)  # deliberately raises RuntimeError (size mismatch 90 vs 100) — the traceback below is the point of this cell
y.shape

---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
<ipython-input-54-213a4d91b660> in <module>
1 x = torch.randn(5, 90) # changed 100 => 90
----> 2 y = layer(x)
3 y.shape

~/miniconda3/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
475             result = self._slow_forward(*input, **kwargs)
476         else:
--> 477             result = self.forward(*input, **kwargs)
478         for hook in self._forward_hooks.values():
479             hook_result = hook(self, input, result)

~/miniconda3/lib/python3.6/site-packages/torch/nn/modules/linear.py in forward(self, input)
53
54     def forward(self, input):
---> 55         return F.linear(input, self.weight, self.bias)
56
57     def extra_repr(self):

~/miniconda3/lib/python3.6/site-packages/torch/nn/functional.py in linear(input, weight, bias)
1022     if input.dim() == 2 and bias is not None:
1023         # fused op is marginally faster
1025
1026     output = input.matmul(weight.t())

RuntimeError: size mismatch, m1: [5 x 90], m2: [100 x 30] at /Users/soumith/miniconda2/conda-bld/pytorch_1532623076075/work/aten/src/TH/generic/THTensorMath.cpp:2070
In :
# 2-D convolution: 3 input channels, 7 output channels, 5x5 kernel.
conv = nn.Conv2d(3, 7, 5)

In :
# NOTE(review): "paramters" is a typo for "parameters"; kept unchanged because
# the later cells reference this exact name.
conv_paramters = list(conv.parameters())  # [weight, bias] — see the two cells below

In :
len(conv_paramters)  # 2: the kernel (weight) tensor and the bias vector

Out:
2
In :
# Fixed: conv_paramters is a plain Python list and has no .shape;
# index element 0 — the kernel (weight) tensor, per the Out below:
# [out_channels, in_channels, kH, kW] = [7, 3, 5, 5].
conv_paramters[0].shape # kernel

Out:
torch.Size([7, 3, 5, 5])
In :
# Fixed: same list-indexing bug as the previous cell; element 1 is the bias,
# one value per output channel -> torch.Size([7]).
conv_paramters[1].shape # bias

Out:
torch.Size([7])
In :
x = torch.randn(1, 3, 128, 128) # 128 pixel x 128 pixel coloured image (3 channels)
# batch size = 1

In :
# This format is called the NCHW format
#   N = number
#   C = channel
#   H = height
#   W = width

In :
# No padding: each spatial dimension shrinks by kernel_size - 1.
conv = nn.Conv2d(in_channels=3,
out_channels=7,
kernel_size=5)
y = conv(x)
y.shape  # 128 - 5 + 1 = 124 -> [1, 7, 124, 124]

Out:
torch.Size([1, 7, 124, 124])
In :
x = torch.randn(16, 3, 128, 128) # change 1 => 16: only the batch size grows
y = conv(x)
y.shape  # batch dimension passes through -> [16, 7, 124, 124]

Out:
torch.Size([16, 7, 124, 124])
In :
# add padding
# Restored: the closing paren and the padding argument of this call were lost
# in export. padding=1 is recovered from the Out below: 124 + 2*1 = 126.
conv = nn.Conv2d(in_channels=3,
                 out_channels=7,
                 kernel_size=5,
                 padding=1)
x = torch.randn(16, 3, 128, 128) # change 1 => 16
y = conv(x)
y.shape  # [16, 7, 126, 126]

Out:
torch.Size([16, 7, 126, 126])
In :
# add padding
# Restored: the closing paren and the padding argument were lost in export.
# padding=25 is recovered from the Out below: 124 + 2*25 = 174.
conv = nn.Conv2d(in_channels=3,
                 out_channels=7,
                 kernel_size=5,
                 padding=25)
x = torch.randn(16, 3, 128, 128) # change 1 => 16
y = conv(x)
y.shape  # [16, 7, 174, 174]

Out:
torch.Size([16, 7, 174, 174])
In :
# add padding 3
# Restored: the closing paren and the padding argument were lost in export;
# the cell comment and the Out (124 + 2*3 = 130) both give padding=3.
conv = nn.Conv2d(in_channels=3,
                 out_channels=7,
                 kernel_size=5,
                 padding=3)
x = torch.randn(16, 3, 128, 128) # change 1 => 16
y = conv(x)
y.shape  # [16, 7, 130, 130]

Out:
torch.Size([16, 7, 130, 130])
In :
# add padding 2
# Restored: the closing paren and the padding argument were lost in export;
# the cell comment and the Out (124 + 2*2 = 128) both give padding=2.
# With kernel 5, padding=2 = (kernel_size - 1) // 2 preserves H and W exactly.
conv = nn.Conv2d(in_channels=3,
                 out_channels=7,
                 kernel_size=5,
                 padding=2)
x = torch.randn(16, 3, 128, 128) # change 1 => 16
y = conv(x)
y.shape  # [16, 7, 128, 128] — same spatial size as the input

Out:
torch.Size([16, 7, 128, 128])
In :
print("input:", x.shape)   # [16, 3, 128, 128]
print("output:", y.shape)  # [16, 7, 128, 128] — spatial size preserved (see Out below)

input: torch.Size([16, 3, 128, 128])
output: torch.Size([16, 7, 128, 128])

In :
# Making kernel_size bigger doesn't change the size of the output,
# as long as padding grows to match: padding = (kernel_size - 1) // 2.
# Restored: the closing paren and the padding argument were lost in export;
# padding=3 is recovered from the Out below (128 + 2*3 - 7 + 1 = 128).
conv = nn.Conv2d(in_channels=3,
                 out_channels=7,
                 kernel_size=7,
                 padding=3)
x = torch.randn(16, 3, 128, 128) # change 1 => 16
y = conv(x)
print("input:", x.shape)
print("output:", y.shape)

input: torch.Size([16, 3, 128, 128])
output: torch.Size([16, 7, 128, 128])

In :
# 2x2 max-pooling with stride 2: halves H and W, keeps N and C.
pool_layer = nn.MaxPool2d(kernel_size=2, stride=2)

In :
y_pooled = pool_layer(torch.relu(y))  # the standard conv -> relu -> pool pattern

In :
y_pooled.shape  # expect [16, 7, 64, 64]
# batch size unchanged
# number of channels unchanged
# h/w divided by 2 (the 2x2 stride-2 pool)

Out:
torch.Size([16, 7, 64, 64])
In :
# set stride = 2: the convolution itself downsamples, halving H and W.
# Restored: the closing paren and the stride/padding arguments were lost in
# export. stride=2 is named in the cell comment; padding=3 is recovered from
# the Out below: floor((128 + 2*3 - 7) / 2) + 1 = 64.
conv = nn.Conv2d(in_channels=3,
                 out_channels=7,
                 kernel_size=7,
                 stride=2,
                 padding=3)
x = torch.randn(16, 3, 128, 128)
y = conv(x)
print("input:", x.shape)
print("output:", y.shape)

input: torch.Size([16, 3, 128, 128])
output: torch.Size([16, 7, 64, 64])

In :
# alexnet
import torchvision.models  # third-party dependency; downloads the weights on first run

# NOTE(review): pretrained=True is the old-style torchvision API (consistent with
# the torch 0.4-era paths in the traceback above); newer releases prefer weights=...
alexNet = torchvision.models.alexnet(pretrained=True)

In :
alexNet  # bare expression -> rich repr: prints the module tree (features + classifier)

Out:
AlexNet(
(features): Sequential(
(0): Conv2d(3, 64, kernel_size=(11, 11), stride=(4, 4), padding=(2, 2))
(1): ReLU(inplace)
(2): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
(3): Conv2d(64, 192, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
(4): ReLU(inplace)
(5): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
(6): Conv2d(192, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(7): ReLU(inplace)
(8): Conv2d(384, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(9): ReLU(inplace)
(10): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(11): ReLU(inplace)
(12): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
)
(classifier): Sequential(
(0): Dropout(p=0.5)
(1): Linear(in_features=9216, out_features=4096, bias=True)
(2): ReLU(inplace)
(3): Dropout(p=0.5)
(4): Linear(in_features=4096, out_features=4096, bias=True)
(5): ReLU(inplace)
(6): Linear(in_features=4096, out_features=1000, bias=True)
)
)
In :
# x here is still the 16 x 3 x 128 x 128 batch from above (not AlexNet's usual
# 224 x 224 input), so the feature map comes out [16, 256, 3, 3] rather than 6x6.
alexNet.features(x).shape

Out:
torch.Size([16, 256, 3, 3])
In :
# lenet verification


torch.Size([1, 1, 28, 28])