import fastai
import torch.nn as nn
import torch
import numpy
%matplotlib inline
from torch.autograd import Variable
from torchvision import models
from fastai import conv_learner
import numpy as np
import matplotlib.pyplot as plt
%pylab inline
# pylab.rcParams['figure.figsize'] = (15, 15)
Populating the interactive namespace from numpy and matplotlib
# Root directory of the plant-seedling dataset on disk.
path = '/home/rajat/rajat/fastai_v2/workspace/seed_data/'
indexes = [100,200,210] # arbitrary validation indices — we will not be training the model here
def get_data(sz):
    """Build an ImageClassifierData bunch for the seedling CSV dataset at resolution `sz`."""
    transforms = conv_learner.tfms_from_model(arch, sz, aug_tfms=conv_learner.transforms_side_on)
    return conv_learner.ImageClassifierData.from_csv(
        path=path,
        folder='new_train',
        csv_fname='labels.csv',
        test_name='test',
        tfms=transforms,
        val_idxs=indexes,
        bs=16,
    )
# ResNet-50 backbone pretrained on ImageNet.
arch = models.resnet50
# Input image side length in pixels.
sz = 256
data = get_data(sz)
# fastai learner: pretrained backbone plus a freshly initialised head.
learn = conv_learner.ConvLearner.pretrained(arch,data)
# loading already pretrained model on plant seed data
learn.load('256')
# Keep a direct handle on the underlying nn.Sequential for layer tapping below.
model= learn.model
model
Sequential ( (0): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False) (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True) (2): ReLU (inplace) (3): MaxPool2d (size=(3, 3), stride=(2, 2), padding=(1, 1), dilation=(1, 1)) (4): Sequential ( (0): Bottleneck ( (conv1): Conv2d(64, 64, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True) (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True) (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True) (relu): ReLU (inplace) (downsample): Sequential ( (0): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True) ) ) (1): Bottleneck ( (conv1): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True) (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True) (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True) (relu): ReLU (inplace) ) (2): Bottleneck ( (conv1): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True) (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True) (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True) (relu): ReLU (inplace) ) ) (5): Sequential ( (0): Bottleneck ( (conv1): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True) (conv2): Conv2d(128, 128, 
kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False) (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True) (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True) (relu): ReLU (inplace) (downsample): Sequential ( (0): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False) (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True) ) ) (1): Bottleneck ( (conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True) (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True) (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True) (relu): ReLU (inplace) ) (2): Bottleneck ( (conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True) (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True) (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True) (relu): ReLU (inplace) ) (3): Bottleneck ( (conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True) (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True) (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True) (relu): ReLU (inplace) ) ) (6): Sequential ( (0): Bottleneck ( (conv1): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, 
affine=True) (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False) (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True) (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True) (relu): ReLU (inplace) (downsample): Sequential ( (0): Conv2d(512, 1024, kernel_size=(1, 1), stride=(2, 2), bias=False) (1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True) ) ) (1): Bottleneck ( (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True) (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True) (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True) (relu): ReLU (inplace) ) (2): Bottleneck ( (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True) (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True) (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True) (relu): ReLU (inplace) ) (3): Bottleneck ( (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True) (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True) (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True) (relu): ReLU (inplace) ) (4): Bottleneck ( (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): 
BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True) (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True) (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True) (relu): ReLU (inplace) ) (5): Bottleneck ( (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True) (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True) (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True) (relu): ReLU (inplace) ) ) (7): Sequential ( (0): Bottleneck ( (conv1): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True) (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False) (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True) (conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True) (relu): ReLU (inplace) (downsample): Sequential ( (0): Conv2d(1024, 2048, kernel_size=(1, 1), stride=(2, 2), bias=False) (1): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True) ) ) (1): Bottleneck ( (conv1): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True) (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True) (conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True) (relu): ReLU (inplace) ) (2): Bottleneck ( (conv1): Conv2d(2048, 512, 
kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True) (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True) (conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True) (relu): ReLU (inplace) ) ) (8): AdaptiveConcatPool2d ( (ap): AdaptiveAvgPool2d (output_size=(1, 1)) (mp): AdaptiveMaxPool2d (output_size=(1, 1)) ) (9): Flatten ( ) (10): BatchNorm1d(4096, eps=1e-05, momentum=0.1, affine=True) (11): Dropout (p = 0.25) (12): Linear (4096 -> 512) (13): ReLU () (14): BatchNorm1d(512, eps=1e-05, momentum=0.1, affine=True) (15): Dropout (p = 0.5) (16): Linear (512 -> 12) (17): LogSoftmax () )
# Some Helper functions
from PIL import Image
def image_loader(path, expand_dim=False, size=256):
    """Load an image from `path` as a float32 CHW torch tensor.

    Args:
        path: filesystem path of the image file.
        expand_dim: if True, prepend a batch axis -> (1, 3, size, size).
        size: side length to resize to (default 256, matching the model input).

    Returns:
        torch.FloatTensor of shape (3, size, size), or (1, 3, size, size)
        when `expand_dim` is True.
    """
    img = Image.open(path)
    # Force 3 channels: PNGs may be grayscale or RGBA, which would break the
    # HWC->CHW transpose below and the model's 3-channel stem conv.
    img = img.convert('RGB')
    img = img.resize((size, size))
    img = np.array(img, dtype=np.float32)
    # HWC -> CHW, the layout PyTorch conv layers expect.
    img = np.einsum('ijk->kij', img)
    if expand_dim:
        img = img[None]
    # convert to torch
    img = torch.from_numpy(img)
    return img
def return_sequential(layer_num, model):
    """Return a sub-network made of the first `layer_num` top-level children of `model`."""
    prefix = list(model.children())[:layer_num]
    return nn.Sequential(*prefix)
class get_activation_layer(nn.Module):
    """Wrap `model` and, on forward, return the activation after each prefix
    of its first `total_layers` top-level children.

    outputs[i] is the input pushed through the first i children of `model`
    (so outputs[0] is the input tensor itself).
    """
    def __init__(self, model, total_layers):
        super().__init__()
        self.model = model
        self.total_layers = total_layers
        # Kept for backward compatibility with callers that inspect the
        # prefix sub-models directly (length-0 .. length-(total_layers-1)).
        self.layer_models = [
            nn.Sequential(*list(model.children())[:i])
            for i in range(total_layers)
        ]
    def forward(self, x):
        # Evaluate each child exactly once and snapshot the running activation,
        # instead of re-running every prefix from scratch (O(n) vs O(n^2)).
        children = list(self.model.children())
        self.outputs = []
        current = x
        applied = 0
        for i in range(self.total_layers):
            # If total_layers exceeds the child count, the tail entries repeat
            # the full-model activation (same values the old code produced).
            target = min(i, len(children))
            while applied < target:
                current = children[applied](current)
                applied += 1
            self.outputs.append(current)
        return self.outputs
# Tap the first 16 top-level stages of the trained ResNet for visualisation.
tmp_model = get_activation_layer(model, 16)
# notebook shell magic: output directory for the per-layer figures
!mkdir layer_op
mkdir: cannot create directory ‘layer_op’: File exists
# This writes layer outputs to file
def visulaize_layers(outputs):
    """Save one figure per layer, tiling every feature map of that layer in a grid.

    Args:
        outputs: list of layer activations, each a Variable/tensor of shape
                 (batch, channels, H, W) — as produced by get_activation_layer.

    Side effects: writes layer_op/output{index}.jpg for each layer and prints
    progress; returns nothing.
    """
    for index, layer in enumerate(outputs):
        features = layer.data
        size_plot = features.shape[1]
        if size_plot % 2 != 0:
            size_plot += 1
        # Grid side length ~ sqrt(channel count).
        # np.int was removed in NumPy 1.24 — use the builtin int instead.
        original_size = int(np.ceil(np.sqrt(size_plot)))
        f, axarr = plt.subplots(original_size + 1, original_size + 1)
        i, j = 0, 0
        counter = 1
        for blocks in features:
            for block in blocks:
                counter += 1
                x = block.cpu().numpy()
                # Wrap to the next row of the subplot grid.
                if counter % original_size == 0:
                    i += 1
                    j = 0
                axarr[i,j].imshow(x)
                j += 1
            counter = 0
        print(f'layer {index} done')
        f.savefig(f'layer_op/output{index}.jpg')
        # Close the figure so dozens of large grids don't accumulate in memory.
        plt.close(f)
    print('image generated')
# Sample image we will push through the network.
img_path = 'train/Black-grass/34a672a63.png'
i = image_loader('train/Black-grass/34a672a63.png', expand_dim=True)
# Move the input to the GPU to match the model's device.
i = i.cuda()
# Forward pass: collect the activation after each tapped stage.
layer_outputs = tmp_model(Variable(i))
from IPython.display import Image as I
# Display the original input image inline in the notebook.
I(img_path)
im = plt.imread('layer_op/output0.jpg')
# Enlarge the default figure so the tiled feature maps are legible.
pylab.rcParams['figure.figsize'] = (15,15)
plt.imshow(im)
<matplotlib.image.AxesImage at 0x7f6a93230518>
Layer 1 takes a 3-channel input.
Its structure is Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
# Feature maps after stage 1 (the stem convolution).
im = plt.imread('layer_op/output1.jpg')
pylab.rcParams['figure.figsize'] = (20, 20)
plt.imshow(im)
<matplotlib.image.AxesImage at 0x7f6a92d6acc0>
After applying Relu.
# Feature maps after stage 2 (ReLU).
im = plt.imread('layer_op/output2.jpg')
pylab.rcParams['figure.figsize'] = (20, 20)
plt.imshow(im)
<matplotlib.image.AxesImage at 0x7f6a930d44a8>
After Maxpooling MaxPool2d (size=(3, 3), stride=(2, 2), padding=(1, 1), dilation=(1, 1))
# Feature maps after stage 3 (max pooling).
im = plt.imread('layer_op/output3.jpg')
pylab.rcParams['figure.figsize'] = (20, 20)
plt.imshow(im)
<matplotlib.image.AxesImage at 0x7f6a92dbac50>
(4): Sequential (
(0): Bottleneck (
(conv1): Conv2d(64, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True)
(conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True)
(conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU (inplace)
(downsample): Sequential (
(0): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True)
)
)
(1): Bottleneck (
(conv1): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True)
(conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True)
(conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU (inplace)
)
(2): Bottleneck (
(conv1): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True)
(conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True)
(conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU (inplace)
)
)
The image at (0,1) shows the strongest correspondence with the input image.
# Feature maps after stage 4 (first bottleneck group).
im = plt.imread('layer_op/output4.jpg')
pylab.rcParams['figure.figsize'] = (20, 20)
plt.imshow(im)
<matplotlib.image.AxesImage at 0x7f6a92eea470>
(5): Sequential (
(0): Bottleneck (
(conv1): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True)
(conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True)
(conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU (inplace)
(downsample): Sequential (
(0): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False)
(1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True)
)
)
(1): Bottleneck (
(conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True)
(conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True)
(conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU (inplace)
)
(2): Bottleneck (
(conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True)
(conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True)
(conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU (inplace)
)
(3): Bottleneck (
(conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True)
(conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True)
(conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True)
(relu): ReLU (inplace)
)
)
# Feature maps after stage 5 (second bottleneck group).
im = plt.imread('layer_op/output5.jpg')
pylab.rcParams['figure.figsize'] = (20, 20)
plt.imshow(im)
<matplotlib.image.AxesImage at 0x7f6a9301ebe0>
# Feature maps after stage 6 (third bottleneck group).
im = plt.imread('layer_op/output6.jpg')
pylab.rcParams['figure.figsize'] = (20, 20)
plt.imshow(im)
<matplotlib.image.AxesImage at 0x7f6a930573c8>
We could plot further layers, but they become harder to interpret: deeper layers capture increasingly complex structure.
!pip install Pandoc
Collecting Pandoc Downloading pandoc-1.0.2.tar.gz (488kB) 100% |████████████████████████████████| 491kB 2.4MB/s ta 0:00:011 Collecting ply (from Pandoc) Building wheels for collected packages: Pandoc Running setup.py bdist_wheel for Pandoc ... done Stored in directory: /home/rajat/.cache/pip/wheels/08/c5/30/55eda9e3884c232e85c413b551e8bade2700e92e4094531270 Successfully built Pandoc Installing collected packages: ply, Pandoc Successfully installed Pandoc-1.0.2 ply-3.10