In [1]:
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D, BatchNormalization
import os
from PIL import Image
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
Using TensorFlow backend.
In [2]:
enc = OneHotEncoder(categories='auto')
enc.fit([[0], [1]])

# Map a one-hot class index back to its human-readable label
def names(number):
    if number == 0:
        return 'Tumor'
    else:
        return 'Normal'
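With this fit, class 0 ('Tumor') maps to [1, 0] and class 1 ('Normal') maps to [0, 1]. A quick sanity check of the mapping (a minimal sketch, not part of the original run):

# Verify the one-hot mapping produced by the fitted encoder
print(enc.transform([[0]]).toarray())  # [[1. 0.]] -> Tumor
print(enc.transform([[1]]).toarray())  # [[0. 1.]] -> Normal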
In [3]:
data = []
paths = []
ans = []

# Collect the paths of all tumor ('yes') MRI images
for r, d, f in os.walk(r"D:\Downloads\brain-mri-images-for-brain-tumor-detection\yes"):
    for file in f:
        if '.jpg' in file:
            paths.append(os.path.join(r, file))

# Load each image, resize it to 128x128, and label it as class 0 (Tumor)
for path in paths:
    img = Image.open(path)
    x = img.resize((128, 128))
    data.append(np.array(x))
    ans.append(enc.transform([[0]]).toarray())
In [4]:
# Collect the paths of all healthy ('no') MRI images
paths = []
for r, d, f in os.walk(r"D:\Downloads\brain-mri-images-for-brain-tumor-detection\no"):
    for file in f:
        if '.jpg' in file:
            paths.append(os.path.join(r, file))

# Load each image, resize it to 128x128, and label it as class 1 (Normal)
for path in paths:
    img = Image.open(path)
    x = img.resize((128, 128))
    data.append(np.array(x))
    ans.append(enc.transform([[1]]).toarray())
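Note that the pixel values stay in the raw 0-255 range throughout this notebook, which is one likely reason the loss values during training start so large. A minimal normalization step, had one been applied before training, could look like this (a sketch; not done in this run):

# Hypothetical preprocessing (not applied here): scale pixels to [0, 1]
data = np.array(data, dtype='float32') / 255.0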
In [5]:
data = np.array(data)
data.shape
Out[5]:
(139, 128, 128, 3)
In [6]:
ans = np.array(ans)        # shape (139, 1, 2): a stack of (1, 2) one-hot rows
ans = ans.reshape(139, 2)  # flatten to one one-hot row per image
In [7]:
# Split the data into train and test sets: 80% train, 20% test.
x_train, x_test, y_train, y_test = train_test_split(data, ans, test_size=0.2, shuffle=True, random_state=69)
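With only 139 images, a purely random split can leave the two classes unevenly represented in the 28-image test set. A stratified split (a sketch; not used in this run) would preserve the Tumor/Normal ratio in both sets:

# Hypothetical stratified split (not used here)
x_train, x_test, y_train, y_test = train_test_split(
    data, ans, test_size=0.2, shuffle=True, random_state=69,
    stratify=ans.argmax(axis=1))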
In [16]:
model = Sequential()

# Block 1: two 5x5 convolutions, then batch norm, pooling, and dropout
model.add(Conv2D(32, kernel_size=(5, 5), input_shape=(128, 128, 3), padding='same'))
model.add(Conv2D(32, kernel_size=(5, 5), activation='selu', padding='same'))

model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

# Block 2: two 3x3 convolutions, then batch norm, pooling, and dropout
model.add(Conv2D(64, kernel_size=(3, 3), activation='selu', padding='same'))
model.add(Conv2D(64, kernel_size=(3, 3), activation='selu', padding='same'))

model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Dropout(0.25))

# Classifier head: flatten, fully connected layer, and 2-way softmax
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(2, activation='softmax'))

model.compile(loss='categorical_crossentropy', optimizer='Adamax')
print(model.summary())
Model: "sequential_3"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv2d_9 (Conv2D)            (None, 128, 128, 32)      2432      
_________________________________________________________________
conv2d_10 (Conv2D)           (None, 128, 128, 32)      25632     
_________________________________________________________________
batch_normalization_1 (Batch (None, 128, 128, 32)      128       
_________________________________________________________________
max_pooling2d_5 (MaxPooling2 (None, 64, 64, 32)        0         
_________________________________________________________________
dropout_7 (Dropout)          (None, 64, 64, 32)        0         
_________________________________________________________________
conv2d_11 (Conv2D)           (None, 64, 64, 64)        18496     
_________________________________________________________________
conv2d_12 (Conv2D)           (None, 64, 64, 64)        36928     
_________________________________________________________________
batch_normalization_2 (Batch (None, 64, 64, 64)        256       
_________________________________________________________________
max_pooling2d_6 (MaxPooling2 (None, 32, 32, 64)        0         
_________________________________________________________________
dropout_8 (Dropout)          (None, 32, 32, 64)        0         
_________________________________________________________________
flatten_3 (Flatten)          (None, 65536)             0         
_________________________________________________________________
dense_7 (Dense)              (None, 512)               33554944  
_________________________________________________________________
dropout_9 (Dropout)          (None, 512)               0         
_________________________________________________________________
dense_8 (Dense)              (None, 2)                 1026      
=================================================================
Total params: 33,639,842
Trainable params: 33,639,650
Non-trainable params: 192
_________________________________________________________________
None
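The model is compiled with a loss function only, so the training log below reports nothing but loss. To also track classification accuracy per epoch, the compile call could pass a metrics argument (a sketch; not used in this run):

# Hypothetical compile call (not used here) that also tracks accuracy
model.compile(loss='categorical_crossentropy', optimizer='Adamax',
              metrics=['accuracy'])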
In [17]:
history = model.fit(x_train, y_train, epochs=15, batch_size=40, verbose=1,validation_data=(x_test, y_test))
Train on 111 samples, validate on 28 samples
Epoch 1/15
111/111 [==============================] - 1s 6ms/step - loss: 107.3591 - val_loss: 918.3828
Epoch 2/15
111/111 [==============================] - 0s 2ms/step - loss: 65.7928 - val_loss: 356.9780
Epoch 3/15
111/111 [==============================] - 0s 2ms/step - loss: 33.9498 - val_loss: 273.0914
Epoch 4/15
111/111 [==============================] - 0s 2ms/step - loss: 16.6639 - val_loss: 167.3911
Epoch 5/15
111/111 [==============================] - 0s 2ms/step - loss: 6.9701 - val_loss: 55.2735
Epoch 6/15
111/111 [==============================] - 0s 2ms/step - loss: 8.7837 - val_loss: 57.0248
Epoch 7/15
111/111 [==============================] - 0s 2ms/step - loss: 4.9218 - val_loss: 62.3245
Epoch 8/15
111/111 [==============================] - 0s 2ms/step - loss: 5.6524 - val_loss: 33.6150
Epoch 9/15
111/111 [==============================] - 0s 2ms/step - loss: 3.2008 - val_loss: 23.5095
Epoch 10/15
111/111 [==============================] - 0s 2ms/step - loss: 1.5713 - val_loss: 20.3074
Epoch 11/15
111/111 [==============================] - 0s 2ms/step - loss: 1.0174 - val_loss: 18.3633
Epoch 12/15
111/111 [==============================] - 0s 2ms/step - loss: 0.6053 - val_loss: 17.0944
Epoch 13/15
111/111 [==============================] - 0s 2ms/step - loss: 0.3631 - val_loss: 15.4051
Epoch 14/15
111/111 [==============================] - 0s 2ms/step - loss: 0.5016 - val_loss: 14.5632
Epoch 15/15
111/111 [==============================] - 0s 2ms/step - loss: 0.8183 - val_loss: 14.0119
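Training loss falls close to zero while validation loss, though improving, remains far higher, a sign of overfitting on this small dataset. A common mitigation is early stopping on validation loss (a minimal sketch assuming the same model and data; not used in this run):

from keras.callbacks import EarlyStopping

# Hypothetical training call (not used here): stop once val_loss stops
# improving and restore the best weights seen during training
early_stop = EarlyStopping(monitor='val_loss', patience=3,
                           restore_best_weights=True)
history = model.fit(x_train, y_train, epochs=50, batch_size=40, verbose=1,
                    validation_data=(x_test, y_test), callbacks=[early_stop])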
In [18]:
# Plot training & validation loss values
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model Loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Validation'], loc='upper right')
plt.show()
In [23]:
img = Image.open(r"D:\Downloads\brain-mri-images-for-brain-tumor-detection\no\N17.jpg")
x = np.array(img.resize((128, 128)))
x = x.reshape(1, 128, 128, 3)
answ = model.predict_on_batch(x)
classification = np.argmax(answ)
imshow(img)
print(str(answ[0][classification] * 100) + '% Confidence This Is ' + names(classification))
100.0% Confidence This Is Normal
In [24]:
img = Image.open(r"D:\Downloads\brain-mri-images-for-brain-tumor-detection\yes\Y3.jpg")
x = np.array(img.resize((128, 128)))
x = x.reshape(1, 128, 128, 3)
answ = model.predict_on_batch(x)
classification = np.argmax(answ)
imshow(img)
print(str(answ[0][classification] * 100) + '% Confidence This Is A ' + names(classification))
100.0% Confidence This Is A Tumor
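The two prediction cells above repeat the same steps, so they could be folded into a small helper (a hypothetical predict_image function, shown as a sketch):

# Hypothetical helper consolidating the prediction steps above
def predict_image(path):
    img = Image.open(path)
    x = np.array(img.resize((128, 128))).reshape(1, 128, 128, 3)
    answ = model.predict_on_batch(x)
    classification = np.argmax(answ)
    imshow(img)
    print(str(answ[0][classification] * 100) + '% Confidence This Is '
          + names(classification))

predict_image(r"D:\Downloads\brain-mri-images-for-brain-tumor-detection\yes\Y3.jpg")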