import cv2
import numpy as np
import pandas as pd
from random import shuffle
import os
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
from sklearn.model_selection import KFold, StratifiedKFold, train_test_split
from sklearn import metrics
from PIL import Image
import pickle
from mpl_toolkits.axes_grid1 import ImageGrid
import keras
from keras.models import Model
from keras.optimizers import Adam, SGD
from keras.applications.inception_v3 import InceptionV3
from keras.applications.densenet import DenseNet121
from keras.preprocessing import image
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, LearningRateScheduler, Callback, CSVLogger
from keras.preprocessing.image import ImageDataGenerator, array_to_img
from keras.applications.vgg16 import VGG16
from keras.applications.resnet50 import ResNet50
from keras.applications.xception import Xception
from keras.models import Model, Sequential
from keras.utils import to_categorical
from keras.layers import Input, Dense, BatchNormalization, Flatten, Dropout, Convolution2D, Activation, MaxPooling2D, GlobalAveragePooling2D
from random import shuffle
import math
from keras import losses
#master_path = "C:\\Users\\pochetti\\WorkDocs\\Desktop\\Fra\\Francesco\\Kaggle\\Invasive"
master_path = "/home/paperspace/Invasive"
Using TensorFlow backend. /home/paperspace/anaconda3/lib/python3.6/importlib/_bootstrap.py:205: RuntimeWarning: compiletime version 3.5 of module 'tensorflow.python.framework.fast_tensor_util' does not match runtime version 3.6 return f(*args, **kwds)
def load_train(path):
    """Load the training labels and build full image file paths.

    Reads 'train_labels.csv' from the module-level `master_path` directory.
    The csv's first column is assumed to hold an integer image id and there
    is an 'invasive' 0/1 label column.

    Parameters
    ----------
    path : str
        Directory containing the training .jpg images.

    Returns
    -------
    train_files : list of str
        Full path '<path>/<id>.jpg' for every row of the label frame.
    train_set : pandas.DataFrame
        The label frame, with a 'name' column set to the file paths.
    train_label : numpy.ndarray
        The 0/1 'invasive' labels, aligned with `train_files`.
    """
    train_set = pd.read_csv(os.path.join(os.sep, master_path, 'train_labels.csv'))
    train_label = np.array(train_set['invasive'])
    # Image id is the first csv column; avoid deprecated chained
    # indexing (iloc[i][0]) by slicing the column once.
    train_files = [path + os.sep + str(int(name)) + '.jpg'
                   for name in train_set.iloc[:, 0]]
    train_set['name'] = train_files
    return train_files, train_set, train_label
############################################################################################
############################################################################################
def augment(src, choice):
    """Apply one of seven fixed geometric transforms to an image array.

    choice 0: rotate 90 CCW; 1: flip vertically; 2: rotate 180;
    3: flip horizontally; 4: rotate 270 CCW; 5: rotate 180 then flip
    horizontally; 6 (or any other value): return the input unchanged.
    """
    if choice == 0:
        # Rotate 90
        return np.rot90(src, 1)
    if choice == 1:
        # flip vertically
        return np.flipud(src)
    if choice == 2:
        # Rotate 180
        return np.rot90(src, 2)
    if choice == 3:
        # flip horizontally
        return np.fliplr(src)
    if choice == 4:
        # Rotate 90 counter-clockwise
        return np.rot90(src, 3)
    if choice == 5:
        # Rotate 180 and flip horizontally
        return np.fliplr(np.rot90(src, 2))
    # leave it as is
    return src
############################################################################################
############################################################################################
def read_augment_save(frompath, topath):
    """Open the image at `frompath`, randomly transform it, save to `topath`.

    The transform index is drawn from randint(6), i.e. choices 0-5 only,
    so the saved copy is always a transformed version of the source.
    """
    pixels = np.asarray(Image.open(frompath))
    pixels = augment(pixels, np.random.randint(6))
    Image.fromarray(pixels.astype(np.uint8)).save(topath)
############################################################################################
############################################################################################
def preprocess_input_resnet50(x):
    """Run Keras' ResNet50 preprocessing on a single image array."""
    from keras.applications.resnet50 import preprocess_input
    # Add a leading batch axis, preprocess, and drop the axis again.
    batch = x[np.newaxis, ...]
    return preprocess_input(batch)[0]
def preprocess_input_vgg16(x):
    """Run Keras' VGG16 preprocessing on a single image array."""
    from keras.applications.vgg16 import preprocess_input
    # Add a leading batch axis, preprocess, and drop the axis again.
    batch = x[np.newaxis, ...]
    return preprocess_input(batch)[0]
def preprocess_input_inception(x):
    """Run Keras' InceptionV3 preprocessing on a single image array."""
    from keras.applications.inception_v3 import preprocess_input
    # Add a leading batch axis, preprocess, and drop the axis again.
    batch = x[np.newaxis, ...]
    return preprocess_input(batch)[0]
def preprocess_input_densenet(x):
    """Run Keras' DenseNet preprocessing on a single image array."""
    from keras.applications.densenet import preprocess_input
    # Add a leading batch axis, preprocess, and drop the axis again.
    batch = x[np.newaxis, ...]
    return preprocess_input(batch)[0]
############################################################################################
############################################################################################
def prepare_image(filepath, size, preprocessing_function):
    """Load an image, resize, randomly augment, and network-preprocess it.

    Parameters
    ----------
    filepath : str
        Path of the .jpg image to load.
    size : tuple of int
        Target (width, height) for PIL's resize.
    preprocessing_function : callable
        One of the preprocess_input_* wrappers; applied last.

    Returns
    -------
    numpy.ndarray
        Float64 image array, augmented and preprocessed.
    """
    img = Image.open(filepath)
    # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter.
    img = img.resize(size, Image.LANCZOS)
    img = np.array(img).astype(np.float64)
    # randint(7) includes choice 6 (identity), so some images pass through
    # unchanged — unlike read_augment_save, which always transforms.
    img = augment(img, np.random.randint(7))
    img = preprocessing_function(img)
    return img
############################################################################################
############################################################################################
def data_generator(data, which_net, size=(800,800), batch_size=8):
    """Endless batch generator over a DataFrame of (filepath, label) rows.

    Parameters
    ----------
    data : pandas.DataFrame
        Two columns, file path then 0/1 label, in that order.
    which_net : str
        One of 'resnet50', 'densenet', 'inception', 'vgg' — selects the
        matching preprocess_input_* wrapper.
    size : tuple of int
        Target image size forwarded to prepare_image.
    batch_size : int
        Images per yielded batch; the last batch of a pass may be smaller.

    Yields
    ------
    (numpy.ndarray, numpy.ndarray)
        Preprocessed image batch and the matching uint8 label vector.

    Raises
    ------
    ValueError
        If `which_net` is not one of the supported names.
    """
    if which_net == 'resnet50':
        preprocessing_function=preprocess_input_resnet50
    elif which_net == 'densenet':
        preprocessing_function=preprocess_input_densenet
    elif which_net == 'inception':
        preprocessing_function=preprocess_input_inception
    elif which_net == 'vgg':
        preprocessing_function=preprocess_input_vgg16
    else:
        # The original fell through silently and crashed later with a
        # NameError on preprocessing_function; fail fast instead.
        raise ValueError("unknown which_net: %r" % (which_net,))
    while True:
        for start in range(0, len(data), batch_size):
            x_batch = []
            y_batch = []
            end = min(start + batch_size, len(data))
            data_batch = data[start:end]
            for filepath, tag in data_batch.values:
                img = prepare_image(filepath, size, preprocessing_function)
                x_batch.append(img)
                y_batch.append(tag)
            x_batch = np.array(x_batch)
            y_batch = np.array(y_batch, np.uint8)
            yield x_batch, y_batch
############################################################################################
############################################################################################
def get_model(which_net, img_dim=(800,800,3)):
    """Build a binary classifier on top of a pretrained ImageNet backbone.

    Architecture: input -> BatchNormalization -> backbone (no top) ->
    GlobalAveragePooling2D -> Dropout(0.5) -> Dense(1, sigmoid).

    Parameters
    ----------
    which_net : str
        One of 'resnet50', 'inception', 'densenet'.
    img_dim : tuple of int
        Input shape (height, width, channels).

    Returns
    -------
    keras.models.Model
        Uncompiled single-output sigmoid classifier.

    Raises
    ------
    ValueError
        If `which_net` is not a supported backbone name (the original
        fell through and crashed later with a NameError on base_model).
    """
    if which_net == 'resnet50':
        base_model = ResNet50(include_top=False, weights='imagenet',input_shape=img_dim)
    elif which_net == 'inception':
        base_model = InceptionV3(include_top=False, weights='imagenet',input_shape=img_dim)
    elif which_net == 'densenet':
        base_model = DenseNet121(include_top=False, weights='imagenet',input_shape=img_dim)
    else:
        raise ValueError("unknown which_net: %r" % (which_net,))
    input_tensor = Input(shape=img_dim)
    bn = BatchNormalization()(input_tensor)
    x = base_model(bn)
    x = GlobalAveragePooling2D()(x)
    x = Dropout(0.5)(x)
    output = Dense(1, activation='sigmoid')(x)
    model = Model(input_tensor, output)
    return model
# Build the file list and label frame for images under <master_path>/train.
train_files, train_set, train_label = load_train(master_path + os.sep + 'train')
train_set.head()
name | invasive | |
---|---|---|
0 | /home/paperspace/Invasive/train/1.jpg | 0 |
1 | /home/paperspace/Invasive/train/2.jpg | 0 |
2 | /home/paperspace/Invasive/train/3.jpg | 1 |
3 | /home/paperspace/Invasive/train/4.jpg | 0 |
4 | /home/paperspace/Invasive/train/5.jpg | 1 |
train_set.shape
(2295, 2)
# Class-balance bar plot: sample counts of non-invasive vs invasive.
f, ax = plt.subplots(1,1,figsize=(8,6))
ax = sns.barplot(x=['Not Invasive','Invasive'], y=train_set.groupby(['invasive'],as_index=False).count()['name'])
ax.set(ylabel='Count of Samples')
plt.show()
# Oversample the minority (non-invasive) class: copy 601 random
# non-invasive images, save an augmented version of each under a fresh
# numeric id, and extend the label frame accordingly.
train_files, train_set, train_label = load_train(master_path + os.sep + 'train')
not_inv_to_add = 601
not_inv = train_set.loc[train_set.invasive == 0,:].sample(not_inv_to_add, random_state=3)
# New ids continue after the largest existing id in the label csv.
max_file = pd.read_csv(os.path.join(os.sep, master_path, 'train_labels.csv')).name.max()
new_rows = []
for i in range(max_file + 1, max_file + not_inv_to_add + 1):
    frompath = not_inv.iloc[i - max_file - 1, 0]
    topath = os.path.join(master_path, 'train', str(i) + '.jpg')
    read_augment_save(frompath, topath)
    new_rows.append({'name': topath, 'invasive': 0})
# DataFrame.append was removed in pandas 2.0; a single concat is also
# O(n) instead of the quadratic append-inside-the-loop.
train_set = pd.concat([train_set, pd.DataFrame(new_rows)[['name', 'invasive']]], ignore_index=True)
train_set.to_pickle(os.path.join(master_path, 'train_set.pkl'))
f, ax = plt.subplots(1,1,figsize=(8,6)) ax = sns.barplot(x=['Not Invasive','Invasive'], y=train_set.groupby(['invasive'],as_index=False).count()['name']) ax.set(ylabel='Count of Samples') plt.show()
# Show 16 random training images with their labels on a 4x4 grid.
fig = plt.figure(1, figsize=(16, 16))
invas_dict = {0: 'Not Invasive', 1: 'Invasive'}
grid = ImageGrid(fig, 111, nrows_ncols=(4, 4), axes_pad=0.05)
for idx, (img_path, invasive) in enumerate(train_set.sample(16).values):
    cell = grid[idx]
    cell.imshow(image.load_img(img_path))
    cell.text(10, 200, 'LABEL: %s' % invas_dict[invasive], color='w', backgroundcolor='k', alpha=0.8)
    cell.axis('off')
plt.show()
# Pull one preprocessed batch from the generator and display it on a
# 4x4 grid together with the labels (visual sanity-check of the
# augmentation + preprocessing pipeline).
x, y = next(data_generator(train_set, 'inception', batch_size=16))
fig = plt.figure(1, figsize=(16, 16))
invas_dict = {0: 'Not Invasive', 1: 'Invasive'}
grid = ImageGrid(fig, 111, nrows_ncols=(4, 4), axes_pad=0.05)
for idx, (img, invasive) in enumerate(zip(x, y)):
    cell = grid[idx]
    cell.imshow(img)
    cell.text(10, 200, 'LABEL: %s' % invas_dict[invasive], color='w', backgroundcolor='k', alpha=0.8)
    cell.axis('off')
plt.show()
# Reload the oversampled frame and hold out 15% as a fixed test split.
train_set = pd.read_pickle(os.path.join(master_path, 'train_set.pkl'))
X_train, X_test, y_train, y_test = train_test_split(train_set, train_set.invasive, test_size=0.15, random_state=42)
print(X_train.shape, X_test.shape)
(2461, 2) (435, 2)
# Sanity-check: instantiate the ResNet50 classifier at 300x300 and inspect it.
res = get_model('resnet50', img_dim=(300,300,3))
res.summary()
_________________________________________________________________ Layer (type) Output Shape Param # ================================================================= input_2 (InputLayer) (None, 300, 300, 3) 0 _________________________________________________________________ batch_normalization_1 (Batch (None, 300, 300, 3) 12 _________________________________________________________________ resnet50 (Model) (None, 1, 1, 2048) 23587712 _________________________________________________________________ global_average_pooling2d_1 ( (None, 2048) 0 _________________________________________________________________ dropout_1 (Dropout) (None, 2048) 0 _________________________________________________________________ dense_1 (Dense) (None, 1) 2049 ================================================================= Total params: 23,589,773 Trainable params: 23,536,647 Non-trainable params: 53,126 _________________________________________________________________
# Same sanity-check for the InceptionV3 variant.
incept = get_model('inception', img_dim=(300,300,3))
incept.summary()
_________________________________________________________________ Layer (type) Output Shape Param # ================================================================= input_4 (InputLayer) (None, 300, 300, 3) 0 _________________________________________________________________ batch_normalization_96 (Batc (None, 300, 300, 3) 12 _________________________________________________________________ inception_v3 (Model) (None, 8, 8, 2048) 21802784 _________________________________________________________________ global_average_pooling2d_2 ( (None, 2048) 0 _________________________________________________________________ dropout_2 (Dropout) (None, 2048) 0 _________________________________________________________________ dense_2 (Dense) (None, 1) 2049 ================================================================= Total params: 21,804,845 Trainable params: 21,770,407 Non-trainable params: 34,438 _________________________________________________________________
# ---- 5-fold cross-validated training of ResNet50 and InceptionV3 ----
# NOTE(review): this is a flattened notebook cell — the indentation of
# the fold loop was lost on export, so the code is kept byte-for-byte.
# Everything from x_train down to (presumably) the final score pickling
# belongs inside the `for` over folds; confirm the exact loop extent
# against the original notebook before re-running as a plain script.
batch_size = 8
size = (300, 300)
img_dim = (300, 300, 3)
epochs = 30
# 5 shuffled folds with a fixed seed for reproducibility.
kf = KFold(n_splits=5, shuffle=True, random_state=3)
# Per-fold accuracy bookkeeping: *_v = validation folds, *_t = train folds.
scores_res_v = []
scores_incep_v = []
scores_res_t = []
scores_incep_t = []
x = X_train
y = X_train.invasive
for i, (train_index, test_index) in enumerate(kf.split(x)):
# Split rows and labels for this fold.
x_train = x.iloc[train_index]; x_valid = x.iloc[test_index]
y_train = y.iloc[train_index]; y_valid = y.iloc[test_index]
# NOTE(review): len/batch_size is a float; old keras fit_generator
# tolerated this, newer APIs expect an int (math.ceil would be exact).
train_steps = len(x_train) / batch_size
valid_steps = len(x_valid) / batch_size
# Fresh models per fold so no weights leak across folds.
res = get_model('resnet50', img_dim=img_dim)
incep = get_model('inception', img_dim=img_dim)
# Stop after 3 non-improving epochs; shrink LR by 10x after 1.
early = EarlyStopping(monitor='val_loss', patience=3, verbose=1, min_delta=1e-4)
reducelr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=1, cooldown=1, verbose=1, min_lr=1e-7)
# Checkpoint only the best weights (by val_loss) per network per fold.
checkincept = ModelCheckpoint(filepath=os.path.join(master_path, 'inception.fold_' + str(i) + '.hdf5'), verbose=1,save_best_only=True,
save_weights_only=True, mode='auto')
checkres = ModelCheckpoint(filepath=os.path.join(master_path, 'resnet.fold_' + str(i) + '.hdf5'), verbose=1,save_best_only=True,
save_weights_only=True, mode='auto')
res.compile(optimizer=Adam(lr=0.0001), loss='binary_crossentropy', metrics = ['accuracy'])
print('Fitting ResNet Fold ' + str(i))
res.fit_generator(data_generator(x_train, 'resnet50', size=size, batch_size=batch_size),
train_steps, epochs=epochs, verbose=2,
validation_data=data_generator(x_valid, 'resnet50', size=size, batch_size=batch_size),
validation_steps=valid_steps, callbacks=[early, reducelr, checkres])
# Persist the per-epoch training history for later plotting.
with open(os.path.join(master_path, 'resnet.history.fold_' + str(i) + '.pickle'), 'wb') as handle:
pickle.dump(res.history.history, handle, protocol=pickle.HIGHEST_PROTOCOL)
incep.compile(optimizer=Adam(lr=0.0001), loss='binary_crossentropy', metrics = ['accuracy'])
print('Fitting Inception Fold ' + str(i))
incep.fit_generator(data_generator(x_train, 'inception', size=size, batch_size=batch_size),
train_steps, epochs=epochs, verbose=2,
validation_data=data_generator(x_valid, 'inception', size=size, batch_size=batch_size),
validation_steps=valid_steps, callbacks=[early, reducelr, checkincept])
with open(os.path.join(master_path, 'inception.history.fold_' + str(i) + '.pickle'), 'wb') as handle:
pickle.dump(incep.history.history, handle, protocol=pickle.HIGHEST_PROTOCOL)
# Restore the best (checkpointed) weights before scoring the fold.
res.load_weights(filepath=os.path.join(master_path, 'resnet.fold_' + str(i) + '.hdf5'))
incep.load_weights(filepath=os.path.join(master_path, 'inception.fold_' + str(i) + '.hdf5'))
# NOTE(review): data_generator augments during prediction too (randint(7)
# in prepare_image), so these scores include test-time augmentation noise.
preds_valid_res = res.predict_generator(generator=data_generator(x_valid, 'resnet50', size=size, batch_size=batch_size),
steps=valid_steps, verbose=0)[:, 0]
preds_train_res = res.predict_generator(generator=data_generator(x_train, 'resnet50', size=size, batch_size=batch_size),
steps=train_steps, verbose=0)[:, 0]
preds_valid_incep = incep.predict_generator(generator=data_generator(x_valid, 'inception', size=size, batch_size=batch_size),
steps=valid_steps, verbose=0)[:, 0]
preds_train_incep = incep.predict_generator(generator=data_generator(x_train, 'inception', size=size, batch_size=batch_size),
steps=train_steps, verbose=0)[:, 0]
# Threshold the sigmoid outputs at 0.5 and record fold accuracies.
scores_res_v.append(metrics.accuracy_score(y_valid, np.where(preds_valid_res > 0.5, 1, 0)))
scores_incep_v.append(metrics.accuracy_score(y_valid, np.where(preds_valid_incep > 0.5, 1, 0)))
scores_res_t.append(metrics.accuracy_score(y_train, np.where(preds_train_res > 0.5, 1, 0)))
scores_incep_t.append(metrics.accuracy_score(y_train, np.where(preds_train_incep > 0.5, 1, 0)))
# Persist all four score lists (overwrites the same file each time).
to_be_saved = [scores_res_v, scores_incep_v, scores_res_t, scores_incep_t]
with open(os.path.join(master_path, 'scores.pickle'), 'wb') as handle:
pickle.dump(to_be_saved, handle, protocol=pickle.HIGHEST_PROTOCOL)
Fitting ResNet Fold 0 Epoch 1/30 Epoch 00001: val_loss improved from inf to 0.15619, saving model to /home/paperspace/Invasive/resnet.fold_0.hdf5 - 155s - loss: 0.2565 - acc: 0.9050 - val_loss: 0.1562 - val_acc: 0.9533 Epoch 2/30 Epoch 00002: val_loss improved from 0.15619 to 0.11835, saving model to /home/paperspace/Invasive/resnet.fold_0.hdf5 - 130s - loss: 0.1432 - acc: 0.9543 - val_loss: 0.1184 - val_acc: 0.9655 Epoch 3/30 Epoch 00003: val_loss improved from 0.11835 to 0.08123, saving model to /home/paperspace/Invasive/resnet.fold_0.hdf5 - 130s - loss: 0.1094 - acc: 0.9634 - val_loss: 0.0812 - val_acc: 0.9757 Epoch 4/30 Epoch 00004: val_loss did not improve - 129s - loss: 0.0602 - acc: 0.9802 - val_loss: 0.1926 - val_acc: 0.9513 Epoch 5/30 Epoch 00005: ReduceLROnPlateau reducing learning rate to 9.999999747378752e-06. Epoch 00005: val_loss did not improve - 133s - loss: 0.0550 - acc: 0.9832 - val_loss: 0.1050 - val_acc: 0.9716 Epoch 6/30 Epoch 00006: val_loss did not improve - 129s - loss: 0.0396 - acc: 0.9888 - val_loss: 0.0838 - val_acc: 0.9858 Epoch 00006: early stopping Fitting Inception Fold 0 Epoch 1/30 Epoch 00001: val_loss improved from inf to 0.08287, saving model to /home/paperspace/Invasive/inception.fold_0.hdf5 - 135s - loss: 0.2324 - acc: 0.9111 - val_loss: 0.0829 - val_acc: 0.9736 Epoch 2/30 Epoch 00002: val_loss did not improve - 114s - loss: 0.1003 - acc: 0.9675 - val_loss: 0.1010 - val_acc: 0.9675 Epoch 3/30 Epoch 00003: val_loss improved from 0.08287 to 0.05786, saving model to /home/paperspace/Invasive/inception.fold_0.hdf5 - 115s - loss: 0.0740 - acc: 0.9751 - val_loss: 0.0579 - val_acc: 0.9817 Epoch 4/30 Epoch 00004: val_loss did not improve - 115s - loss: 0.0340 - acc: 0.9883 - val_loss: 0.0591 - val_acc: 0.9878 Epoch 5/30 Epoch 00005: val_loss improved from 0.05786 to 0.04473, saving model to /home/paperspace/Invasive/inception.fold_0.hdf5 - 115s - loss: 0.0462 - acc: 0.9842 - val_loss: 0.0447 - val_acc: 0.9817 Epoch 6/30 Epoch 00006: 
val_loss improved from 0.04473 to 0.03417, saving model to /home/paperspace/Invasive/inception.fold_0.hdf5 - 115s - loss: 0.0252 - acc: 0.9939 - val_loss: 0.0342 - val_acc: 0.9878 Epoch 7/30 Epoch 00007: val_loss did not improve - 115s - loss: 0.0321 - acc: 0.9909 - val_loss: 0.0424 - val_acc: 0.9797 Epoch 8/30 Epoch 00008: ReduceLROnPlateau reducing learning rate to 9.999999747378752e-06. Epoch 00008: val_loss did not improve - 118s - loss: 0.0248 - acc: 0.9924 - val_loss: 0.0817 - val_acc: 0.9757 Epoch 9/30 Epoch 00009: val_loss did not improve - 114s - loss: 0.0065 - acc: 0.9985 - val_loss: 0.0554 - val_acc: 0.9838 Epoch 00009: early stopping Fitting ResNet Fold 1 Epoch 1/30 Epoch 00001: val_loss improved from inf to 0.28693, saving model to /home/paperspace/Invasive/resnet.fold_1.hdf5 - 156s - loss: 0.2539 - acc: 0.9023 - val_loss: 0.2869 - val_acc: 0.9187 Epoch 2/30 Epoch 00002: val_loss improved from 0.28693 to 0.14894, saving model to /home/paperspace/Invasive/resnet.fold_1.hdf5 - 130s - loss: 0.1433 - acc: 0.9509 - val_loss: 0.1489 - val_acc: 0.9533 Epoch 3/30 Epoch 00003: val_loss did not improve - 130s - loss: 0.1036 - acc: 0.9691 - val_loss: 0.1589 - val_acc: 0.9614 Epoch 4/30 Epoch 00004: ReduceLROnPlateau reducing learning rate to 9.999999747378752e-06. 
Epoch 00004: val_loss did not improve - 134s - loss: 0.0675 - acc: 0.9752 - val_loss: 0.1855 - val_acc: 0.9593 Epoch 5/30 Epoch 00005: val_loss improved from 0.14894 to 0.09956, saving model to /home/paperspace/Invasive/resnet.fold_1.hdf5 - 130s - loss: 0.0380 - acc: 0.9848 - val_loss: 0.0996 - val_acc: 0.9756 Epoch 6/30 Epoch 00006: val_loss improved from 0.09956 to 0.09497, saving model to /home/paperspace/Invasive/resnet.fold_1.hdf5 - 130s - loss: 0.0287 - acc: 0.9914 - val_loss: 0.0950 - val_acc: 0.9695 Epoch 7/30 Epoch 00007: val_loss did not improve - 130s - loss: 0.0184 - acc: 0.9934 - val_loss: 0.1056 - val_acc: 0.9797 Epoch 8/30 Epoch 00008: ReduceLROnPlateau reducing learning rate to 9.999999747378752e-07. Epoch 00008: val_loss did not improve - 130s - loss: 0.0161 - acc: 0.9954 - val_loss: 0.0989 - val_acc: 0.9797 Epoch 9/30 Epoch 00009: val_loss improved from 0.09497 to 0.09153, saving model to /home/paperspace/Invasive/resnet.fold_1.hdf5 - 130s - loss: 0.0181 - acc: 0.9934 - val_loss: 0.0915 - val_acc: 0.9756 Epoch 10/30 Epoch 00010: val_loss did not improve - 130s - loss: 0.0100 - acc: 0.9985 - val_loss: 0.1025 - val_acc: 0.9776 Epoch 11/30 Epoch 00011: ReduceLROnPlateau reducing learning rate to 1e-07. 
Epoch 00011: val_loss did not improve - 130s - loss: 0.0122 - acc: 0.9965 - val_loss: 0.1101 - val_acc: 0.9736 Epoch 12/30 Epoch 00012: val_loss did not improve - 130s - loss: 0.0095 - acc: 0.9980 - val_loss: 0.0953 - val_acc: 0.9715 Epoch 00012: early stopping Fitting Inception Fold 1 Epoch 1/30 Epoch 00001: val_loss improved from inf to 0.12539, saving model to /home/paperspace/Invasive/inception.fold_1.hdf5 - 141s - loss: 0.2487 - acc: 0.9054 - val_loss: 0.1254 - val_acc: 0.9695 Epoch 2/30 Epoch 00002: val_loss improved from 0.12539 to 0.08857, saving model to /home/paperspace/Invasive/inception.fold_1.hdf5 - 115s - loss: 0.1165 - acc: 0.9605 - val_loss: 0.0886 - val_acc: 0.9675 Epoch 3/30 Epoch 00003: val_loss improved from 0.08857 to 0.06837, saving model to /home/paperspace/Invasive/inception.fold_1.hdf5 - 115s - loss: 0.0999 - acc: 0.9666 - val_loss: 0.0684 - val_acc: 0.9837 Epoch 4/30 Epoch 00004: val_loss did not improve - 115s - loss: 0.0628 - acc: 0.9777 - val_loss: 0.0756 - val_acc: 0.9797 Epoch 5/30 Epoch 00005: ReduceLROnPlateau reducing learning rate to 9.999999747378752e-06. 
Epoch 00005: val_loss did not improve - 120s - loss: 0.0488 - acc: 0.9874 - val_loss: 0.0787 - val_acc: 0.9776 Epoch 6/30 Epoch 00006: val_loss improved from 0.06837 to 0.06648, saving model to /home/paperspace/Invasive/inception.fold_1.hdf5 - 115s - loss: 0.0258 - acc: 0.9919 - val_loss: 0.0665 - val_acc: 0.9817 Epoch 7/30 Epoch 00007: val_loss improved from 0.06648 to 0.05875, saving model to /home/paperspace/Invasive/inception.fold_1.hdf5 - 115s - loss: 0.0214 - acc: 0.9919 - val_loss: 0.0587 - val_acc: 0.9837 Epoch 8/30 Epoch 00008: val_loss did not improve - 115s - loss: 0.0225 - acc: 0.9929 - val_loss: 0.0597 - val_acc: 0.9817 Epoch 9/30 Epoch 00009: val_loss improved from 0.05875 to 0.05710, saving model to /home/paperspace/Invasive/inception.fold_1.hdf5 - 115s - loss: 0.0117 - acc: 0.9939 - val_loss: 0.0571 - val_acc: 0.9817 Epoch 10/30 Epoch 00010: val_loss did not improve - 115s - loss: 0.0146 - acc: 0.9929 - val_loss: 0.0650 - val_acc: 0.9837 Epoch 11/30 Epoch 00011: ReduceLROnPlateau reducing learning rate to 9.999999747378752e-07. Epoch 00011: val_loss did not improve - 115s - loss: 0.0120 - acc: 0.9980 - val_loss: 0.0737 - val_acc: 0.9837 Epoch 12/30 Epoch 00012: val_loss did not improve - 114s - loss: 0.0111 - acc: 0.9944 - val_loss: 0.0612 - val_acc: 0.9776 Epoch 00012: early stopping Fitting ResNet Fold 2 Epoch 1/30 Epoch 00001: val_loss improved from inf to 0.13909, saving model to /home/paperspace/Invasive/resnet.fold_2.hdf5 - 161s - loss: 0.2614 - acc: 0.8983 - val_loss: 0.1391 - val_acc: 0.9614 Epoch 2/30 Epoch 00002: val_loss improved from 0.13909 to 0.09774, saving model to /home/paperspace/Invasive/resnet.fold_2.hdf5 - 131s - loss: 0.1707 - acc: 0.9418 - val_loss: 0.0977 - val_acc: 0.9634 Epoch 3/30 Epoch 00003: val_loss did not improve - 130s - loss: 0.1026 - acc: 0.9641 - val_loss: 0.2262 - val_acc: 0.9390 Epoch 4/30 Epoch 00004: ReduceLROnPlateau reducing learning rate to 9.999999747378752e-06. 
Epoch 00004: val_loss did not improve - 136s - loss: 0.0889 - acc: 0.9712 - val_loss: 0.2077 - val_acc: 0.9512 Epoch 5/30 Epoch 00005: val_loss improved from 0.09774 to 0.09658, saving model to /home/paperspace/Invasive/resnet.fold_2.hdf5 - 131s - loss: 0.0696 - acc: 0.9782 - val_loss: 0.0966 - val_acc: 0.9675 Epoch 6/30 Epoch 00006: val_loss improved from 0.09658 to 0.08532, saving model to /home/paperspace/Invasive/resnet.fold_2.hdf5 - 130s - loss: 0.0436 - acc: 0.9884 - val_loss: 0.0853 - val_acc: 0.9715 Epoch 7/30 Epoch 00007: val_loss improved from 0.08532 to 0.07223, saving model to /home/paperspace/Invasive/resnet.fold_2.hdf5 - 130s - loss: 0.0316 - acc: 0.9909 - val_loss: 0.0722 - val_acc: 0.9756 Epoch 8/30 Epoch 00008: val_loss improved from 0.07223 to 0.06971, saving model to /home/paperspace/Invasive/resnet.fold_2.hdf5 - 131s - loss: 0.0277 - acc: 0.9904 - val_loss: 0.0697 - val_acc: 0.9736 Epoch 9/30 Epoch 00009: val_loss did not improve - 130s - loss: 0.0184 - acc: 0.9960 - val_loss: 0.0811 - val_acc: 0.9695 Epoch 10/30 Epoch 00010: ReduceLROnPlateau reducing learning rate to 9.999999747378752e-07. Epoch 00010: val_loss did not improve - 130s - loss: 0.0265 - acc: 0.9944 - val_loss: 0.0830 - val_acc: 0.9736 Epoch 11/30 Epoch 00011: val_loss improved from 0.06971 to 0.06357, saving model to /home/paperspace/Invasive/resnet.fold_2.hdf5 - 131s - loss: 0.0160 - acc: 0.9970 - val_loss: 0.0636 - val_acc: 0.9797 Epoch 12/30 Epoch 00012: val_loss improved from 0.06357 to 0.06099, saving model to /home/paperspace/Invasive/resnet.fold_2.hdf5 - 131s - loss: 0.0163 - acc: 0.9960 - val_loss: 0.0610 - val_acc: 0.9817 Epoch 13/30 Epoch 00013: val_loss did not improve - 130s - loss: 0.0115 - acc: 0.9970 - val_loss: 0.0644 - val_acc: 0.9797 Epoch 14/30 Epoch 00014: ReduceLROnPlateau reducing learning rate to 1e-07. 
Epoch 00014: val_loss did not improve - 131s - loss: 0.0174 - acc: 0.9954 - val_loss: 0.0699 - val_acc: 0.9776 Epoch 15/30 Epoch 00015: val_loss did not improve - 130s - loss: 0.0112 - acc: 0.9975 - val_loss: 0.0718 - val_acc: 0.9736 Epoch 00015: early stopping Fitting Inception Fold 2 Epoch 1/30 Epoch 00001: val_loss improved from inf to 0.13975, saving model to /home/paperspace/Invasive/inception.fold_2.hdf5 - 144s - loss: 0.2325 - acc: 0.9064 - val_loss: 0.1397 - val_acc: 0.9654 Epoch 2/30 Epoch 00002: val_loss improved from 0.13975 to 0.05187, saving model to /home/paperspace/Invasive/inception.fold_2.hdf5 - 116s - loss: 0.0966 - acc: 0.9717 - val_loss: 0.0519 - val_acc: 0.9878 Epoch 3/30 Epoch 00003: val_loss did not improve - 115s - loss: 0.0611 - acc: 0.9762 - val_loss: 0.1007 - val_acc: 0.9736 Epoch 4/30 Epoch 00004: ReduceLROnPlateau reducing learning rate to 9.999999747378752e-06. Epoch 00004: val_loss did not improve - 122s - loss: 0.0624 - acc: 0.9793 - val_loss: 0.0704 - val_acc: 0.9776 Epoch 5/30 Epoch 00005: val_loss did not improve - 116s - loss: 0.0342 - acc: 0.9909 - val_loss: 0.0677 - val_acc: 0.9858 Epoch 00005: early stopping Fitting ResNet Fold 3 Epoch 1/30 Epoch 00001: val_loss improved from inf to 0.19468, saving model to /home/paperspace/Invasive/resnet.fold_3.hdf5 - 167s - loss: 0.2354 - acc: 0.9094 - val_loss: 0.1947 - val_acc: 0.9614 Epoch 2/30 Epoch 00002: val_loss improved from 0.19468 to 0.13993, saving model to /home/paperspace/Invasive/resnet.fold_3.hdf5 - 132s - loss: 0.1317 - acc: 0.9524 - val_loss: 0.1399 - val_acc: 0.9512 Epoch 3/30 Epoch 00003: val_loss improved from 0.13993 to 0.13378, saving model to /home/paperspace/Invasive/resnet.fold_3.hdf5 - 132s - loss: 0.0901 - acc: 0.9717 - val_loss: 0.1338 - val_acc: 0.9573 Epoch 4/30 Epoch 00004: val_loss improved from 0.13378 to 0.10736, saving model to /home/paperspace/Invasive/resnet.fold_3.hdf5 - 132s - loss: 0.0635 - acc: 0.9782 - val_loss: 0.1074 - val_acc: 0.9573 Epoch 5/30 
Epoch 00005: val_loss did not improve - 131s - loss: 0.0499 - acc: 0.9863 - val_loss: 0.1737 - val_acc: 0.9533 Epoch 6/30 Epoch 00006: ReduceLROnPlateau reducing learning rate to 9.999999747378752e-06. Epoch 00006: val_loss did not improve - 139s - loss: 0.0827 - acc: 0.9793 - val_loss: 0.1874 - val_acc: 0.9431 Epoch 7/30 Epoch 00007: val_loss improved from 0.10736 to 0.09458, saving model to /home/paperspace/Invasive/resnet.fold_3.hdf5 - 131s - loss: 0.0466 - acc: 0.9823 - val_loss: 0.0946 - val_acc: 0.9736 Epoch 8/30 Epoch 00008: val_loss improved from 0.09458 to 0.07251, saving model to /home/paperspace/Invasive/resnet.fold_3.hdf5 - 131s - loss: 0.0207 - acc: 0.9944 - val_loss: 0.0725 - val_acc: 0.9837 Epoch 9/30 Epoch 00009: val_loss did not improve - 131s - loss: 0.0175 - acc: 0.9960 - val_loss: 0.0801 - val_acc: 0.9837 Epoch 10/30 Epoch 00010: ReduceLROnPlateau reducing learning rate to 9.999999747378752e-07. Epoch 00010: val_loss did not improve - 131s - loss: 0.0148 - acc: 0.9944 - val_loss: 0.0781 - val_acc: 0.9797 Epoch 11/30 Epoch 00011: val_loss did not improve - 131s - loss: 0.0155 - acc: 0.9965 - val_loss: 0.0731 - val_acc: 0.9837 Epoch 00011: early stopping Fitting Inception Fold 3 Epoch 1/30 Epoch 00001: val_loss improved from inf to 0.14805, saving model to /home/paperspace/Invasive/inception.fold_3.hdf5 - 149s - loss: 0.2304 - acc: 0.9160 - val_loss: 0.1480 - val_acc: 0.9492 Epoch 2/30 Epoch 00002: val_loss improved from 0.14805 to 0.07753, saving model to /home/paperspace/Invasive/inception.fold_3.hdf5 - 116s - loss: 0.1138 - acc: 0.9621 - val_loss: 0.0775 - val_acc: 0.9776 Epoch 3/30 Epoch 00003: val_loss did not improve - 116s - loss: 0.0729 - acc: 0.9777 - val_loss: 0.0998 - val_acc: 0.9675 Epoch 4/30 Epoch 00004: ReduceLROnPlateau reducing learning rate to 9.999999747378752e-06. 
Epoch 00004: val_loss did not improve - 124s - loss: 0.0639 - acc: 0.9828 - val_loss: 0.1963 - val_acc: 0.9553 Epoch 5/30 Epoch 00005: val_loss improved from 0.07753 to 0.07202, saving model to /home/paperspace/Invasive/inception.fold_3.hdf5 - 116s - loss: 0.0530 - acc: 0.9879 - val_loss: 0.0720 - val_acc: 0.9756 Epoch 6/30 Epoch 00006: val_loss improved from 0.07202 to 0.06423, saving model to /home/paperspace/Invasive/inception.fold_3.hdf5 - 116s - loss: 0.0374 - acc: 0.9879 - val_loss: 0.0642 - val_acc: 0.9776 Epoch 7/30 Epoch 00007: val_loss did not improve - 116s - loss: 0.0317 - acc: 0.9889 - val_loss: 0.0697 - val_acc: 0.9797 Epoch 8/30 Epoch 00008: ReduceLROnPlateau reducing learning rate to 9.999999747378752e-07. Epoch 00008: val_loss did not improve - 116s - loss: 0.0274 - acc: 0.9929 - val_loss: 0.0682 - val_acc: 0.9837 Epoch 9/30 Epoch 00009: val_loss did not improve - 116s - loss: 0.0202 - acc: 0.9965 - val_loss: 0.0658 - val_acc: 0.9817 Epoch 00009: early stopping Fitting ResNet Fold 4 Epoch 1/30 Epoch 00001: val_loss improved from inf to 0.11965, saving model to /home/paperspace/Invasive/resnet.fold_4.hdf5 - 171s - loss: 0.2728 - acc: 0.8892 - val_loss: 0.1196 - val_acc: 0.9715 Epoch 2/30 Epoch 00002: val_loss improved from 0.11965 to 0.10213, saving model to /home/paperspace/Invasive/resnet.fold_4.hdf5 - 132s - loss: 0.1139 - acc: 0.9595 - val_loss: 0.1021 - val_acc: 0.9634 Epoch 3/30 Epoch 00003: val_loss improved from 0.10213 to 0.07602, saving model to /home/paperspace/Invasive/resnet.fold_4.hdf5 - 132s - loss: 0.1113 - acc: 0.9681 - val_loss: 0.0760 - val_acc: 0.9817 Epoch 4/30 Epoch 00004: val_loss did not improve - 131s - loss: 0.0838 - acc: 0.9732 - val_loss: 0.1409 - val_acc: 0.9736 Epoch 5/30 Epoch 00005: ReduceLROnPlateau reducing learning rate to 9.999999747378752e-06. 
Epoch 00005: val_loss did not improve - 140s - loss: 0.0827 - acc: 0.9717 - val_loss: 0.0837 - val_acc: 0.9695 Epoch 6/30 Epoch 00006: val_loss improved from 0.07602 to 0.05876, saving model to /home/paperspace/Invasive/resnet.fold_4.hdf5 - 132s - loss: 0.0421 - acc: 0.9873 - val_loss: 0.0588 - val_acc: 0.9756 Epoch 7/30 Epoch 00007: val_loss improved from 0.05876 to 0.04407, saving model to /home/paperspace/Invasive/resnet.fold_4.hdf5 - 132s - loss: 0.0370 - acc: 0.9909 - val_loss: 0.0441 - val_acc: 0.9797 Epoch 8/30 Epoch 00008: val_loss did not improve - 132s - loss: 0.0218 - acc: 0.9949 - val_loss: 0.0604 - val_acc: 0.9776 Epoch 9/30 Epoch 00009: val_loss improved from 0.04407 to 0.03794, saving model to /home/paperspace/Invasive/resnet.fold_4.hdf5 - 132s - loss: 0.0296 - acc: 0.9929 - val_loss: 0.0379 - val_acc: 0.9858 Epoch 10/30 Epoch 00010: val_loss did not improve - 132s - loss: 0.0133 - acc: 0.9960 - val_loss: 0.0414 - val_acc: 0.9878 Epoch 11/30 Epoch 00011: ReduceLROnPlateau reducing learning rate to 9.999999747378752e-07. Epoch 00011: val_loss did not improve - 132s - loss: 0.0118 - acc: 0.9965 - val_loss: 0.0441 - val_acc: 0.9837 Epoch 12/30 Epoch 00012: val_loss improved from 0.03794 to 0.03326, saving model to /home/paperspace/Invasive/resnet.fold_4.hdf5 - 132s - loss: 0.0165 - acc: 0.9954 - val_loss: 0.0333 - val_acc: 0.9878 Epoch 13/30 Epoch 00013: val_loss did not improve - 131s - loss: 0.0100 - acc: 0.9975 - val_loss: 0.0598 - val_acc: 0.9878 Epoch 14/30 Epoch 00014: ReduceLROnPlateau reducing learning rate to 1e-07. 
Epoch 00014: val_loss did not improve - 131s - loss: 0.0060 - acc: 0.9990 - val_loss: 0.0539 - val_acc: 0.9878 Epoch 15/30 Epoch 00015: val_loss did not improve - 132s - loss: 0.0082 - acc: 0.9970 - val_loss: 0.0671 - val_acc: 0.9797 Epoch 00015: early stopping Fitting Inception Fold 4 Epoch 1/30 Epoch 00001: val_loss improved from inf to 0.24697, saving model to /home/paperspace/Invasive/inception.fold_4.hdf5 - 155s - loss: 0.2485 - acc: 0.8963 - val_loss: 0.2470 - val_acc: 0.9411 Epoch 2/30 Epoch 00002: val_loss improved from 0.24697 to 0.08318, saving model to /home/paperspace/Invasive/inception.fold_4.hdf5 - 117s - loss: 0.1066 - acc: 0.9610 - val_loss: 0.0832 - val_acc: 0.9776 Epoch 3/30 Epoch 00003: val_loss did not improve - 117s - loss: 0.0689 - acc: 0.9767 - val_loss: 0.0960 - val_acc: 0.9736 Epoch 4/30 Epoch 00004: val_loss improved from 0.08318 to 0.08117, saving model to /home/paperspace/Invasive/inception.fold_4.hdf5 - 118s - loss: 0.0439 - acc: 0.9864 - val_loss: 0.0812 - val_acc: 0.9817 Epoch 5/30 Epoch 00005: val_loss did not improve - 117s - loss: 0.0525 - acc: 0.9848 - val_loss: 0.1083 - val_acc: 0.9756 Epoch 6/30 Epoch 00006: val_loss improved from 0.08117 to 0.06099, saving model to /home/paperspace/Invasive/inception.fold_4.hdf5 - 118s - loss: 0.0249 - acc: 0.9944 - val_loss: 0.0610 - val_acc: 0.9797 Epoch 7/30 Epoch 00007: val_loss did not improve - 117s - loss: 0.0434 - acc: 0.9833 - val_loss: 0.1213 - val_acc: 0.9715 Epoch 8/30 Epoch 00008: ReduceLROnPlateau reducing learning rate to 9.999999747378752e-06. 
Epoch 00008: val_loss did not improve - 126s - loss: 0.0277 - acc: 0.9879 - val_loss: 0.0701 - val_acc: 0.9756 Epoch 9/30 Epoch 00009: val_loss improved from 0.06099 to 0.05579, saving model to /home/paperspace/Invasive/inception.fold_4.hdf5 - 117s - loss: 0.0116 - acc: 0.9939 - val_loss: 0.0558 - val_acc: 0.9776 Epoch 10/30 Epoch 00010: val_loss improved from 0.05579 to 0.03924, saving model to /home/paperspace/Invasive/inception.fold_4.hdf5 - 118s - loss: 0.0145 - acc: 0.9934 - val_loss: 0.0392 - val_acc: 0.9878 Epoch 11/30 Epoch 00011: val_loss did not improve - 117s - loss: 0.0058 - acc: 0.9995 - val_loss: 0.0511 - val_acc: 0.9817 Epoch 12/30 Epoch 00012: val_loss improved from 0.03924 to 0.03841, saving model to /home/paperspace/Invasive/inception.fold_4.hdf5 - 118s - loss: 0.0088 - acc: 0.9955 - val_loss: 0.0384 - val_acc: 0.9837 Epoch 13/30 Epoch 00013: val_loss improved from 0.03841 to 0.02837, saving model to /home/paperspace/Invasive/inception.fold_4.hdf5 - 117s - loss: 0.0044 - acc: 0.9995 - val_loss: 0.0284 - val_acc: 0.9898 Epoch 14/30 Epoch 00014: val_loss did not improve - 117s - loss: 0.0058 - acc: 0.9990 - val_loss: 0.0422 - val_acc: 0.9858 Epoch 15/30 Epoch 00015: ReduceLROnPlateau reducing learning rate to 9.999999747378752e-07. Epoch 00015: val_loss did not improve - 117s - loss: 0.0053 - acc: 1.0000 - val_loss: 0.0509 - val_acc: 0.9878 Epoch 16/30 Epoch 00016: val_loss did not improve - 117s - loss: 0.0026 - acc: 1.0000 - val_loss: 0.0463 - val_acc: 0.9776 Epoch 00016: early stopping
# Restore the per-fold accuracy scores persisted by the training run.
scores_path = os.path.join(master_path, 'scores.pickle')
with open(scores_path, 'rb') as handle:
    scores = pickle.load(handle)
# Unpack order: ResNet val, Inception val, ResNet train, Inception train.
scores_res_v, scores_incep_v, scores_res_t, scores_incep_t = scores
def _load_fold_history(model_name, fold):
    """Load one fold's pickled Keras history as a DataFrame with fold-suffixed columns."""
    pickle_path = os.path.join(master_path,
                               model_name + '.history.fold_' + str(fold) + '.pickle')
    with open(pickle_path, 'rb') as handle:
        history = pickle.load(handle)
    frame = pd.DataFrame.from_dict(history)
    frame.rename(index=int, columns={"acc": "train_acc", "loss": "train_loss"}, inplace=True)
    frame.columns = [col + '_fold_' + str(fold) for col in frame.columns]
    return frame

# Collect the training histories of all 5 CV folds, one column set per fold.
# Outer join keeps every epoch even when folds stopped early at different epochs.
res_d = pd.DataFrame()
incept_d = pd.DataFrame()
for fold in range(5):
    res_d = res_d.join(_load_fold_history('resnet', fold), how='outer')
    incept_d = incept_d.join(_load_fold_history('inception', fold), how='outer')

# Promote the epoch index to a sortable 'epoch' column for plotting.
for frame in (res_d, incept_d):
    frame.reset_index(inplace=True)
    frame.rename(index=int, columns={"index": "epoch"}, inplace=True)
    frame.sort_values(by=['epoch'], inplace=True)
# Per-fold train/validation accuracy summary tables (5 folds each).
incep_sc = pd.DataFrame({'Train Accuracy': scores_incep_t,
                         'Validation Accuracy': scores_incep_v,
                         'fold': np.arange(5)})
res_sc = pd.DataFrame({'Train Accuracy': scores_res_t,
                       'Validation Accuracy': scores_res_v,
                       'fold': np.arange(5)})

# Six-panel diagnostic figure for Inception V3: accuracy, loss, LR, fold summary.
t_acc = incep_sc['Train Accuracy'].mean() * 100
v_acc = incep_sc['Validation Accuracy'].mean() * 100
fig, axes = plt.subplots(nrows=3, ncols=2, figsize=(13, 11))
fig.suptitle("Inception V3 Results", fontsize=16)
# BUG FIX: the original y-lists repeated 'fold_3' twice and never plotted 'fold_4'.
incept_d.plot(ax=axes[0, 0], x='epoch', title='Train Accuracy by Epoch/Folds',
              y=['train_acc_fold_' + str(i) for i in range(5)])
axes[0, 0].set_ylim([0.9, 1])
incept_d.plot(ax=axes[0, 1], x='epoch', title='Validation Accuracy by Epoch/Folds',
              y=['val_acc_fold_' + str(i) for i in range(5)])
axes[0, 1].set_ylim([0.9, 1])
incept_d.plot(ax=axes[1, 0], x='epoch', title='Train Loss by Epoch/Folds',
              y=['train_loss_fold_' + str(i) for i in range(5)])
axes[1, 0].set_ylim([0.00, 0.25])
incept_d.plot(ax=axes[1, 1], x='epoch', title='Validation Loss by Epoch/Folds',
              y=['val_loss_fold_' + str(i) for i in range(5)])
axes[1, 1].set_ylim([0.00, 0.25])
incept_d.plot(ax=axes[2, 0], x='epoch', title='Learning Rate by Epoch/Folds',
              y=['lr_fold_' + str(i) for i in range(5)])
incep_sc.plot(ax=axes[2, 1], x='fold',
              title='Avg Accuracy across Folds: Train %.2f%%, Val %.2f%%' % (t_acc, v_acc),
              xticks=np.arange(5))
# BUG FIX: set the y-limits AFTER plotting; pandas resets axis limits on plot,
# so the original set_ylim-before-plot call had no effect.
axes[2, 1].set_ylim([0.965, 1.0])
plt.tight_layout()
fig.subplots_adjust(top=0.89)
plt.show()
# Six-panel diagnostic figure for ResNet50 (same layout as the Inception figure).
t_acc = res_sc['Train Accuracy'].mean() * 100
v_acc = res_sc['Validation Accuracy'].mean() * 100
fig, axes = plt.subplots(nrows=3, ncols=2, figsize=(13, 11))
fig.suptitle("ResNet50 Results", fontsize=16)
# BUG FIX: the original y-lists repeated 'fold_3' twice and never plotted 'fold_4'.
res_d.plot(ax=axes[0, 0], x='epoch', title='Train Accuracy by Epoch/Folds',
           y=['train_acc_fold_' + str(i) for i in range(5)])
axes[0, 0].set_ylim([0.9, 1.0])
res_d.plot(ax=axes[0, 1], x='epoch', title='Validation Accuracy by Epoch/Folds',
           y=['val_acc_fold_' + str(i) for i in range(5)])
axes[0, 1].set_ylim([0.9, 1.0])
res_d.plot(ax=axes[1, 0], x='epoch', title='Train Loss by Epoch/Folds',
           y=['train_loss_fold_' + str(i) for i in range(5)])
axes[1, 0].set_ylim([0.00, 0.25])
# TYPO FIX: title read 'Validation Los by Epoch/Foldss'.
res_d.plot(ax=axes[1, 1], x='epoch', title='Validation Loss by Epoch/Folds',
           y=['val_loss_fold_' + str(i) for i in range(5)])
axes[1, 1].set_ylim([0.00, 0.25])
res_d.plot(ax=axes[2, 0], x='epoch', title='Learning Rate by Epoch/Folds',
           y=['lr_fold_' + str(i) for i in range(5)])
res_sc.plot(ax=axes[2, 1], x='fold',
            title='Avg Accuracy across Folds: Train %.2f%%, Val %.2f%%' % (t_acc, v_acc),
            xticks=np.arange(5))
axes[2, 1].set_ylim([0.965, 1.0])
plt.tight_layout()
fig.subplots_adjust(top=0.89)
plt.show()
size = (300, 300)
img_dim = (300, 300, 3)
# One list of predictions per test image; every fold contributes a ResNet50
# prediction followed by an Inception V3 prediction (10 entries after 5 folds).
final = {name: [] for name in X_test.name.values}
for fold in range(5):
    print('Loading and predicting with Models Fold ' + str(fold))
    res = get_model('resnet50', img_dim=img_dim)
    incep = get_model('inception', img_dim=img_dim)
    res.load_weights(filepath=os.path.join(master_path, 'resnet.fold_' + str(fold) + '.hdf5'))
    incep.load_weights(filepath=os.path.join(master_path, 'inception.fold_' + str(fold) + '.hdf5'))
    for name in X_test.name.values:
        # Each backbone requires its own input preprocessing.
        for net, preproc in ((res, preprocess_input_resnet50),
                             (incep, preprocess_input_inception)):
            img = prepare_image(name, size, preproc)
            img = np.expand_dims(img, axis=0)
            final[name].append(net.predict(img))
Loading and predicting with Models Fold 0 Loading and predicting with Models Fold 1 Loading and predicting with Models Fold 2 Loading and predicting with Models Fold 3 Loading and predicting with Models Fold 4
# Average each image's 10 probabilities (2 models x 5 folds) into one score.
final_average = {name: [np.array(preds).mean()] for name, preds in final.items()}
test_proba = pd.DataFrame.from_dict(final_average, orient='index').reset_index()
test_proba.rename(index=int, columns={"index": "name", 0: "invasive_proba"}, inplace=True)
# Attach the ensemble probability to the test frame and threshold at 0.5.
test = X_test.merge(test_proba, on='name')
test['prediction'] = np.where(test.invasive_proba > 0.5, 1, 0)
test.head()
name | invasive | invasive_proba | prediction | |
---|---|---|---|---|
0 | /home/paperspace/Invasive/train/142.jpg | 0 | 0.011241 | 0 |
1 | /home/paperspace/Invasive/train/2477.jpg | 0 | 0.000811 | 0 |
2 | /home/paperspace/Invasive/train/1751.jpg | 1 | 0.999922 | 1 |
3 | /home/paperspace/Invasive/train/1710.jpg | 0 | 0.001321 | 0 |
4 | /home/paperspace/Invasive/train/1926.jpg | 0 | 0.000899 | 0 |
# Report ensemble accuracy on the held-out test split.
accuracy_pct = metrics.accuracy_score(test.invasive, test.prediction) * 100
print('Test Set Accuracy (averaged across Inception V3 and ResNet50 5 Folds): %.2f%%'
      % accuracy_pct)
Test Set Accuracy (averaged across Inception V3 and ResNet50 5 Folds): 98.85%
# Build a VGG16-based binary classifier: keep the pretrained base up to the
# flatten layer and attach a BatchNorm -> Dropout -> sigmoid head.
m = VGG16(weights='imagenet', include_top=True)
# Index of the (last) flatten layer — the attachment point for the new head.
j = [i for i, layer in enumerate(m.layers) if 'flatten' in layer.name][-1]
x = m.layers[j].output
x = BatchNormalization()(x)
x = Dropout(0.5)(x)
x = Dense(1, activation='sigmoid')(x)
# FIX: use the Keras 2 keyword arguments. The Keras 1 spelling
# Model(input=..., output=...) is deprecated and emitted a UserWarning.
model = Model(inputs=m.input, outputs=x)
model.summary()
_________________________________________________________________ Layer (type) Output Shape Param # ================================================================= input_37 (InputLayer) (None, 224, 224, 3) 0 _________________________________________________________________ block1_conv1 (Conv2D) (None, 224, 224, 64) 1792 _________________________________________________________________ block1_conv2 (Conv2D) (None, 224, 224, 64) 36928 _________________________________________________________________ block1_pool (MaxPooling2D) (None, 112, 112, 64) 0 _________________________________________________________________ block2_conv1 (Conv2D) (None, 112, 112, 128) 73856 _________________________________________________________________ block2_conv2 (Conv2D) (None, 112, 112, 128) 147584 _________________________________________________________________ block2_pool (MaxPooling2D) (None, 56, 56, 128) 0 _________________________________________________________________ block3_conv1 (Conv2D) (None, 56, 56, 256) 295168 _________________________________________________________________ block3_conv2 (Conv2D) (None, 56, 56, 256) 590080 _________________________________________________________________ block3_conv3 (Conv2D) (None, 56, 56, 256) 590080 _________________________________________________________________ block3_pool (MaxPooling2D) (None, 28, 28, 256) 0 _________________________________________________________________ block4_conv1 (Conv2D) (None, 28, 28, 512) 1180160 _________________________________________________________________ block4_conv2 (Conv2D) (None, 28, 28, 512) 2359808 _________________________________________________________________ block4_conv3 (Conv2D) (None, 28, 28, 512) 2359808 _________________________________________________________________ block4_pool (MaxPooling2D) (None, 14, 14, 512) 0 _________________________________________________________________ block5_conv1 (Conv2D) (None, 14, 14, 512) 2359808 
_________________________________________________________________ block5_conv2 (Conv2D) (None, 14, 14, 512) 2359808 _________________________________________________________________ block5_conv3 (Conv2D) (None, 14, 14, 512) 2359808 _________________________________________________________________ block5_pool (MaxPooling2D) (None, 7, 7, 512) 0 _________________________________________________________________ flatten (Flatten) (None, 25088) 0 _________________________________________________________________ batch_normalization_1053 (Ba (None, 25088) 100352 _________________________________________________________________ dropout_19 (Dropout) (None, 25088) 0 _________________________________________________________________ dense_19 (Dense) (None, 1) 25089 ================================================================= Total params: 14,840,129 Trainable params: 14,789,953 Non-trainable params: 50,176 _________________________________________________________________
/home/paperspace/anaconda3/lib/python3.6/site-packages/ipykernel_launcher.py:7: UserWarning: Update your `Model` call to the Keras 2 API: `Model(inputs=Tensor("in..., outputs=Tensor("de...)` import sys
# Fine-tune the VGG16 head on fold 0 only; the resulting weights are used for
# the CAM/saliency visualizations below.
batch_size = 8
size = (224, 224)
epochs = 30
kf = KFold(n_splits=5, shuffle=True, random_state=3)
x = X_train
y = X_train.invasive
for i, (train_index, test_index) in enumerate(kf.split(x)):
    x_train = x.iloc[train_index]; x_valid = x.iloc[test_index]
    y_train = y.iloc[train_index]; y_valid = y.iloc[test_index]
    # FIX: fit_generator expects integer step counts; plain '/' produced a
    # float. Ceil so the trailing partial batch is still visited each epoch.
    train_steps = int(math.ceil(len(x_train) / batch_size))
    valid_steps = int(math.ceil(len(x_valid) / batch_size))
    early = EarlyStopping(monitor='val_loss', patience=3, verbose=1, min_delta=1e-4)
    reducelr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=1,
                                 cooldown=1, verbose=1, min_lr=1e-7)
    # Renamed from 'checkincept': this checkpoint saves the VGG weights.
    checkpoint = ModelCheckpoint(filepath=os.path.join(master_path, 'vis.hdf5'),
                                 verbose=1, save_best_only=True,
                                 save_weights_only=True, mode='auto')
    model.compile(optimizer=Adam(lr=0.0001), loss='binary_crossentropy', metrics=['accuracy'])
    model.fit_generator(data_generator(x_train, 'vgg', size=size, batch_size=batch_size),
                        train_steps, epochs=epochs, verbose=2,
                        validation_data=data_generator(x_valid, 'vgg', size=size, batch_size=batch_size),
                        validation_steps=valid_steps,
                        callbacks=[early, reducelr, checkpoint])
    # Deliberately train only the first fold.
    break
# Reload the best (lowest val_loss) weights saved by the checkpoint.
model.load_weights(os.path.join(master_path, 'vis.hdf5'))
Epoch 1/30 Epoch 00001: val_loss improved from inf to 0.33954, saving model to /home/paperspace/Invasive/vis.hdf5 - 148s - loss: 0.4510 - acc: 0.7901 - val_loss: 0.3395 - val_acc: 0.8600 Epoch 2/30 Epoch 00002: val_loss did not improve - 101s - loss: 0.3473 - acc: 0.8674 - val_loss: 2.4906 - val_acc: 0.5801 Epoch 3/30 Epoch 00003: ReduceLROnPlateau reducing learning rate to 9.999999747378752e-06. Epoch 00003: val_loss did not improve - 109s - loss: 0.3039 - acc: 0.8806 - val_loss: 0.8059 - val_acc: 0.8337 Epoch 4/30 Epoch 00004: val_loss improved from 0.33954 to 0.21896, saving model to /home/paperspace/Invasive/vis.hdf5 - 101s - loss: 0.2474 - acc: 0.9131 - val_loss: 0.2190 - val_acc: 0.9148 Epoch 5/30 Epoch 00005: val_loss improved from 0.21896 to 0.17527, saving model to /home/paperspace/Invasive/vis.hdf5 - 101s - loss: 0.2330 - acc: 0.9157 - val_loss: 0.1753 - val_acc: 0.9331 Epoch 6/30 Epoch 00006: val_loss improved from 0.17527 to 0.14505, saving model to /home/paperspace/Invasive/vis.hdf5 - 101s - loss: 0.2444 - acc: 0.9157 - val_loss: 0.1451 - val_acc: 0.9533 Epoch 7/30 Epoch 00007: val_loss did not improve - 101s - loss: 0.2296 - acc: 0.9177 - val_loss: 0.2940 - val_acc: 0.9412 Epoch 8/30 Epoch 00008: ReduceLROnPlateau reducing learning rate to 9.999999747378752e-07. Epoch 00008: val_loss did not improve - 100s - loss: 0.2139 - acc: 0.9233 - val_loss: 0.3973 - val_acc: 0.9168 Epoch 9/30 Epoch 00009: val_loss improved from 0.14505 to 0.14237, saving model to /home/paperspace/Invasive/vis.hdf5 - 101s - loss: 0.2136 - acc: 0.9278 - val_loss: 0.1424 - val_acc: 0.9331 Epoch 10/30 Epoch 00010: val_loss did not improve - 101s - loss: 0.2141 - acc: 0.9228 - val_loss: 0.2157 - val_acc: 0.9168 Epoch 11/30 Epoch 00011: ReduceLROnPlateau reducing learning rate to 1e-07. 
Epoch 00011: val_loss did not improve - 101s - loss: 0.2125 - acc: 0.9278 - val_loss: 0.1969 - val_acc: 0.9331 Epoch 12/30 Epoch 00012: val_loss did not improve - 101s - loss: 0.2091 - acc: 0.9263 - val_loss: 0.1620 - val_acc: 0.9249 Epoch 00012: early stopping
from vis.utils import utils
plt.rcParams['figure.figsize'] = (15, 6)
# Sample two invasive-labelled (invasive == 1) test images to visualize.
t = X_test.loc[X_test.invasive == 1, 'name'].sample(2).values
img1, img2 = [utils.load_img(path, target_size=(224, 224)) for path in t]
f, ax = plt.subplots(1, 2)
for panel, picture in zip(ax, (img1, img2)):
    panel.imshow(picture)
plt.show()
/home/paperspace/anaconda3/lib/python3.6/site-packages/skimage/transform/_warps.py:84: UserWarning: The default mode, 'constant', will be changed to 'reflect' in skimage 0.15. warn("The default mode, 'constant', will be changed to 'reflect' in " /home/paperspace/anaconda3/lib/python3.6/site-packages/skimage/transform/_warps.py:84: UserWarning: The default mode, 'constant', will be changed to 'reflect' in skimage 0.15. warn("The default mode, 'constant', will be changed to 'reflect' in "
from vis.visualization import visualize_cam
import matplotlib.cm as cm
from vis.visualization import visualize_saliency, overlay

# Class-activation maps for both sample images under three backprop modifiers.
for modifier in [None, 'guided', 'relu']:
    plt.figure()
    f, ax = plt.subplots(1, 2)
    plt.suptitle("vanilla" if modifier is None else modifier)
    for idx, img in enumerate([img1, img2]):
        # layer_idx=19 / penultimate_layer_idx=18: presumably the sigmoid output
        # and the last pre-head layer of the VGG model — confirm against summary.
        grads = visualize_cam(model, layer_idx=19, filter_indices=1,
                              penultimate_layer_idx=18,
                              seed_input=img, backprop_modifier=modifier)
        # Colormap the gradient map with jet and overlay it on the original image.
        jet_heatmap = np.uint8(cm.jet(grads)[..., :3] * 255)
        ax[idx].imshow(overlay(jet_heatmap[:, :, :, 0], img))
<matplotlib.figure.Figure at 0x7f9c65bc0ef0>
<matplotlib.figure.Figure at 0x7f9c65bd40f0>
<matplotlib.figure.Figure at 0x7f9c656f7588>