import keras
Using TensorFlow backend.
https://www.kaggle.com/c/dogs-vs-cats/data
Unsurprisingly, the cats vs. dogs Kaggle competition in 2013 was won by entrants who used convnets. The best entries could achieve up to 95% accuracy. In our own example, we will get fairly close to this accuracy (in the next section), even though we will be training our models on less than 10% of the data that was available to the competitors. This original dataset contains 25,000 images of dogs and cats (12,500 from each class) and is 543MB large (compressed). After downloading and uncompressing it, we will create a new dataset containing three subsets: a training set with 1000 samples of each class, a validation set with 500 samples of each class, and finally a test set with 500 samples of each class.
Here are a few lines of code to do this:
import os, shutil

# Path to the directory where the original Kaggle dataset was uncompressed.
original_dataset_dir = 'D:/dataset/catDog/train'


def make_dir(*parts):
    """Join *parts into a path, create that directory, and return the path."""
    path = os.path.join(*parts)
    os.mkdir(path)
    return path


def copy_images(prefix, start, stop, dst_dir):
    """Copy '<prefix>.<i>.jpg' for i in [start, stop) from the original
    dataset directory into dst_dir (e.g. prefix='cat', 0..999)."""
    for i in range(start, stop):
        fname = '{}.{}.jpg'.format(prefix, i)
        shutil.copyfile(os.path.join(original_dataset_dir, fname),
                        os.path.join(dst_dir, fname))


# The directory where we will store our smaller dataset.
base_dir = 'D:/dataset/catDog/catVsdog'
os.mkdir(base_dir)

# Directories for our training, validation and test splits,
# each with one sub-directory per class.
train_dir = make_dir(base_dir, 'train')
validation_dir = make_dir(base_dir, 'validation')
test_dir = make_dir(base_dir, 'test')

train_cats_dir = make_dir(train_dir, 'cats')
train_dogs_dir = make_dir(train_dir, 'dogs')
validation_cats_dir = make_dir(validation_dir, 'cats')
validation_dogs_dir = make_dir(validation_dir, 'dogs')
# NOTE: the original comments here said "validation" by copy-paste mistake;
# these two hold the *test* pictures.
test_cats_dir = make_dir(test_dir, 'cats')
test_dogs_dir = make_dir(test_dir, 'dogs')

# 1000 training, 500 validation and 500 test images per class.
copy_images('cat', 0, 1000, train_cats_dir)
copy_images('cat', 1000, 1500, validation_cats_dir)
copy_images('cat', 1500, 2000, test_cats_dir)
copy_images('dog', 0, 1000, train_dogs_dir)
copy_images('dog', 1000, 1500, validation_dogs_dir)
copy_images('dog', 1500, 2000, test_dogs_dir)
train_cats_dir  # notebook echo -> 'D:/dataset/catDog/catVsdog\\train\\cats'

# Sanity check: count the pictures that ended up in each split directory.
# Expected: 1000/1000 train, 500/500 validation, 500/500 test.
for label, directory in [
        ('training cat', train_cats_dir),
        ('training dog', train_dogs_dir),
        ('validation cat', validation_cats_dir),
        ('validation dog', validation_dogs_dir),
        ('test cat', test_cats_dir),
        ('test dog', test_dogs_dir)]:
    print('total {} images:'.format(label), len(os.listdir(directory)))
# Output observed in the original run:
# total training cat images: 1000
# total training dog images: 1000
# total validation cat images: 500
# total validation dog images: 500
# total test cat images: 500
# total test dog images: 500
Since we are attacking a binary classification problem, we are ending the network with a single unit (a Dense
layer of size 1) and a
sigmoid
activation. This unit will encode the probability that the network is looking at one class or the other.
from keras import layers
from keras import models
# A small convnet: four Conv2D + MaxPooling2D stages that progressively
# shrink the 150x150 feature maps while deepening the channels, followed by
# a densely connected classifier. The final 1-unit sigmoid layer outputs the
# probability that the input image belongs to the positive class.
model = models.Sequential([
    layers.Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 3)),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(128, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(128, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Flatten(),
    layers.Dense(512, activation='relu'),
    layers.Dense(1, activation='sigmoid'),
])
Let's take a look at how the dimensions of the feature maps change with every successive layer:
# Print a layer-by-layer summary: output shape and parameter count per layer.
model.summary()
_________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d_5 (Conv2D) (None, 148, 148, 32) 896 _________________________________________________________________ max_pooling2d_5 (MaxPooling2 (None, 74, 74, 32) 0 _________________________________________________________________ conv2d_6 (Conv2D) (None, 72, 72, 64) 18496 _________________________________________________________________ max_pooling2d_6 (MaxPooling2 (None, 36, 36, 64) 0 _________________________________________________________________ conv2d_7 (Conv2D) (None, 34, 34, 128) 73856 _________________________________________________________________ max_pooling2d_7 (MaxPooling2 (None, 17, 17, 128) 0 _________________________________________________________________ conv2d_8 (Conv2D) (None, 15, 15, 128) 147584 _________________________________________________________________ max_pooling2d_8 (MaxPooling2 (None, 7, 7, 128) 0 _________________________________________________________________ flatten_2 (Flatten) (None, 6272) 0 _________________________________________________________________ dense_3 (Dense) (None, 512) 3211776 _________________________________________________________________ dense_4 (Dense) (None, 1) 513 ================================================================= Total params: 3,453,121 Trainable params: 3,453,121 Non-trainable params: 0 _________________________________________________________________
For our compilation step, we'll go with the RMSprop
optimizer as usual. Since we ended our network with a single sigmoid unit, we will
use binary crossentropy as our loss (as a reminder, check out the table in Chapter 4, section 5 for a cheatsheet on what loss function to
use in various situations).
from keras import optimizers
# Single sigmoid output unit -> binary crossentropy is the matching loss.
# RMSprop with a small learning rate; track accuracy during training.
model.compile(optimizer=optimizers.RMSprop(lr=1e-4),
              loss='binary_crossentropy',
              metrics=['acc'])
It may seem a bit daunting, but thankfully Keras has utilities to take care of these steps automatically. Keras has a module with image
processing helper tools, located at keras.preprocessing.image
. In particular, it contains the class ImageDataGenerator
which allows us to
quickly set up Python generators that can automatically turn image files on disk into batches of pre-processed tensors. This is what we
will use here.
https://keras.io/preprocessing/image/
from keras.preprocessing.image import ImageDataGenerator

# Both generators only rescale pixel values from [0, 255] to [0, 1];
# no augmentation at this stage.
train_datagen = ImageDataGenerator(rescale=1./255)
test_datagen = ImageDataGenerator(rescale=1./255)

# flow_from_directory resizes every image to 150x150, yields batches of 20,
# and infers binary labels from the two class sub-folders (cats/dogs) —
# binary labels to match our binary_crossentropy loss.
flow_kwargs = dict(target_size=(150, 150), batch_size=20, class_mode='binary')
train_generator = train_datagen.flow_from_directory(train_dir, **flow_kwargs)
validation_generator = test_datagen.flow_from_directory(validation_dir,
                                                        **flow_kwargs)
Found 2000 images belonging to 2 classes. Found 1000 images belonging to 2 classes.
Let's take a look at the output of one of these generators: it yields batches of 150x150 RGB images (shape (20, 150, 150, 3)
) and binary
labels (shape (20,)
). 20 is the number of samples in each batch (the batch size). Note that the generator yields these batches
indefinitely: it just loops endlessly over the images present in the target folder. For this reason, we need to break
the iteration loop
at some point.
# Draw a single batch from the generator and inspect its shapes:
# images are (20, 150, 150, 3) and labels are (20,).
for inputs_batch, targets_batch in train_generator:
    print('data batch shape:', inputs_batch.shape)
    print('labels batch shape:', targets_batch.shape)
    break  # the generator loops forever, so stop after one batch
data batch shape: (20, 150, 150, 3) labels batch shape: (20,)
Let's fit our model to the data using the generator. We do it using the fit_generator
method, the equivalent of fit
for data generators
like ours. It expects as first argument a Python generator that will yield batches of inputs and targets indefinitely, like ours does.
Because the data is being generated endlessly, the fitting process needs to know how many samples to draw from the generator before
declaring an epoch over. This is the role of the steps_per_epoch
argument: after having drawn steps_per_epoch
batches from the
generator, i.e. after having run for steps_per_epoch
gradient descent steps, the fitting process will go to the next epoch. In our case,
batches contain 20 samples each, so it will take 100 batches until we reach our target of 2000 samples.
When using fit_generator
, one may pass a validation_data
argument, much like with the fit
method. Importantly, this argument is
allowed to be a data generator itself, but it could be a tuple of Numpy arrays as well. If you pass a generator as validation_data
, then
this generator is expected to yield batches of validation data endlessly, and thus you should also specify the validation_steps
argument,
which tells the process how many batches to draw from the validation generator for evaluation.
# 100 steps x 20-image batches = one full pass over the 2000 training
# images per epoch; 50 validation steps x 20 = the 1000 validation images.
history = model.fit_generator(
    train_generator,
    validation_data=validation_generator,
    steps_per_epoch=100,
    validation_steps=50,
    epochs=30)
Epoch 1/30 100/100 [==============================] - 16s 163ms/step - loss: 0.6893 - acc: 0.5310 - val_loss: 0.6687 - val_acc: 0.6220 Epoch 2/30 100/100 [==============================] - 12s 121ms/step - loss: 0.6580 - acc: 0.6045 - val_loss: 0.6300 - val_acc: 0.6510 Epoch 3/30 100/100 [==============================] - 12s 121ms/step - loss: 0.6024 - acc: 0.6740 - val_loss: 0.5989 - val_acc: 0.6770 Epoch 4/30 100/100 [==============================] - 12s 121ms/step - loss: 0.5647 - acc: 0.7050 - val_loss: 0.5879 - val_acc: 0.6730 Epoch 5/30 100/100 [==============================] - 12s 121ms/step - loss: 0.5325 - acc: 0.7360 - val_loss: 0.5854 - val_acc: 0.6780 Epoch 6/30 100/100 [==============================] - 12s 122ms/step - loss: 0.5045 - acc: 0.7460 - val_loss: 0.5794 - val_acc: 0.6830 Epoch 7/30 100/100 [==============================] - 12s 122ms/step - loss: 0.4919 - acc: 0.7620 - val_loss: 0.5656 - val_acc: 0.6930 Epoch 8/30 100/100 [==============================] - 12s 122ms/step - loss: 0.4612 - acc: 0.7830 - val_loss: 0.5447 - val_acc: 0.7210 Epoch 9/30 100/100 [==============================] - 12s 122ms/step - loss: 0.4417 - acc: 0.7935 - val_loss: 0.5384 - val_acc: 0.7240 Epoch 10/30 100/100 [==============================] - 12s 122ms/step - loss: 0.4128 - acc: 0.8190 - val_loss: 0.6742 - val_acc: 0.6750 Epoch 11/30 100/100 [==============================] - 12s 122ms/step - loss: 0.3798 - acc: 0.8330 - val_loss: 0.6281 - val_acc: 0.7030 Epoch 12/30 100/100 [==============================] - 12s 122ms/step - loss: 0.3571 - acc: 0.8435 - val_loss: 0.5573 - val_acc: 0.7280 Epoch 13/30 100/100 [==============================] - 12s 122ms/step - loss: 0.3478 - acc: 0.8495 - val_loss: 0.5481 - val_acc: 0.7340 Epoch 14/30 100/100 [==============================] - 12s 123ms/step - loss: 0.3196 - acc: 0.8620 - val_loss: 0.5440 - val_acc: 0.7340 Epoch 15/30 100/100 [==============================] - 12s 122ms/step - loss: 0.2937 - acc: 0.8900 - 
val_loss: 0.5569 - val_acc: 0.7390 Epoch 16/30 100/100 [==============================] - 12s 122ms/step - loss: 0.2660 - acc: 0.8980 - val_loss: 0.5677 - val_acc: 0.7400 Epoch 17/30 100/100 [==============================] - 12s 122ms/step - loss: 0.2532 - acc: 0.9005 - val_loss: 0.5804 - val_acc: 0.7300 Epoch 18/30 100/100 [==============================] - 12s 122ms/step - loss: 0.2281 - acc: 0.9145 - val_loss: 0.5744 - val_acc: 0.7420 Epoch 19/30 100/100 [==============================] - 12s 122ms/step - loss: 0.2196 - acc: 0.9170 - val_loss: 0.6044 - val_acc: 0.7380 Epoch 20/30 100/100 [==============================] - 12s 125ms/step - loss: 0.1897 - acc: 0.9335 - val_loss: 0.6577 - val_acc: 0.7400 Epoch 21/30 100/100 [==============================] - 13s 126ms/step - loss: 0.1760 - acc: 0.9380 - val_loss: 0.7579 - val_acc: 0.7240 Epoch 22/30 100/100 [==============================] - 12s 123ms/step - loss: 0.1486 - acc: 0.9475 - val_loss: 0.6777 - val_acc: 0.7420 Epoch 23/30 100/100 [==============================] - 12s 123ms/step - loss: 0.1415 - acc: 0.9515 - val_loss: 0.6760 - val_acc: 0.7460 Epoch 24/30 100/100 [==============================] - 12s 122ms/step - loss: 0.1238 - acc: 0.9635 - val_loss: 0.7288 - val_acc: 0.7410 Epoch 25/30 100/100 [==============================] - 12s 122ms/step - loss: 0.1112 - acc: 0.9605 - val_loss: 0.7423 - val_acc: 0.7410 Epoch 26/30 100/100 [==============================] - 12s 123ms/step - loss: 0.0929 - acc: 0.9700 - val_loss: 0.8013 - val_acc: 0.7300 Epoch 27/30 100/100 [==============================] - 12s 123ms/step - loss: 0.0821 - acc: 0.9725 - val_loss: 0.8659 - val_acc: 0.7390 Epoch 28/30 100/100 [==============================] - 12s 123ms/step - loss: 0.0737 - acc: 0.9735 - val_loss: 0.9034 - val_acc: 0.7170 Epoch 29/30 100/100 [==============================] - 12s 123ms/step - loss: 0.0614 - acc: 0.9800 - val_loss: 0.8236 - val_acc: 0.7450 Epoch 30/30 100/100 [==============================] - 12s 
124ms/step - loss: 0.0528 - acc: 0.9855 - val_loss: 0.8810 - val_acc: 0.7350
Let's plot the loss and accuracy of the model over the training and validation data during training:
import matplotlib.pyplot as plt
%matplotlib inline
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
These plots are characteristic of overfitting. Our training accuracy increases linearly over time, until it reaches nearly 100%, while our validation accuracy stalls at 70-72%. Our validation loss reaches its minimum after only five epochs then stalls, while the training loss keeps decreasing linearly until it reaches nearly 0.
Because we only have relatively few training samples (2000), overfitting is going to be our number one concern. Let's train our network using data augmentation and dropout:
# Same convnet as before, plus a Dropout layer between the flattened
# convolutional features and the dense classifier to fight overfitting.
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu',
                        input_shape=(150, 150, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Flatten())
model.add(layers.Dropout(0.5))
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer=optimizers.RMSprop(lr=1e-4),
              metrics=['acc'])

# The text above promises training with *data augmentation* and dropout,
# but the original cell silently reused the non-augmented generators.
# Build an augmented training generator here: random rotations, shifts,
# shears, zooms and horizontal flips of the training images.
train_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)
# Note: validation data must NOT be augmented — only rescaled.
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
    train_dir, target_size=(150, 150), batch_size=20, class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
    validation_dir, target_size=(150, 150), batch_size=20, class_mode='binary')

history = model.fit_generator(
    train_generator,
    steps_per_epoch=100,
    epochs=100,
    validation_data=validation_generator,
    validation_steps=50)
Epoch 1/100 100/100 [==============================] - 14s 140ms/step - loss: 0.6915 - acc: 0.5220 - val_loss: 0.6812 - val_acc: 0.5070 Epoch 2/100 100/100 [==============================] - 13s 128ms/step - loss: 0.6726 - acc: 0.5910 - val_loss: 0.6958 - val_acc: 0.5490 Epoch 3/100 100/100 [==============================] - 13s 127ms/step - loss: 0.6395 - acc: 0.6325 - val_loss: 0.6757 - val_acc: 0.5740 Epoch 4/100 100/100 [==============================] - 13s 127ms/step - loss: 0.5973 - acc: 0.6840 - val_loss: 0.6083 - val_acc: 0.6690 Epoch 5/100 100/100 [==============================] - 13s 128ms/step - loss: 0.5563 - acc: 0.7150 - val_loss: 0.5897 - val_acc: 0.6700 Epoch 6/100 100/100 [==============================] - 13s 128ms/step - loss: 0.5454 - acc: 0.7250 - val_loss: 0.5867 - val_acc: 0.6700 Epoch 7/100 100/100 [==============================] - 13s 128ms/step - loss: 0.5314 - acc: 0.7325 - val_loss: 0.6768 - val_acc: 0.6480 Epoch 8/100 100/100 [==============================] - 13s 127ms/step - loss: 0.5143 - acc: 0.7495 - val_loss: 0.5965 - val_acc: 0.6820 Epoch 9/100 100/100 [==============================] - 13s 127ms/step - loss: 0.4845 - acc: 0.7765 - val_loss: 0.5718 - val_acc: 0.7030 Epoch 10/100 100/100 [==============================] - 13s 129ms/step - loss: 0.4751 - acc: 0.7720 - val_loss: 0.5514 - val_acc: 0.7080 Epoch 11/100 100/100 [==============================] - 13s 129ms/step - loss: 0.4528 - acc: 0.7975 - val_loss: 0.5415 - val_acc: 0.7240 Epoch 12/100 100/100 [==============================] - 13s 128ms/step - loss: 0.4319 - acc: 0.7960 - val_loss: 0.5437 - val_acc: 0.7220 Epoch 13/100 100/100 [==============================] - 13s 128ms/step - loss: 0.4330 - acc: 0.7970 - val_loss: 0.5356 - val_acc: 0.7340 Epoch 14/100 100/100 [==============================] - 13s 128ms/step - loss: 0.3992 - acc: 0.8175 - val_loss: 0.5457 - val_acc: 0.7360 Epoch 15/100 100/100 [==============================] - 13s 128ms/step - loss: 0.3902 - 
acc: 0.8305 - val_loss: 0.5515 - val_acc: 0.7310 Epoch 16/100 100/100 [==============================] - 13s 128ms/step - loss: 0.3651 - acc: 0.8380 - val_loss: 0.5452 - val_acc: 0.7410 Epoch 17/100 100/100 [==============================] - 13s 128ms/step - loss: 0.3582 - acc: 0.8460 - val_loss: 0.5420 - val_acc: 0.7300 Epoch 18/100 100/100 [==============================] - 13s 128ms/step - loss: 0.3387 - acc: 0.8555 - val_loss: 0.5957 - val_acc: 0.7400 Epoch 19/100 100/100 [==============================] - 13s 129ms/step - loss: 0.3258 - acc: 0.8545 - val_loss: 0.5654 - val_acc: 0.7470 Epoch 20/100 100/100 [==============================] - 13s 128ms/step - loss: 0.3055 - acc: 0.8710 - val_loss: 0.5745 - val_acc: 0.7320 Epoch 21/100 100/100 [==============================] - 13s 129ms/step - loss: 0.2874 - acc: 0.8740 - val_loss: 0.5539 - val_acc: 0.7510 Epoch 22/100 100/100 [==============================] - 13s 129ms/step - loss: 0.2944 - acc: 0.8805 - val_loss: 0.5795 - val_acc: 0.7450 Epoch 23/100 100/100 [==============================] - 13s 128ms/step - loss: 0.2769 - acc: 0.8795 - val_loss: 0.5777 - val_acc: 0.7570 Epoch 24/100 100/100 [==============================] - 13s 129ms/step - loss: 0.2563 - acc: 0.8940 - val_loss: 0.5677 - val_acc: 0.7550 Epoch 25/100 100/100 [==============================] - 13s 129ms/step - loss: 0.2575 - acc: 0.8925 - val_loss: 0.5461 - val_acc: 0.7570 Epoch 26/100 100/100 [==============================] - 13s 130ms/step - loss: 0.2396 - acc: 0.9025 - val_loss: 0.5600 - val_acc: 0.7600 Epoch 27/100 100/100 [==============================] - 13s 128ms/step - loss: 0.2237 - acc: 0.9160 - val_loss: 0.5735 - val_acc: 0.7600 Epoch 28/100 100/100 [==============================] - 13s 129ms/step - loss: 0.2203 - acc: 0.9110 - val_loss: 0.5838 - val_acc: 0.7440 Epoch 29/100 100/100 [==============================] - 12s 123ms/step - loss: 0.1963 - acc: 0.9320 - val_loss: 0.5757 - val_acc: 0.7620 Epoch 30/100 100/100 
[==============================] - 12s 123ms/step - loss: 0.1909 - acc: 0.9235 - val_loss: 0.6068 - val_acc: 0.7540 Epoch 31/100 100/100 [==============================] - 12s 123ms/step - loss: 0.1840 - acc: 0.9280 - val_loss: 0.5872 - val_acc: 0.7670 Epoch 32/100 100/100 [==============================] - 12s 124ms/step - loss: 0.1834 - acc: 0.9295 - val_loss: 0.6143 - val_acc: 0.7610 Epoch 33/100 100/100 [==============================] - 12s 123ms/step - loss: 0.1588 - acc: 0.9475 - val_loss: 0.6235 - val_acc: 0.7620 Epoch 34/100 100/100 [==============================] - 12s 123ms/step - loss: 0.1596 - acc: 0.9375 - val_loss: 0.6614 - val_acc: 0.7650 Epoch 35/100 100/100 [==============================] - 12s 123ms/step - loss: 0.1581 - acc: 0.9390 - val_loss: 0.6994 - val_acc: 0.7410 Epoch 36/100 100/100 [==============================] - 12s 124ms/step - loss: 0.1465 - acc: 0.9410 - val_loss: 0.6495 - val_acc: 0.7550 Epoch 37/100 100/100 [==============================] - 12s 123ms/step - loss: 0.1356 - acc: 0.9520 - val_loss: 0.6432 - val_acc: 0.7630 Epoch 38/100 100/100 [==============================] - 12s 123ms/step - loss: 0.1271 - acc: 0.9550 - val_loss: 0.6282 - val_acc: 0.7700 Epoch 39/100 100/100 [==============================] - 12s 123ms/step - loss: 0.1192 - acc: 0.9605 - val_loss: 0.6786 - val_acc: 0.7630 Epoch 40/100 100/100 [==============================] - 12s 123ms/step - loss: 0.1239 - acc: 0.9535 - val_loss: 0.6814 - val_acc: 0.7570 Epoch 41/100 100/100 [==============================] - 12s 124ms/step - loss: 0.1155 - acc: 0.9585 - val_loss: 0.6910 - val_acc: 0.7550 Epoch 42/100 100/100 [==============================] - 12s 124ms/step - loss: 0.0936 - acc: 0.9690 - val_loss: 0.7495 - val_acc: 0.7700 Epoch 43/100 100/100 [==============================] - 12s 123ms/step - loss: 0.1073 - acc: 0.9565 - val_loss: 0.6949 - val_acc: 0.7680 Epoch 44/100 100/100 [==============================] - 12s 125ms/step - loss: 0.0899 - acc: 0.9690 - 
val_loss: 0.7179 - val_acc: 0.7560 Epoch 45/100 100/100 [==============================] - 12s 124ms/step - loss: 0.0886 - acc: 0.9665 - val_loss: 0.6943 - val_acc: 0.7820 Epoch 46/100 100/100 [==============================] - 12s 124ms/step - loss: 0.0752 - acc: 0.9750 - val_loss: 0.7567 - val_acc: 0.7740 Epoch 47/100 100/100 [==============================] - 12s 124ms/step - loss: 0.0755 - acc: 0.9750 - val_loss: 0.9447 - val_acc: 0.7350 Epoch 48/100 100/100 [==============================] - 12s 123ms/step - loss: 0.0764 - acc: 0.9720 - val_loss: 0.8899 - val_acc: 0.7590 Epoch 49/100 100/100 [==============================] - 12s 124ms/step - loss: 0.0741 - acc: 0.9775 - val_loss: 0.8029 - val_acc: 0.7630 Epoch 50/100 100/100 [==============================] - 12s 123ms/step - loss: 0.0656 - acc: 0.9750 - val_loss: 0.8256 - val_acc: 0.7530 Epoch 51/100 100/100 [==============================] - 12s 124ms/step - loss: 0.0579 - acc: 0.9840 - val_loss: 0.8192 - val_acc: 0.7690 Epoch 52/100 100/100 [==============================] - 12s 124ms/step - loss: 0.0608 - acc: 0.9790 - val_loss: 0.8388 - val_acc: 0.7800 Epoch 53/100 100/100 [==============================] - 12s 123ms/step - loss: 0.0651 - acc: 0.9805 - val_loss: 0.8266 - val_acc: 0.7680 Epoch 54/100 100/100 [==============================] - 12s 123ms/step - loss: 0.0627 - acc: 0.9785 - val_loss: 0.9087 - val_acc: 0.7540 Epoch 55/100 100/100 [==============================] - 12s 123ms/step - loss: 0.0570 - acc: 0.9820 - val_loss: 0.8696 - val_acc: 0.7640 Epoch 56/100 100/100 [==============================] - 12s 124ms/step - loss: 0.0560 - acc: 0.9830 - val_loss: 0.9158 - val_acc: 0.7780 Epoch 57/100 100/100 [==============================] - 13s 125ms/step - loss: 0.0511 - acc: 0.9830 - val_loss: 0.9401 - val_acc: 0.7790 Epoch 58/100 100/100 [==============================] - 12s 125ms/step - loss: 0.0456 - acc: 0.9850 - val_loss: 0.9241 - val_acc: 0.7710 Epoch 59/100 100/100 
[==============================] - 12s 123ms/step - loss: 0.0503 - acc: 0.9815 - val_loss: 0.8782 - val_acc: 0.7730 Epoch 60/100 100/100 [==============================] - 12s 124ms/step - loss: 0.0463 - acc: 0.9845 - val_loss: 0.9701 - val_acc: 0.7620 Epoch 61/100 100/100 [==============================] - 12s 124ms/step - loss: 0.0446 - acc: 0.9855 - val_loss: 0.9633 - val_acc: 0.7660 Epoch 62/100 100/100 [==============================] - 12s 124ms/step - loss: 0.0351 - acc: 0.9905 - val_loss: 1.0186 - val_acc: 0.7610 Epoch 63/100 100/100 [==============================] - 12s 124ms/step - loss: 0.0574 - acc: 0.9775 - val_loss: 0.9036 - val_acc: 0.7680 Epoch 64/100 100/100 [==============================] - 12s 123ms/step - loss: 0.0357 - acc: 0.9875 - val_loss: 1.0182 - val_acc: 0.7650 Epoch 65/100 100/100 [==============================] - 12s 124ms/step - loss: 0.0403 - acc: 0.9865 - val_loss: 1.1160 - val_acc: 0.7610 Epoch 66/100 100/100 [==============================] - 12s 123ms/step - loss: 0.0414 - acc: 0.9865 - val_loss: 1.0252 - val_acc: 0.7580 Epoch 67/100 100/100 [==============================] - 12s 124ms/step - loss: 0.0317 - acc: 0.9890 - val_loss: 1.0357 - val_acc: 0.7750 Epoch 68/100 100/100 [==============================] - 12s 124ms/step - loss: 0.0348 - acc: 0.9885 - val_loss: 0.9770 - val_acc: 0.7580 Epoch 69/100 100/100 [==============================] - 12s 124ms/step - loss: 0.0361 - acc: 0.9890 - val_loss: 1.0370 - val_acc: 0.7640 Epoch 70/100 100/100 [==============================] - 12s 123ms/step - loss: 0.0330 - acc: 0.9905 - val_loss: 1.0350 - val_acc: 0.7700 Epoch 71/100 100/100 [==============================] - 12s 124ms/step - loss: 0.0394 - acc: 0.9850 - val_loss: 1.0132 - val_acc: 0.7710 Epoch 72/100 100/100 [==============================] - 12s 123ms/step - loss: 0.0312 - acc: 0.9910 - val_loss: 1.0268 - val_acc: 0.7750 Epoch 73/100 100/100 [==============================] - 12s 123ms/step - loss: 0.0265 - acc: 0.9900 - 
val_loss: 1.0852 - val_acc: 0.7690 Epoch 74/100 100/100 [==============================] - 12s 124ms/step - loss: 0.0327 - acc: 0.9905 - val_loss: 1.1617 - val_acc: 0.7590 Epoch 75/100 100/100 [==============================] - 12s 124ms/step - loss: 0.0360 - acc: 0.9875 - val_loss: 1.0486 - val_acc: 0.7710 Epoch 76/100 100/100 [==============================] - 12s 124ms/step - loss: 0.0244 - acc: 0.9915 - val_loss: 1.1726 - val_acc: 0.7530 Epoch 77/100 100/100 [==============================] - 12s 124ms/step - loss: 0.0326 - acc: 0.9875 - val_loss: 1.0927 - val_acc: 0.7650 Epoch 78/100 100/100 [==============================] - 12s 125ms/step - loss: 0.0404 - acc: 0.9865 - val_loss: 1.1501 - val_acc: 0.7710 Epoch 79/100 100/100 [==============================] - 12s 124ms/step - loss: 0.0225 - acc: 0.9935 - val_loss: 1.1750 - val_acc: 0.7550 Epoch 80/100 100/100 [==============================] - 12s 123ms/step - loss: 0.0261 - acc: 0.9935 - val_loss: 1.1779 - val_acc: 0.7660 Epoch 81/100 100/100 [==============================] - 12s 123ms/step - loss: 0.0240 - acc: 0.9910 - val_loss: 1.0923 - val_acc: 0.7710 Epoch 82/100 100/100 [==============================] - 12s 123ms/step - loss: 0.0274 - acc: 0.9885 - val_loss: 1.2626 - val_acc: 0.7550 Epoch 83/100 100/100 [==============================] - 12s 124ms/step - loss: 0.0255 - acc: 0.9920 - val_loss: 1.1342 - val_acc: 0.7650 Epoch 84/100 100/100 [==============================] - 12s 123ms/step - loss: 0.0257 - acc: 0.9905 - val_loss: 1.0933 - val_acc: 0.7660 Epoch 85/100 100/100 [==============================] - 12s 124ms/step - loss: 0.0218 - acc: 0.9910 - val_loss: 1.1167 - val_acc: 0.7690 Epoch 86/100 100/100 [==============================] - 12s 124ms/step - loss: 0.0230 - acc: 0.9920 - val_loss: 1.2408 - val_acc: 0.7670 Epoch 87/100 100/100 [==============================] - 12s 124ms/step - loss: 0.0200 - acc: 0.9935 - val_loss: 1.2296 - val_acc: 0.7600 Epoch 88/100 100/100 
[==============================] - 12s 123ms/step - loss: 0.0158 - acc: 0.9950 - val_loss: 1.2875 - val_acc: 0.7630 Epoch 89/100 100/100 [==============================] - 12s 123ms/step - loss: 0.0219 - acc: 0.9920 - val_loss: 1.2603 - val_acc: 0.7540 Epoch 90/100 100/100 [==============================] - 12s 123ms/step - loss: 0.0222 - acc: 0.9920 - val_loss: 1.1655 - val_acc: 0.7610 Epoch 91/100 100/100 [==============================] - 12s 124ms/step - loss: 0.0230 - acc: 0.9935 - val_loss: 1.1637 - val_acc: 0.7640 Epoch 92/100 100/100 [==============================] - 12s 123ms/step - loss: 0.0246 - acc: 0.9925 - val_loss: 1.1986 - val_acc: 0.7780 Epoch 93/100 100/100 [==============================] - 12s 124ms/step - loss: 0.0251 - acc: 0.9910 - val_loss: 1.1537 - val_acc: 0.7700 Epoch 94/100 100/100 [==============================] - 12s 124ms/step - loss: 0.0204 - acc: 0.9915 - val_loss: 1.2301 - val_acc: 0.7670 Epoch 95/100 100/100 [==============================] - 12s 124ms/step - loss: 0.0255 - acc: 0.9910 - val_loss: 1.3497 - val_acc: 0.7650 Epoch 96/100 100/100 [==============================] - 12s 124ms/step - loss: 0.0242 - acc: 0.9915 - val_loss: 1.2397 - val_acc: 0.7640 Epoch 97/100 100/100 [==============================] - 12s 123ms/step - loss: 0.0292 - acc: 0.9910 - val_loss: 1.1892 - val_acc: 0.7750 Epoch 98/100 100/100 [==============================] - 12s 123ms/step - loss: 0.0242 - acc: 0.9930 - val_loss: 1.2302 - val_acc: 0.7660 Epoch 99/100 100/100 [==============================] - 12s 124ms/step - loss: 0.0229 - acc: 0.9915 - val_loss: 1.3007 - val_acc: 0.7590 Epoch 100/100 100/100 [==============================] - 13s 125ms/step - loss: 0.0197 - acc: 0.9920 - val_loss: 1.2037 - val_acc: 0.7760
Let's plot our results again:
# Plot the training curves for the dropout/augmented run.
acc, val_acc = history.history['acc'], history.history['val_acc']
loss, val_loss = history.history['loss'], history.history['val_loss']
epochs = range(len(acc))

# Accuracy curves: dots for training, solid line for validation.
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()

# Loss curves in a second figure.
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
# Persist the trained model (architecture + weights + optimizer state) to HDF5.
# NOTE(review): this saves the second (dropout) model, yet the filename says
# '_1' and the earlier baseline model was never saved — confirm the suffix.
model.save('cats_and_dogs_small_1.h5')