#!/usr/bin/env python
# coding: utf-8

# Table of Contents

# In[1]:


# code for loading the format for the notebook
import os

# path : store the current path to convert back to it later
path = os.getcwd()
os.chdir(os.path.join('..', 'notebook_format'))

from formats import load_style
load_style(plot_style=False)


# In[2]:


os.chdir(path)

# 1. magic to print version
# 2. magic so that the notebook will reload external python modules
get_ipython().run_line_magic('load_ext', 'watermark')
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')

import numpy as np
import pandas as pd
import keras.backend as K
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Dense, Activation, Flatten

get_ipython().run_line_magic('watermark', "-a 'Ethen' -d -t -v -p numpy,pandas,keras")


# # Convolutional Network

# In[3]:


# loading the mnist dataset as an example
(X_train, y_train), (X_test, y_test) = mnist.load_data()
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')


# In[4]:


# input image dimensions
img_rows, img_cols = 28, 28

# load the training data and perform basic data normalization
(X_train, y_train), (X_test, y_test) = mnist.load_data()

# the keras backend supports two different kinds of image data format,
# either channels first or channels last. We can detect which one is in use
# and reshape our raw data accordingly: if it's channels first, we add an
# extra dimension representing the depth (the number of color channels, 1 here
# because mnist images are grayscale) at the very beginning; if it's channels
# last, we add it at the end
if K.image_data_format() == 'channels_first':
    X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
    X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
    X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)

X_train = X_train.astype('float32')
X_test = X_test.astype('float32')

# pixel values range from 0 - 255, we can normalize them to the
# [0, 1] range by dividing every value by 255
X_train /= 255
X_test /= 255
print('train shape:', X_train.shape)


# In[5]:


# one-hot encode the class (target) vectors
n_class = 10
y_train = np_utils.to_categorical(y_train, n_class)
y_test = np_utils.to_categorical(y_test, n_class)
print('y_train shape:', y_train.shape)
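
# A small sketch (not part of the original notebook) showing that the reshape
# above is equivalent to inserting a singleton channel axis with np.expand_dims;
# the `fake_images` array below is made up purely for illustration.
fake_images = np.zeros((5, 28, 28))  # hypothetical batch of 5 grayscale images
if K.image_data_format() == 'channels_first':
    with_channel = np.expand_dims(fake_images, axis=1)   # (5, 1, 28, 28)
else:
    with_channel = np.expand_dims(fake_images, axis=-1)  # (5, 28, 28, 1)

print(with_channel.shape)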

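
# A quick sketch (not part of the original notebook) illustrating what the
# one-hot encoding above produces: `np_utils.to_categorical` turns each integer
# label into a length `n_class` indicator vector with a single 1 at the label's
# index. The example labels below are made up purely for illustration.
example_labels = [0, 3, 9]
print(np_utils.to_categorical(example_labels, n_class))
# prints three rows: the one-hot vectors for the labels 0, 3 and 9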

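# Before building the network, a small sketch (not part of the original
# notebook) of the shape arithmetic used below: with `padding = 'same'` and
# stride 1, a Conv2D layer keeps the spatial size of its input, while a 2x2
# max pooling layer (whose stride defaults to the pool size) halves it.
import math

def conv_output_size(n, kernel_size, stride=1, padding='same'):
    """Output size along one spatial dimension of a convolutional layer."""
    if padding == 'same':
        # zero-padding is added, so only the stride shrinks the output
        return math.ceil(n / stride)
    # 'valid' padding adds no zero-padding
    return (n - kernel_size) // stride + 1

size = 28
size = conv_output_size(size, kernel_size=3)    # first conv layer  -> 28
size = conv_output_size(size, kernel_size=3)    # second conv layer -> 28
size = size // 2                                # 2x2 max pooling   -> 14
print('spatial size:', size)                    # 14
print('flattened features:', size * size * 64)  # 14 * 14 * 64 = 12544
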
# The following code chunk takes A WHILE if you're running it on a laptop!!

# In[6]:


model = Sequential()

# apply 32 3x3 filters in the first convolutional layer,
# specify `padding` as 'same' so the output keeps the same width and height
# as the input (keras will automatically do the zero-padding), the default
# stride is 1, and since this is the first layer we need to specify
# the input shape of the image
model.add(Conv2D(32, kernel_size = (3, 3), padding = 'same', input_shape = input_shape))

# apply an activation function after the conv layer
model.add(Activation('relu'))
model.add(Conv2D(64, kernel_size = (3, 3), padding = 'same'))
model.add(Activation('relu'))

# pooling layer, we specify the size of the filter for the pooling layer,
# the default `stride` is None, which defaults to the pool_size
model.add(MaxPooling2D(pool_size = (2, 2)))

# before calling the fully-connected layers, we'll have to flatten the feature maps
model.add(Flatten())
model.add(Dense(n_class))
model.add(Activation('softmax'))
model.compile(loss = 'categorical_crossentropy',
              optimizer = 'adam',
              metrics = ['accuracy'])

n_epoch = 12
batch_size = 2056
model.fit(X_train, y_train,
          batch_size = batch_size,
          epochs = n_epoch,
          verbose = 1,
          validation_data = (X_test, y_test))

# evaluate the model, reporting the categorical cross entropy loss and the accuracy
score = model.evaluate(X_test, y_test, verbose = 0)
print('Test score:', score[0])
print('Test accuracy:', score[1])


# # Reference
#
# - [Keras Example: mnist_cnn example](https://github.com/fchollet/keras/blob/master/examples/mnist_cnn.py)
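
# As a follow-up usage sketch (not part of the original notebook), the trained
# model can predict class probabilities for new images; taking the argmax over
# the class axis recovers the predicted digit.
pred_proba = model.predict(X_test[:5])        # shape (5, n_class), class probabilities
pred_label = np.argmax(pred_proba, axis = 1)  # predicted digit for each image
true_label = np.argmax(y_test[:5], axis = 1)  # y_test was one-hot encoded above
print('predicted:', pred_label)
print('actual:   ', true_label)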