import addutils.toc ; addutils.toc.js(ipy_notebook=True)
import numpy as np
import pandas as pd
from utilities import cifar10
from addutils import css_notebook
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_squared_error
import random
import time
css_notebook()
import bokeh.plotting as bk
from bokeh.io import push_notebook
from bokeh.layouts import gridplot
from bokeh.models import ColumnDataSource, Range1d
bk.output_notebook()
Keras is designed to be modular, minimalist and easily extensible. Francois Chollet, the author of Keras, says:
The library was developed with a focus on enabling fast experimentation. Being able to go from
idea to result with the least possible delay is key to doing good research.
Keras defines high-level Neural Network modules on top of either TensorFlow or Theano (which nowadays receives less attention than TensorFlow). It is possible to compose layers in a modular fashion and even extend the framework with user-defined models.
Installation is easy. By now you should have the environment addfor_tutorials with TensorFlow installed. If either case is not true please refer to the README.md to install anaconda and notebook ml25v04_tensorflow_basic_concepts.ipynb to install TensorFlow.
Activate your addfor_tutorials environment and from the command-line type:
pip install keras
Now you can import Keras and check that it is using TensorFlow as its backend.
import keras
/home/teoz/anaconda3/envs/addfor_tutorials/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`. from ._conv import register_converters as _register_converters Using TensorFlow backend.
print(keras.__version__)
2.1.4
Keras uses the backend to perform efficient symbolic computation on Tensors. There are two ways to compose models in Keras:
The sequential composer builds a list of modules that constitutes the architecture of the network; for example, a simple feed-forward neural network for MNIST can be written as:
# Sequential API: layers are stacked in the order they are added.
model = Sequential()
# Fully connected layer: 32 units fed by a flattened 784-pixel MNIST image.
model.add(Dense(32, input_shape=(784,)))
model.add(Activation('relu'))
# Output layer: one unit per digit class; softmax yields class probabilities.
model.add(Dense(10))
model.add(Activation('softmax'))
The functional API treats each layer as a function and allows composing functions into a complex neural network. For example, the network defined before can be expressed as something like: $$y=relu(f(\sigma(g(x))))$$ The same network can be defined with the functional API as:
# Functional API: each layer is a callable applied to the previous tensor.
inputs = Input(shape=(784,))
x = Dense(32)(inputs)
x = Activation("relu")(x)
x = Dense(10)(x)
predictions = Activation("softmax")(x)
# The model is fully determined by its input and output tensors.
model = Model(inputs=inputs, outputs=predictions)
Each layer is a function, and since a model is a composition of layers, a model is also a function and can be treated as another layer by calling it on appropriately shaped input tensor.
The functional API can be used to define any kind of network, and some kinds of networks can be defined only with it — for example networks with multiple inputs and outputs, or networks that use shared layers. As an example, to define a multiple input-output network you can use:
model = Model(inputs=[input1, input2], outputs=[output1, output2])
A Dense model is a fully connected neural network layer.
keras.layers.Dense
Convolutional layers are principally:
keras.layers.convolutional.Conv1D
keras.layers.convolutional.Conv2D
keras.layers.pooling.MaxPooling1D
keras.layers.pooling.MaxPooling2D
Regularization layers:
keras.layers.core.Dropout
keras.layers.normalization.BatchNormalization
Activation functions: all principal activation functions are supported.
Losses: cross entropy, mean squared error and all popular losses are supported.
Metrics: a measure that tells how the model is performing.
Optimizers: such as Adam, Adagrad, RMSProp and plain SGD are all supported in module keras.optimizers
.
Before training a model, it is necessary to compile it. Compile takes three arguments: an optimizer, a loss and a metric.
# Compilation binds together the optimizer, the loss and the reported metrics.
# For a multi-class classification problem
model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
Once the model is compiled, training is performed by calling the fit method, for example:
# Train the model, iterating on the data in batches of 32 samples
model.fit(data, labels, epochs=10, batch_size=32)
In Keras is possible to save model architecture to yaml or json format by calling:
# model saving
json_string = model.to_json()
yaml_string = model.to_yaml()
# model reconstruction
model = model_from_json(json_string)
model = model_from_yaml(yaml_string)
Weights are saved in hdf5 format instead, by calling:
model.save('my_model.h5')
and restoring with:
model = load_model('my_model.h5')
One of the coolest things about Keras is the possibility of adding callbacks during training. For example the model can decide when to stop based on an EarlyStopping condition, or the loss history can be saved (and later viewed with TensorBoard) at each iteration. Keras supports model checkpointing in a similar way to TensorFlow.
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.optimizers import SGD, Adam, RMSprop
from keras.callbacks import TensorBoard
# CIFAR-10 images are 32x32 RGB.
IMG_CHANNELS = 3
IMG_ROWS = 32
IMG_COLS = 32
# Download (if needed) and load the dataset via the course helper module.
cifar10.data_path = "example_data/CIFAR-10/"
cifar10.maybe_download_and_extract()
class_names = cifar10.load_class_names()
images_train, cls_train, labels_train = cifar10.load_training_data()
images_test, cls_test, labels_test = cifar10.load_test_data()
# constants
BATCH_SIZE = 128
NB_EPOCH = 6            # training epochs
NB_CLASSES = 10         # CIFAR-10 has ten classes
VERBOSE = 0             # silence per-epoch console output
VALIDATION_SPLIT = 0.2  # fraction of training data held out for validation
# Small CNN for CIFAR-10: two conv/pool stages followed by a fully
# connected classifier head.
model = Sequential()
# Stage 1: 32 3x3 filters; padding='same' keeps the 32x32 spatial size.
model.add(Conv2D(32, (3, 3), padding='same',
                 input_shape=(IMG_ROWS, IMG_COLS, IMG_CHANNELS)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
# Stage 2: 64 filters on the 16x16 feature maps.
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
# Classifier head: flatten -> dense -> dropout -> softmax over 10 classes.
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.7))
model.add(Dense(NB_CLASSES))
model.add(Activation('softmax'))
model.summary()
model.compile(loss='categorical_crossentropy', optimizer=RMSprop(), metrics=['accuracy'])
It is possible to add custom callbacks, for example one that records the loss and accuracy.
class LossHistory(keras.callbacks.Callback):
    """Callback that records per-batch loss and accuracy during training."""

    def on_train_begin(self, logs=None):
        # Fresh lists for every training run.  Using `logs=None` instead of
        # the original mutable default `logs={}` avoids the shared-mutable-
        # default-argument pitfall.
        self.losses = []
        self.accuracy = []

    def on_batch_end(self, batch, logs=None):
        logs = logs or {}
        self.losses.append(logs.get('loss'))
        self.accuracy.append(logs.get('acc'))
Multiple callbacks can be added to the model, for example we can write logs to be read later by tensorboard.
# Callbacks passed to fit(): TensorBoard logging plus the custom
# per-batch history recorder defined above.
callbacks = []
callbacks.append(TensorBoard(log_dir='temp/keras/logs'))
history_new = LossHistory()
callbacks.append(history_new)
The model also returns the values recorded during training (independently of the callbacks):
# Train, then evaluate on the held-out test set.
hist = model.fit(images_train, labels_train, batch_size=BATCH_SIZE,
                 epochs=NB_EPOCH, validation_split=VALIDATION_SPLIT,
                 verbose=VERBOSE, callbacks=callbacks)
score = model.evaluate(images_test, labels_test, batch_size=BATCH_SIZE, verbose=VERBOSE)
# evaluate() returns [loss, accuracy] because metrics=['accuracy'].
print('Test accuracy: {}'.format(score[1]))
print(hist.history)
# Plot the per-batch loss (blue) and accuracy (red) recorded by the callback.
fig = bk.figure(plot_width=600, plot_height=350, title=None)
fig.line(np.array(range(len(history_new.losses))), np.array(history_new.losses))
fig.line(np.array(range(len(history_new.accuracy))), np.array(history_new.accuracy),
         color='red')
bk.show(fig)
For this example you need to download the Stanford Dogs dataset (with the corresponding annotations and train/test split). Extract the dataset into the example_data directory and then execute the following cell to split the data into training and evaluation sets.
import scipy.io
import os
import shutil


def _copy_listed_images(file_list, dst_root):
    """Copy every image referenced in a loadmat 'file_list' array into
    dst_root, recreating the per-breed directory layout."""
    for el in file_list:
        relative = el[0][0]  # e.g. 'n02085620-Chihuahua/n02085620_10074.jpg'
        dstdir = os.path.join(dst_root, os.path.dirname(relative))
        # exist_ok avoids the exists()/makedirs race of the original code.
        os.makedirs(dstdir, exist_ok=True)
        shutil.copy(os.path.join('example_data/Images', relative), dstdir)


# The Stanford Dogs .mat files define the official train/test partition.
test_list = scipy.io.loadmat('example_data/lists/test_list.mat')
train_list = scipy.io.loadmat('example_data/lists/train_list.mat')
# One shared helper replaces the two duplicated copy loops.
_copy_listed_images(train_list['file_list'], 'example_data/data/train')
_copy_listed_images(test_list['file_list'], 'example_data/data/test')
Now import the necessary modules and functions
from keras.preprocessing.image import ImageDataGenerator
from keras.applications.vgg16 import preprocess_input, VGG16
from keras.layers import Dense, GlobalAveragePooling2D
from keras.models import Model
We do not rely on numpy to import data into our model. Instead we use a datagenerator, similar to tf.data, to load the images directly from disk. With this function we are able to preprocess the input with additional functions, for example we use the function preprocess_input
to apply the same transformation to the images that VGG uses.
# Generators stream images from disk in batches; preprocess_input applies
# the same normalization VGG-16 was trained with.
train_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)
validation_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)
batch_size = 32
# VGG-16 expects 224x224 inputs; only the training stream is shuffled.
train_generator = train_datagen.flow_from_directory('example_data/data/train', target_size=(224,224),
                                                    class_mode='categorical', shuffle=True,
                                                    batch_size=batch_size)
validation_generator = validation_datagen.flow_from_directory('example_data/data/test', target_size=(224,224),
                                                              class_mode='categorical', shuffle=False,
                                                              batch_size=batch_size)
Now use a pretrained version of VGG-16, trained on imagenet, but exclude the top layers. We add new layers to account for different number of classes. In this case we have 120 classes.
# Transfer learning: reuse the VGG-16 convolutional backbone (imagenet
# weights, classification head removed) and train a new head for the
# 120 dog breeds.
base_model = VGG16(weights='imagenet', include_top=False)
x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(1024, activation='relu')(x)
predictions = Dense(120, activation='softmax')(x)
model = Model(inputs=base_model.input, outputs=predictions)
# Freeze the pretrained backbone so only the new head is trained.
for layer in base_model.layers:
    layer.trainable = False
# Compile AFTER freezing, so the trainable flags take effect.
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit_generator(train_generator, epochs=10, validation_data=validation_generator)
Now the top layers should have learned the mapping from the pretrained weights to the correct classes. You can explore all layers of the model and decide which layer to freeze and which one to fine train. Remember that once frozen you have to compile the model again.
# List layer indices/names to choose a cut point for fine-tuning.
for i, layer in enumerate(base_model.layers):
    print(i, layer.name)
This example is analogous to the one in previous notebook.
# Load the time-series dataset; column X0 holds the timestamps.
data = pd.read_csv('example_data/data2.csv', parse_dates=['X0'])
# Experiment hyper-parameters.
prediction = 1
steps_forward = 12      # prediction horizon (rows ahead)
steps_backward = 0
inputs_default = 0
hidden = 128            # LSTM units
batch_size = 1024
timesteps = 12          # past steps per LSTM window
epochs = 30
test_size = 0.4         # fraction of weeks held out for testing
def rolling_past(X, y, size):
    """Turn a feature matrix into overlapping windows of `size` past steps.

    Returns (windows, targets, shuffled_windows, shuffled_targets), where
    windows has shape (n_samples, size, n_features) and targets (n_samples, 1).
    """
    features = pd.DataFrame(X)
    targets = pd.DataFrame(y)
    # Column-wise concat of the frame shifted 0..size-1 steps into the past.
    window = pd.concat([features.shift(step) for step in range(size)], axis=1)
    window['target'] = targets
    # Rows whose history would reach before the start of the series are dropped.
    window.dropna(inplace=True, axis=0)
    # Keep a second, row-shuffled copy (rows stay aligned with their targets).
    shuffled = window.iloc[np.random.permutation(len(window))]
    ordered_y = window['target']
    window.drop(['target'], axis=1, inplace=True)
    shuffled_y = shuffled['target']
    shuffled.drop(['target'], axis=1, inplace=True)

    def as_3d(frame):
        # (n_samples, size * n_features) -> (n_samples, size, n_features)
        return frame.values.reshape(frame.shape[0], size, -1)

    def as_col(series):
        return series.values.reshape((-1, 1))

    return (as_3d(window), as_col(ordered_y), as_3d(shuffled), as_col(shuffled_y))
# Per-column shift ranges used by transform(): columns listed here get
# inputs from steps_backward..steps_forward; the rest use inputs_default.
input_range = {
    'X109': [steps_backward, steps_forward],
    'X110': [steps_backward, steps_forward],
    'X111': [steps_backward, steps_forward],
    'X112': [steps_backward, steps_forward],
    'X70': [steps_backward, steps_forward],
    'X71': [steps_backward, steps_forward],
    'X73': [steps_backward, steps_forward],
    'X91': [steps_backward, steps_forward],
    'X92': [steps_backward, steps_forward],
    'X94': [steps_backward, steps_forward],
}
# Feature columns fed to the model (note: includes the current 'y' itself).
X_columns = ['X109', 'X54', 'X53', 'X71', 'X112', 'X59', 'X111', 'X92', 'X66',
             'X94', 'X73', 'X91', 'X110', 'X40', 'y', 'X47', 'X48', 'X70', 'X60']
# Target column name.
y_column = 'y'
def transform(source, y_column, X_columns, inputs_per_column,
              inputs_default, steps_forward, dates='X0'):
    """Build a supervised-learning matrix from a raw time-series frame.

    Parameters
    ----------
    source : DataFrame with the raw data (must contain the `dates` column).
    y_column : name of the target column in `source`.
    X_columns : feature columns to standardize and lag.
    inputs_per_column : dict col -> [first_shift, last_shift]; columns not
        listed (or a falsy dict) fall back to `inputs_default` past steps.
    inputs_default : number of past steps when no per-column range is given.
    steps_forward : prediction horizon; the target is shifted this far ahead.
    dates : name of the timestamp column (default 'X0').

    Returns (X, y, dates, X.index).
    """
    dates = source[dates].iloc[:-steps_forward]
    y = pd.DataFrame()
    # The target is the future value, `steps_forward` rows ahead.
    y[y_column] = source[y_column].shift(-steps_forward)
    # Standardize features (zero mean, unit variance) before lagging.
    scaler = StandardScaler()
    new_X = pd.DataFrame(scaler.fit_transform(source[X_columns]), columns=X_columns)
    X = pd.DataFrame()
    for column in X_columns:
        if inputs_per_column:
            inputs = inputs_per_column.get(column, None)
            if inputs:
                inputs_list = range(inputs[0], inputs[1] + 1)
            else:
                inputs_list = range(-inputs_default, 1)
        else:
            inputs_list = range(-inputs_default, 1)
        for i in inputs_list:
            col_name = "%s_%s" % (column, i)
            X[col_name] = new_X[column].shift(-i)  # Note: shift direction is inverted
    # Positional indices of rows whose target is NaN (the tail cut off by the
    # forward shift).  `Series.nonzero()` was removed in pandas 1.0, so use
    # np.flatnonzero instead of the original `y.isnull().any(1).nonzero()[0]`.
    null_indices = np.flatnonzero(y.isnull().any(axis=1).to_numpy())
    # X has a default RangeIndex, so label-drop equals positional drop here.
    X.drop(null_indices, axis=0, inplace=True)
    X.dropna(inplace=True, axis=0)
    y.dropna(inplace=True, axis=0)
    return X, y, dates, X.index
def split(X, y, dates, test_size):
    """Split X/y into train and test sets by calendar week.

    Whole ISO weeks are assigned at random to one side of the split so that
    samples from the same week never leak across the train/test boundary.
    Note: mutates X and y in place (sets `dates` as their index).

    Returns (X_train, y_train, X_test, y_test) as numpy arrays.
    """
    X.set_index(dates, inplace=True)
    y.set_index(dates, inplace=True)
    # `DatetimeIndex.week` was removed in pandas 2.0 -> use isocalendar().
    # astype(int) turns the UInt32 week numbers into plain ints so that
    # get_group(i) lookups below work with Python integers.
    week_of = X.index.isocalendar().week.astype(int)
    X_group = X.groupby(week_of)
    y_group = y.groupby(week_of)
    a = list(X_group.groups.keys())
    random.shuffle(a)
    sp = int(len(a) * test_size)
    train_weeks = a[sp:]
    test_weeks = a[:sp]
    print('train_weeks: ', train_weeks)
    X_train = pd.concat([X_group.get_group(i).reset_index(drop=True) for i in train_weeks])
    X_test = pd.concat([X_group.get_group(i).reset_index(drop=True) for i in test_weeks])
    y_train = pd.concat([y_group.get_group(i).reset_index(drop=True) for i in train_weeks])
    y_test = pd.concat([y_group.get_group(i).reset_index(drop=True) for i in test_weeks])
    return (X_train.values, y_train.values, X_test.values, y_test.values)
# Build the supervised matrix, then split it into train/test by week.
X, y, dates, _ = transform(source=data, y_column=y_column, X_columns=X_columns,
                           inputs_per_column=input_range, inputs_default=inputs_default,
                           steps_forward=steps_forward)
X_train, y_train, X_test, y_test = split(X, y, dates, test_size)
train_weeks: [24, 21, 17, 16, 15, 18, 25, 14]
# Window the series: each sample carries `timesteps` steps of history.
X_train, y_train, X_train_shuffled, y_train_shuffled = rolling_past(X_train, y_train, timesteps)
X_test, y_test, X_test_shuffled, y_test_shuffled = rolling_past(X_test, y_test, timesteps)
X, y, _, _ = rolling_past(X, y, timesteps)
from keras.layers import LSTM
# Build the model
# A single LSTM layer over (timesteps, n_features) windows followed by a
# linear output layer for regression.
model = Sequential()
model.add(LSTM(hidden,
               batch_input_shape=(None,
                                  timesteps,
                                  X.shape[2])))
model.add(Dense(y.shape[1]))
model.compile(loss='mean_squared_error', optimizer='adam')
# Fit the model
t0 = time.time()
# Train on the pre-shuffled windows; shuffle=False keeps them as prepared.
model.fit(X_train_shuffled,
          y_train_shuffled,
          epochs=epochs,
          batch_size=batch_size,
          shuffle=False)
print('Training time: {:3.6f} s'.format(time.time() - t0))
Epoch 1/100 11906/11906 [==============================] - 5s 437us/step - loss: 451.0360 Epoch 2/100 11906/11906 [==============================] - 6s 544us/step - loss: 273.6115 Epoch 3/100 11906/11906 [==============================] - 6s 539us/step - loss: 187.5876 Epoch 4/100 11906/11906 [==============================] - 6s 541us/step - loss: 112.4112 Epoch 5/100 11906/11906 [==============================] - 6s 543us/step - loss: 50.7286 Epoch 6/100 11906/11906 [==============================] - 8s 653us/step - loss: 17.5353 Epoch 7/100 11906/11906 [==============================] - 7s 554us/step - loss: 5.2005 Epoch 8/100 11906/11906 [==============================] - 7s 572us/step - loss: 1.8245 Epoch 9/100 11906/11906 [==============================] - 7s 586us/step - loss: 1.1151 Epoch 10/100 11906/11906 [==============================] - 7s 590us/step - loss: 0.9922 Epoch 11/100 11906/11906 [==============================] - 7s 589us/step - loss: 0.9667 Epoch 12/100 11906/11906 [==============================] - 7s 565us/step - loss: 0.9467 Epoch 13/100 11906/11906 [==============================] - 7s 556us/step - loss: 0.9224 Epoch 14/100 11906/11906 [==============================] - 7s 552us/step - loss: 0.8951 Epoch 15/100 11906/11906 [==============================] - 8s 633us/step - loss: 0.8679 Epoch 16/100 11906/11906 [==============================] - 7s 599us/step - loss: 0.8440 Epoch 17/100 11906/11906 [==============================] - 7s 581us/step - loss: 0.8211 Epoch 18/100 11906/11906 [==============================] - 7s 570us/step - loss: 0.8014 Epoch 19/100 11906/11906 [==============================] - 8s 634us/step - loss: 0.7836 Epoch 20/100 11906/11906 [==============================] - 7s 611us/step - loss: 0.7673 Epoch 21/100 11906/11906 [==============================] - 7s 623us/step - loss: 0.7535 Epoch 22/100 11906/11906 [==============================] - 7s 612us/step - loss: 0.7417 Epoch 23/100 11906/11906 
[==============================] - 6s 545us/step - loss: 0.7323 Epoch 24/100 11906/11906 [==============================] - 6s 544us/step - loss: 0.7243 Epoch 25/100 11906/11906 [==============================] - 6s 543us/step - loss: 0.7172 Epoch 26/100 11906/11906 [==============================] - 6s 539us/step - loss: 0.7105 Epoch 27/100 11906/11906 [==============================] - 6s 540us/step - loss: 0.7037 Epoch 28/100 11906/11906 [==============================] - 6s 539us/step - loss: 0.6970 Epoch 29/100 11906/11906 [==============================] - 7s 584us/step - loss: 0.6901 Epoch 30/100 11906/11906 [==============================] - 6s 542us/step - loss: 0.6830 Epoch 31/100 11906/11906 [==============================] - 6s 543us/step - loss: 0.6752 Epoch 32/100 11906/11906 [==============================] - 7s 555us/step - loss: 0.6667 Epoch 33/100 11906/11906 [==============================] - 6s 546us/step - loss: 0.6580 Epoch 34/100 11906/11906 [==============================] - 6s 540us/step - loss: 0.6498 Epoch 35/100 11906/11906 [==============================] - 6s 538us/step - loss: 0.6420 Epoch 36/100 11906/11906 [==============================] - 6s 538us/step - loss: 0.6323 Epoch 37/100 11906/11906 [==============================] - 6s 540us/step - loss: 0.5936 Epoch 38/100 11906/11906 [==============================] - 6s 541us/step - loss: 0.5757 Epoch 39/100 11906/11906 [==============================] - 6s 541us/step - loss: 0.5597 Epoch 40/100 11906/11906 [==============================] - 6s 538us/step - loss: 0.5485 Epoch 41/100 11906/11906 [==============================] - 6s 540us/step - loss: 0.5375 Epoch 42/100 11906/11906 [==============================] - 6s 543us/step - loss: 0.5268 Epoch 43/100 11906/11906 [==============================] - 6s 536us/step - loss: 0.5162 Epoch 44/100 11906/11906 [==============================] - 7s 548us/step - loss: 0.5060 Epoch 45/100 11906/11906 [==============================] - 6s 
545us/step - loss: 0.4974 Epoch 46/100 11906/11906 [==============================] - 6s 546us/step - loss: 0.4897 Epoch 47/100 11906/11906 [==============================] - 6s 543us/step - loss: 0.4820 Epoch 48/100 11906/11906 [==============================] - 6s 543us/step - loss: 0.4745 Epoch 49/100 11906/11906 [==============================] - 6s 545us/step - loss: 0.4674 Epoch 50/100 11906/11906 [==============================] - 6s 541us/step - loss: 0.4606 Epoch 51/100 11906/11906 [==============================] - 7s 558us/step - loss: 0.4541 Epoch 52/100 11906/11906 [==============================] - 6s 542us/step - loss: 0.4477 Epoch 53/100 11906/11906 [==============================] - 6s 545us/step - loss: 0.4413 Epoch 54/100 11906/11906 [==============================] - 6s 537us/step - loss: 0.4353 Epoch 55/100 11906/11906 [==============================] - 6s 539us/step - loss: 0.4295 Epoch 56/100 11906/11906 [==============================] - 6s 542us/step - loss: 0.4231 Epoch 57/100 11906/11906 [==============================] - 6s 541us/step - loss: 0.4175 Epoch 58/100 11906/11906 [==============================] - 6s 541us/step - loss: 0.4118 Epoch 59/100 11906/11906 [==============================] - 6s 542us/step - loss: 0.4061 Epoch 60/100 11906/11906 [==============================] - 6s 539us/step - loss: 0.4009 Epoch 61/100 11906/11906 [==============================] - 6s 542us/step - loss: 0.3958 Epoch 62/100 11906/11906 [==============================] - 6s 544us/step - loss: 0.3906 Epoch 63/100 11906/11906 [==============================] - 6s 536us/step - loss: 0.3854 Epoch 64/100 11906/11906 [==============================] - 6s 543us/step - loss: 0.3805 Epoch 65/100 11906/11906 [==============================] - 6s 539us/step - loss: 0.3758 Epoch 66/100 11906/11906 [==============================] - 6s 545us/step - loss: 0.3711 Epoch 67/100 11906/11906 [==============================] - 7s 547us/step - loss: 0.3663 Epoch 68/100 
11906/11906 [==============================] - 6s 541us/step - loss: 0.3614 Epoch 69/100 11906/11906 [==============================] - 8s 640us/step - loss: 0.3567 Epoch 70/100 11906/11906 [==============================] - 8s 630us/step - loss: 0.3518 Epoch 71/100 11906/11906 [==============================] - 6s 541us/step - loss: 0.3466 Epoch 72/100 11906/11906 [==============================] - 6s 542us/step - loss: 0.3417 Epoch 73/100 11906/11906 [==============================] - 6s 542us/step - loss: 0.3372 Epoch 74/100 11906/11906 [==============================] - 6s 541us/step - loss: 0.3319 Epoch 75/100 11906/11906 [==============================] - 6s 543us/step - loss: 0.3269 Epoch 76/100 11906/11906 [==============================] - 6s 541us/step - loss: 0.3228 Epoch 77/100 11906/11906 [==============================] - 6s 541us/step - loss: 0.3190 Epoch 78/100 11906/11906 [==============================] - 6s 541us/step - loss: 0.3153 Epoch 79/100 11906/11906 [==============================] - 7s 559us/step - loss: 0.3111 Epoch 80/100 11906/11906 [==============================] - 6s 544us/step - loss: 0.3065 Epoch 81/100 11906/11906 [==============================] - 6s 542us/step - loss: 0.3026 Epoch 82/100 11906/11906 [==============================] - 6s 543us/step - loss: 0.2986 Epoch 83/100 11906/11906 [==============================] - 6s 544us/step - loss: 0.2943 Epoch 84/100 11906/11906 [==============================] - 6s 545us/step - loss: 0.2644 Epoch 85/100 11906/11906 [==============================] - 6s 543us/step - loss: 0.2195 Epoch 86/100 11906/11906 [==============================] - 6s 541us/step - loss: 0.2069 Epoch 87/100 11906/11906 [==============================] - 6s 544us/step - loss: 0.1986 Epoch 88/100 11906/11906 [==============================] - 7s 556us/step - loss: 0.1945 Epoch 89/100 11906/11906 [==============================] - 6s 544us/step - loss: 0.1905 Epoch 90/100 11906/11906 
[==============================] - 6s 545us/step - loss: 0.1864 Epoch 91/100 11906/11906 [==============================] - 6s 542us/step - loss: 0.1833 Epoch 92/100 11906/11906 [==============================] - 6s 542us/step - loss: 0.1808 Epoch 93/100 11906/11906 [==============================] - 6s 541us/step - loss: 0.1785 Epoch 94/100 11906/11906 [==============================] - 6s 540us/step - loss: 0.1758 Epoch 95/100 11906/11906 [==============================] - 6s 538us/step - loss: 0.1734 Epoch 96/100 11906/11906 [==============================] - 6s 540us/step - loss: 0.1711 Epoch 97/100 11906/11906 [==============================] - 6s 538us/step - loss: 0.1690 Epoch 98/100 11906/11906 [==============================] - 6s 539us/step - loss: 0.1666 Epoch 99/100 11906/11906 [==============================] - 6s 538us/step - loss: 0.1643 Epoch 100/100 11906/11906 [==============================] - 6s 539us/step - loss: 0.1619 Training time: 658.307858 s
# Predict all dataset
# Timed prediction over the full windowed series.
t0 = time.time()
y_hat = model.predict(X, batch_size=batch_size)
print('Prediction time whole dataset: {:3.6f} s'.format(time.time() - t0))
Prediction time whole dataset: 3.522112 s
# Predict training set only
t0 = time.time()
y_train_predicted = model.predict(X_train, batch_size=batch_size)
print('Prediction time train set: {:3.6f} s'.format(time.time() - t0))
Prediction time train set: 1.974006 s
# Predict test set only
t0 = time.time()
y_test_predicted = model.predict(X_test, batch_size=batch_size)
print('Prediction time test set: {:3.6f} s'.format(time.time() - t0))
Prediction time test set: 1.323014 s
# Root-mean-squared error on the train and test predictions.
trainScore = np.sqrt(mean_squared_error(y_train, y_train_predicted))
print('Train Score: %.2f RMSE' % (trainScore))
testScore = np.sqrt(mean_squared_error(y_test, y_test_predicted))
print('Test Score: %.2f RMSE' % (testScore))
Train Score: 0.40 RMSE Test Score: 0.42 RMSE
# Overlay predicted (orange) vs. true (blue) values over time.
tools = 'pan,wheel_zoom,box_zoom,reset,save'
fig_b = bk.figure(plot_width=800, plot_height=350,
                  x_axis_label='time',
                  y_axis_label='value',
                  tools=tools)
fig_b.line(data['X0'].values[:len(y)], np.ravel(y), legend='true value')
fig_b.line(data['X0'].values[:len(y_hat)], np.ravel(y_hat), color='orange', legend='predicted value')
bk.show(fig_b)
Visit www.add-for.com for more tutorials and updates.
This work is licensed under a Creative Commons Attribution-ShareAlike 4.0 International License.