import numpy as np
import matplotlib.pyplot as plt
from tensorflow import keras
layers = keras.layers
We use Keras version 2.4.0 (the tf.keras API bundled with TensorFlow 2.4).
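To confirm the version in your own environment, a quick check (assuming the tf.keras bundled with TensorFlow 2.x, which exposes __version__):

print(keras.__version__)  # e.g. 2.4.0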
We start by creating a signal trace: t runs from 0 to 100 and f(t) = sin(pi * t).
N = 10000
t = np.linspace(0, 100, N) # time steps
f = np.sin(np.pi * t) # signal
Next, we split the signal into semi-redundant sub-sequences of length window_size + 1. Shuffling is left to model.fit, which shuffles the training data by default (note that validation_split takes the last fraction of the data before shuffling).
window_size = 20
n = N - window_size - 1 # number of sub-sequences (the last possible window is dropped)
data = np.stack([f[i: i + window_size + 1] for i in range(n)])
Finally, we split each sub-sequence into a feature window X (the first window_size values) and a target y (the final value).
X, y = np.split(data, [-1], axis=1)  # X: first window_size values, y: final value
X = X[..., np.newaxis]  # add a feature axis -> shape (n, window_size, 1)
print('Example:')
print('X =', X[0, :, 0])
print('y =', y[0, :])
Example:
X = [0. 0.0314139 0.06279679 0.0941177 0.1253457 0.15644998 0.18739983 0.21816471 0.24871423 0.27901826 0.30904688 0.33877044 0.36815961 0.39718538 0.42581909 0.45403249 0.48179773 0.50908739 0.53587454 0.56213275]
y = [0.58783609]
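As an aside, the Python loop above can be replaced by a vectorized window view; a minimal sketch, assuming NumPy >= 1.20 where sliding_window_view is available:

from numpy.lib.stride_tricks import sliding_window_view

windows = sliding_window_view(f, window_size + 1)  # all windows, shape (N - window_size, window_size + 1)
data_alt = windows[:n]  # match the loop above, which skips the last possible window
assert np.allclose(data_alt, data)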
z0 = layers.Input(shape=[None, 1])  # sequences of arbitrary length with a single feature
z = layers.LSTM(16)(z0)  # 16 units; returns only the final output
z = layers.Dense(1)(z)  # linear regression head for the next value
model = keras.models.Model(inputs=z0, outputs=z)
model.summary()
model.compile(loss='mse', optimizer='adam')
Model: "model" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= input_1 (InputLayer) [(None, None, 1)] 0 _________________________________________________________________ lstm (LSTM) (None, 16) 1152 _________________________________________________________________ dense (Dense) (None, 1) 17 ================================================================= Total params: 1,169 Trainable params: 1,169 Non-trainable params: 0 _________________________________________________________________ None
results = model.fit(
    X, y,
    epochs=60,
    batch_size=32,
    verbose=2,
    validation_split=0.1,
    callbacks=[
        keras.callbacks.ReduceLROnPlateau(factor=0.67, patience=3, verbose=1, min_lr=1e-5),
        keras.callbacks.EarlyStopping(patience=4, verbose=1)])
Epoch 1/60  281/281 - 2s - loss: 0.0402 - val_loss: 0.0053
Epoch 2/60  281/281 - 1s - loss: 0.0011 - val_loss: 1.0384e-04
Epoch 3/60  281/281 - 1s - loss: 4.4316e-05 - val_loss: 2.1550e-05
Epoch 4/60  281/281 - 1s - loss: 1.8635e-05 - val_loss: 1.4417e-05
Epoch 5/60  281/281 - 1s - loss: 1.3515e-05 - val_loss: 1.1366e-05
Epoch 00005: ReduceLROnPlateau reducing learning rate to 0.0006700000318232924.
Epoch 6/60  281/281 - 1s - loss: 1.0445e-05 - val_loss: 8.8832e-06
Epoch 7/60  281/281 - 1s - loss: 8.8564e-06 - val_loss: 8.0369e-06
Epoch 8/60  281/281 - 1s - loss: 7.3249e-06 - val_loss: 6.1736e-06
Epoch 00008: ReduceLROnPlateau reducing learning rate to 0.0004489000252215192.
Epoch 9/60  281/281 - 1s - loss: 5.9330e-06 - val_loss: 5.1216e-06
Epoch 10/60  281/281 - 1s - loss: 5.0756e-06 - val_loss: 4.4159e-06
Epoch 11/60  281/281 - 1s - loss: 4.3783e-06 - val_loss: 4.4199e-06
Epoch 00011: ReduceLROnPlateau reducing learning rate to 0.0003007630087086.
Epoch 12/60  281/281 - 1s - loss: 3.7236e-06 - val_loss: 3.5320e-06
Epoch 13/60  281/281 - 1s - loss: 3.3115e-06 - val_loss: 3.3500e-06
Epoch 14/60  281/281 - 1s - loss: 2.9173e-06 - val_loss: 2.6075e-06
Epoch 15/60  281/281 - 1s - loss: 2.5345e-06 - val_loss: 2.2070e-06
Epoch 00015: ReduceLROnPlateau reducing learning rate to 0.0002015112101798877.
Epoch 16/60  281/281 - 1s - loss: 2.1699e-06 - val_loss: 1.8887e-06
Epoch 17/60  281/281 - 1s - loss: 1.8693e-06 - val_loss: 1.7786e-06
Epoch 18/60  281/281 - 1s - loss: 1.6997e-06 - val_loss: 1.4133e-06
Epoch 00018: ReduceLROnPlateau reducing learning rate to 0.00013501251160050743.
Epoch 19/60  281/281 - 1s - loss: 1.4902e-06 - val_loss: 1.3295e-06
Epoch 20/60  281/281 - 1s - loss: 1.2765e-06 - val_loss: 1.1865e-06
Epoch 21/60  281/281 - 1s - loss: 1.1598e-06 - val_loss: 1.0442e-06
Epoch 00021: ReduceLROnPlateau reducing learning rate to 9.04583813098725e-05.
Epoch 22/60  281/281 - 1s - loss: 1.0389e-06 - val_loss: 9.0986e-07
Epoch 23/60  281/281 - 1s - loss: 9.2497e-07 - val_loss: 8.2294e-07
Epoch 24/60  281/281 - 1s - loss: 8.5055e-07 - val_loss: 9.2430e-07
Epoch 00024: ReduceLROnPlateau reducing learning rate to 6.060711421014276e-05.
Epoch 25/60  281/281 - 1s - loss: 7.5636e-07 - val_loss: 6.6940e-07
Epoch 26/60  281/281 - 1s - loss: 6.9989e-07 - val_loss: 7.2943e-07
Epoch 27/60  281/281 - 1s - loss: 6.4873e-07 - val_loss: 5.5948e-07
Epoch 00027: ReduceLROnPlateau reducing learning rate to 4.060676725202939e-05.
Epoch 28/60  281/281 - 1s - loss: 5.7041e-07 - val_loss: 6.0144e-07
Epoch 29/60  281/281 - 1s - loss: 5.4323e-07 - val_loss: 4.9288e-07
Epoch 30/60  281/281 - 1s - loss: 4.9870e-07 - val_loss: 4.9515e-07
Epoch 00030: ReduceLROnPlateau reducing learning rate to 2.720653359574499e-05.
Epoch 31/60  281/281 - 1s - loss: 4.5630e-07 - val_loss: 4.5954e-07
Epoch 32/60  281/281 - 1s - loss: 4.3995e-07 - val_loss: 3.9293e-07
Epoch 33/60  281/281 - 1s - loss: 4.1068e-07 - val_loss: 4.2055e-07
Epoch 00033: ReduceLROnPlateau reducing learning rate to 1.8228377484774683e-05.
Epoch 34/60  281/281 - 1s - loss: 3.7825e-07 - val_loss: 3.6224e-07
Epoch 35/60  281/281 - 1s - loss: 3.6570e-07 - val_loss: 3.4233e-07
Epoch 36/60  281/281 - 1s - loss: 3.5367e-07 - val_loss: 3.3469e-07
Epoch 00036: ReduceLROnPlateau reducing learning rate to 1.2213012305437588e-05.
Epoch 37/60  281/281 - 1s - loss: 3.2874e-07 - val_loss: 3.1918e-07
Epoch 38/60  281/281 - 2s - loss: 3.1865e-07 - val_loss: 2.9763e-07
Epoch 39/60  281/281 - 1s - loss: 3.0727e-07 - val_loss: 3.3634e-07
Epoch 00039: ReduceLROnPlateau reducing learning rate to 1e-05.
Epoch 40/60  281/281 - 1s - loss: 2.9480e-07 - val_loss: 2.8321e-07
Epoch 41/60  281/281 - 1s - loss: 2.8227e-07 - val_loss: 2.7360e-07
Epoch 42/60  281/281 - 1s - loss: 2.7705e-07 - val_loss: 2.6126e-07
Epoch 43/60  281/281 - 1s - loss: 2.7010e-07 - val_loss: 2.9709e-07
Epoch 44/60  281/281 - 1s - loss: 2.5939e-07 - val_loss: 2.4549e-07
Epoch 45/60  281/281 - 1s - loss: 2.4846e-07 - val_loss: 2.3936e-07
Epoch 46/60  281/281 - 1s - loss: 2.4622e-07 - val_loss: 2.3825e-07
Epoch 47/60  281/281 - 1s - loss: 2.3851e-07 - val_loss: 2.2265e-07
Epoch 48/60  281/281 - 1s - loss: 2.3527e-07 - val_loss: 2.7506e-07
Epoch 49/60  281/281 - 1s - loss: 2.2538e-07 - val_loss: 2.0949e-07
Epoch 50/60  281/281 - 1s - loss: 2.2101e-07 - val_loss: 2.1906e-07
Epoch 51/60  281/281 - 2s - loss: 2.1342e-07 - val_loss: 2.2338e-07
Epoch 52/60  281/281 - 1s - loss: 2.1029e-07 - val_loss: 2.1525e-07
Epoch 53/60  281/281 - 1s - loss: 2.0469e-07 - val_loss: 1.9253e-07
Epoch 54/60  281/281 - 1s - loss: 2.0314e-07 - val_loss: 1.9931e-07
Epoch 55/60  281/281 - 1s - loss: 1.9546e-07 - val_loss: 1.8687e-07
Epoch 56/60  281/281 - 1s - loss: 1.9071e-07 - val_loss: 1.8670e-07
Epoch 57/60  281/281 - 1s - loss: 1.8853e-07 - val_loss: 1.9060e-07
Epoch 58/60  281/281 - 1s - loss: 1.8866e-07 - val_loss: 1.7612e-07
Epoch 59/60  281/281 - 1s - loss: 1.8681e-07 - val_loss: 2.0264e-07
Epoch 60/60  281/281 - 1s - loss: 1.8680e-07 - val_loss: 1.9440e-07
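The learning-rate drops in the log follow directly from the callback arithmetic: Adam starts at its tf.keras default of 1e-3, and every plateau multiplies it by 0.67 (1e-3 * 0.67 = 6.7e-4 at epoch 5, 6.7e-4 * 0.67 = 4.489e-4 at epoch 8, and so on, clipped at min_lr=1e-5 from epoch 39 onward). A small optional tweak: EarlyStopping keeps the final weights by default; Keras 2.x also accepts restore_best_weights=True to roll back to the best validation epoch, e.g.

keras.callbacks.EarlyStopping(patience=4, verbose=1, restore_best_weights=True)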
plt.figure(1, figsize=(12, 4))
plt.subplot(1, 2, 1)
plt.plot(results.history['loss'])
plt.plot(results.history['val_loss'])
plt.ylabel('loss')
plt.yscale("log")
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper right')
plt.tight_layout()
Finally, we investigate the forecasting capabilities of the model: starting from a window of true data, we predict one step at a time and feed each prediction back in as input (a closed-loop rollout).
def predict_next_k(model, window, k=10):
    """Predict the next k steps for the given model and starting sequence."""
    x = window[np.newaxis, :, np.newaxis]  # initial input, shape (1, window_size, 1)
    y = np.zeros(k)
    for i in range(k):
        y[i] = model.predict(x, verbose=0)[0, 0]
        # build the new input including the latest prediction
        x = np.roll(x, -1, axis=1)  # shift all inputs one step to the left
        x[:, -1] = y[i]             # append the latest prediction at the end
    return y
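Calling model.predict once per step is convenient but slow, because every call spins up a fresh prediction loop for a single sample. A lighter-weight variant, sketched under the assumption that the model can be called directly on a batch (standard for tf.keras functional models in TensorFlow 2), avoids most of that overhead:

def predict_next_k_fast(model, window, k=10):
    """Same closed-loop rollout, but call the model directly to skip predict() overhead."""
    x = window[np.newaxis, :, np.newaxis].copy()
    y = np.zeros(k)
    for i in range(k):
        y[i] = float(model(x, training=False).numpy())  # single forward pass
        x = np.roll(x, -1, axis=1)
        x[:, -1] = y[i]
    return y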
def plot_prediction(i0=0, k=500):
    """Predict and plot the next k steps for an input window starting at i0."""
    y0 = f[i0: i0 + window_size]       # starting window (input)
    y1 = predict_next_k(model, y0, k)  # predict the next k steps
    t0 = t[i0: i0 + window_size]
    t1 = t[i0 + window_size: i0 + window_size + k]
    plt.figure(figsize=(12, 4))
    plt.plot(t, f, label='data')
    plt.plot(t0, y0, color='C1', lw=3, label='input window')
    plt.plot(t1, y1, color='C1', ls='--', label='prediction')
    plt.xlim(0, 10)
    plt.legend()
    plt.xlabel('$t$')
    plt.ylabel('$f(t)$')
plot_prediction(12)
plot_prediction(85)
plot_prediction(115)
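To quantify the forecast instead of eyeballing it, we can compare the closed-loop rollout against the true signal. A minimal sketch using the helpers above (forecast_mse is a hypothetical name introduced here, not part of the original notebook):

def forecast_mse(i0, k=500):
    """Mean squared error of a k-step closed-loop forecast starting at index i0."""
    y_pred = predict_next_k(model, f[i0: i0 + window_size], k)
    y_true = f[i0 + window_size: i0 + window_size + k]
    return np.mean((y_pred - y_true) ** 2)

print(forecast_mse(12))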