이 노트북을 주피터 노트북 뷰어(nbviewer.jupyter.org)로 보거나 구글 코랩(colab.research.google.com)에서 실행할 수 있습니다.
주피터 노트북 뷰어로 보기 | 구글 코랩(Colab)에서 실행하기
이 노트북을 실행하려면 텐서플로 2.0.0-alpha0 버전 이상이 필요합니다.
# Load the IMDB review dataset: skip the 20 most frequent words and keep
# only the 100 most common word indices (rarer words become the OOV token 2).
import numpy as np
from tensorflow.keras.datasets import imdb
(x_train_all, y_train_all), (x_test, y_test) = imdb.load_data(skip_top=20, num_words=100)
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/imdb.npz 17465344/17464789 [==============================] - 1s 0us/step 17473536/17464789 [==============================] - 1s 0us/step
# Show the first (still index-encoded) training review.
print(x_train_all[0])
[2, 2, 22, 2, 43, 2, 2, 2, 2, 65, 2, 2, 66, 2, 2, 2, 36, 2, 2, 25, 2, 43, 2, 2, 50, 2, 2, 2, 35, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 39, 2, 2, 2, 2, 2, 2, 38, 2, 2, 2, 2, 50, 2, 2, 2, 2, 2, 2, 22, 2, 2, 2, 2, 2, 22, 71, 87, 2, 2, 43, 2, 38, 76, 2, 2, 2, 2, 22, 2, 2, 2, 2, 2, 2, 2, 2, 2, 62, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 66, 2, 33, 2, 2, 2, 2, 38, 2, 2, 25, 2, 51, 36, 2, 48, 25, 2, 33, 2, 22, 2, 2, 28, 77, 52, 2, 2, 2, 2, 82, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 36, 71, 43, 2, 2, 26, 2, 2, 46, 2, 2, 2, 2, 2, 2, 88, 2, 2, 2, 2, 98, 32, 2, 56, 26, 2, 2, 2, 2, 2, 2, 2, 22, 21, 2, 2, 26, 2, 2, 2, 30, 2, 2, 51, 36, 28, 2, 92, 25, 2, 2, 2, 65, 2, 38, 2, 88, 2, 2, 2, 2, 2, 2, 2, 2, 32, 2, 2, 2, 2, 2, 32]
# Strip the reserved tokens (padding=0, start=1, OOV=2) from every review,
# keeping only real word indices (> 2).
for i, review in enumerate(x_train_all):
    x_train_all[i] = [token for token in review if token > 2]
print(x_train_all[0])
[22, 43, 65, 66, 36, 25, 43, 50, 35, 39, 38, 50, 22, 22, 71, 87, 43, 38, 76, 22, 62, 66, 33, 38, 25, 51, 36, 48, 25, 33, 22, 28, 77, 52, 82, 36, 71, 43, 26, 46, 88, 98, 32, 56, 26, 22, 21, 26, 30, 51, 36, 28, 92, 25, 65, 38, 88, 32, 32]
# Fetch the word -> index vocabulary and look up one entry as a sanity check.
word_to_index = imdb.get_word_index()
word_to_index['movie']
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/imdb_word_index.json 1646592/1641221 [==============================] - 0s 0us/step 1654784/1641221 [==============================] - 0s 0us/step
17
# Invert the word -> index mapping so encoded reviews can be decoded.
index_to_word = {index: word for word, index in word_to_index.items()}
# Decode the first training review; stored indices are offset by 3
# because of the reserved tokens.
for token in x_train_all[0]:
    print(index_to_word[token - 3], end=' ')
film just story really they you just there an from so there film film were great just so much film would really at so you what they if you at film have been good also they were just are out because them all up are film but are be what they have don't you story so because all all
# Both arrays hold 25,000 reviews; each x entry is a variable-length list.
print(x_train_all.shape, y_train_all.shape)
(25000,) (25000,)
# Review lengths differ from sample to sample.
print(len(x_train_all[0]), len(x_train_all[1]))
59 32
# Targets are binary sentiment labels.
print(y_train_all[:10])
[1 0 0 1 0 0 1 0 1 0]
# Shuffle deterministically, then split: first 20,000 reviews for training,
# remaining 5,000 for validation.
np.random.seed(42)
random_index = np.random.permutation(25000)
x_train = x_train_all[random_index[:20000]]
y_train = y_train_all[random_index[:20000]]
x_val = x_train_all[random_index[20000:]]
y_val = y_train_all[random_index[20000:]]
# Pad (or truncate) every review to exactly 100 tokens; zeros are added
# at the front by default (see the leading zeros printed below).
from tensorflow.keras.preprocessing import sequence
maxlen=100
x_train_seq = sequence.pad_sequences(x_train, maxlen=maxlen)
x_val_seq = sequence.pad_sequences(x_val, maxlen=maxlen)
print(x_train_seq.shape, x_val_seq.shape)
(20000, 100) (5000, 100)
# The leading zeros are padding; real tokens are right-aligned.
print(x_train_seq[0])
[ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 35 40 27 28 40 22 83 31 85 45 24 23 31 70 31 76 30 98 32 22 28 51 75 56 30 33 97 53 38 46 53 74 31 35 23 34 22 58]
# One-hot encode the padded sequences: each token index (< 100) becomes
# a 100-dimensional vector, giving shape (samples, 100, 100).
from tensorflow.keras.utils import to_categorical
x_train_onehot = to_categorical(x_train_seq)
x_val_onehot = to_categorical(x_val_seq)
print(x_train_onehot.shape)
(20000, 100, 100)
# ~800 MB — the memory cost of one-hot encoding the whole training set.
print(x_train_onehot.nbytes)
800000000
import tensorflow as tf
class RecurrentNetwork:
    """A minimal vanilla RNN binary classifier implemented in NumPy.

    One tanh recurrent layer followed by a single sigmoid output unit,
    trained with truncated backpropagation through time (at most the
    last 10 time steps) and plain mini-batch SGD.
    """

    def __init__(self, n_cells=10, batch_size=32, learning_rate=0.1):
        self.n_cells = n_cells        # number of recurrent cells
        self.batch_size = batch_size  # mini-batch size
        self.w1h = None               # recurrent weights (hidden -> hidden)
        self.w1x = None               # input weights (input -> hidden)
        self.b1 = None                # recurrent-layer bias
        self.w2 = None                # output-layer weights
        self.b2 = None                # output-layer bias
        self.h = None                 # hidden states saved by forpass
        self.losses = []              # training loss per epoch
        self.val_losses = []          # validation loss per epoch
        self.lr = learning_rate       # learning rate

    def forpass(self, x):
        """Forward pass.

        x has shape (batch, time steps, features); returns the
        output-layer logits of shape (batch, n_classes).
        """
        self.h = [np.zeros((x.shape[0], self.n_cells))]  # initial hidden state
        # Move the time-step axis to the front so we can iterate over steps.
        seq = np.swapaxes(x, 0, 1)
        # Recurrent layer: one linear + tanh update per time step.
        # (Separate loop variable so the input argument x is not shadowed.)
        for x_t in seq:
            z1 = np.dot(x_t, self.w1x) + np.dot(self.h[-1], self.w1h) + self.b1
            h = np.tanh(z1)   # activation
            self.h.append(h)  # keep every hidden state for backprop
        z2 = np.dot(h, self.w2) + self.b2  # output-layer logits (last state)
        return z2

    def backprop(self, x, err):
        """Truncated BPTT.

        err is dL/dz2 (i.e. a - y) for the batch; returns the five
        gradients averaged over the batch, truncating the backward walk
        at the last 10 time steps.
        """
        m = len(x)  # number of samples
        # Output-layer gradients (last hidden state feeds the output).
        w2_grad = np.dot(self.h[-1].T, err) / m
        b2_grad = np.sum(err, axis=0) / m  # per-class sum (axis=0 generalizes
                                           # beyond the single-output case)
        # Move the time-step axis to the front, as in forpass.
        seq = np.swapaxes(x, 0, 1)
        w1h_grad = w1x_grad = b1_grad = 0
        # Gradient just before the last cell's tanh: dL/dz1_T.
        err_to_cell = np.dot(err, self.w2.T) * (1 - self.h[-1] ** 2)
        # Walk backwards through (at most) the last 10 time steps.
        # h_prev pairs each input x_t with the hidden state that fed it.
        for x_t, h_prev in zip(seq[::-1][:10], self.h[:-1][::-1][:10]):
            w1h_grad += np.dot(h_prev.T, err_to_cell)
            w1x_grad += np.dot(x_t.T, err_to_cell)
            b1_grad += np.sum(err_to_cell, axis=0)
            # Propagate to the previous step: z1_t = h_prev @ w1h + ...,
            # so dL/dh_prev = err_to_cell @ w1h.T (the transpose was
            # missing in the original), then through the tanh.
            err_to_cell = np.dot(err_to_cell, self.w1h.T) * (1 - h_prev ** 2)
        return w1h_grad / m, w1x_grad / m, b1_grad / m, w2_grad, b2_grad

    def sigmoid(self, z):
        """Numerically safe sigmoid."""
        z = np.clip(z, -100, None)  # avoid overflow in np.exp
        return 1 / (1 + np.exp(-z))

    def init_weights(self, n_features, n_classes):
        """Initialize weights: orthogonal recurrent matrix, Glorot-uniform
        elsewhere, zero biases."""
        orth_init = tf.initializers.Orthogonal()
        glorot_init = tf.initializers.GlorotUniform()
        self.w1h = orth_init((self.n_cells, self.n_cells)).numpy()  # (cells, cells)
        self.w1x = glorot_init((n_features, self.n_cells)).numpy()  # (features, cells)
        self.b1 = np.zeros(self.n_cells)
        self.w2 = glorot_init((self.n_cells, n_classes)).numpy()    # (cells, classes)
        self.b2 = np.zeros(n_classes)

    def fit(self, x, y, epochs=100, x_val=None, y_val=None):
        """Train with mini-batch SGD, recording per-epoch training loss
        and (when validation data is given) validation loss."""
        y = y.reshape(-1, 1)
        if y_val is not None:  # validation data is optional
            y_val = y_val.reshape(-1, 1)
        np.random.seed(42)
        self.init_weights(x.shape[2], y.shape[1])  # initialize all weights
        for i in range(epochs):
            print('에포크', i, end=' ')
            batch_losses = []
            # Iterate over shuffled mini-batches from the generator.
            for x_batch, y_batch in self.gen_batch(x, y):
                print('.', end='')
                a = self.training(x_batch, y_batch)
                # Clip activations so the log loss is finite.
                a = np.clip(a, 1e-10, 1 - 1e-10)
                loss = np.mean(-(y_batch * np.log(a) + (1 - y_batch) * np.log(1 - a)))
                batch_losses.append(loss)
            print()
            self.losses.append(np.mean(batch_losses))
            # Track validation loss only when validation data was supplied.
            if x_val is not None and y_val is not None:
                self.update_val_loss(x_val, y_val)

    def gen_batch(self, x, y):
        """Yield shuffled (x, y) mini-batches of size batch_size."""
        length = len(x)
        bins = length // self.batch_size  # number of full batches
        if length % self.batch_size:
            bins += 1                     # plus one partial batch
        indexes = np.random.permutation(np.arange(len(x)))  # shuffle order
        x = x[indexes]
        y = y[indexes]
        for i in range(bins):
            start = self.batch_size * i
            end = self.batch_size * (i + 1)
            yield x[start:end], y[start:end]

    def training(self, x, y):
        """One SGD step on a mini-batch; returns the sigmoid activations."""
        z = self.forpass(x)       # forward pass
        a = self.sigmoid(z)       # activation
        err = -(y - a)            # dL/dz for sigmoid + log loss
        # Backpropagate to get all gradients.
        w1h_grad, w1x_grad, b1_grad, w2_grad, b2_grad = self.backprop(x, err)
        # Update the recurrent-layer parameters.
        self.w1h -= self.lr * w1h_grad
        self.w1x -= self.lr * w1x_grad
        self.b1 -= self.lr * b1_grad
        # Update the output-layer parameters.
        self.w2 -= self.lr * w2_grad
        self.b2 -= self.lr * b2_grad
        return a

    def predict(self, x):
        """Return boolean class predictions (logit > 0 == prob > 0.5)."""
        z = self.forpass(x)
        return z > 0

    def score(self, x, y):
        """Fraction of samples whose prediction matches the target."""
        return np.mean(self.predict(x) == y.reshape(-1, 1))

    def update_val_loss(self, x_val, y_val):
        """Compute the log loss on the validation set and record it."""
        z = self.forpass(x_val)
        a = self.sigmoid(z)
        a = np.clip(a, 1e-10, 1 - 1e-10)  # safe log
        val_loss = np.mean(-(y_val * np.log(a) + (1 - y_val) * np.log(1 - a)))
        self.val_losses.append(val_loss)
# Train the NumPy RNN on the one-hot sequences.
rn = RecurrentNetwork(n_cells=32, batch_size=32, learning_rate=0.01)
rn.fit(x_train_onehot, y_train, epochs=20, x_val=x_val_onehot, y_val=y_val)
에포크 0 ................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................. 에포크 1 ................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................. 에포크 2 ................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................. 
에포크 3 ................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................. 에포크 4 ................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................. 에포크 5 ................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................. 
에포크 6 ................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................. 에포크 7 ................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................. 에포크 8 ................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................. 
에포크 9 ................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................. 에포크 10 ................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................. 에포크 11 ................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................. 
에포크 12 ................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................. 에포크 13 ................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................. 에포크 14 ................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................. 
에포크 15 ................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................. 에포크 16 ................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................. 에포크 17 ................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................. 
에포크 18 ................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................. 에포크 19 .................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................
# Plot training vs. validation loss, then measure validation accuracy.
import matplotlib.pyplot as plt
plt.plot(rn.losses)
plt.plot(rn.val_losses)
plt.show()
rn.score(x_val_onehot, y_val)
0.6816
# The same architecture in Keras: a 32-cell SimpleRNN over the
# (100 time steps, 100 features) one-hot inputs, plus a sigmoid output.
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, SimpleRNN
model = Sequential()
model.add(SimpleRNN(32, input_shape=(100, 100)))
model.add(Dense(1, activation='sigmoid'))
model.summary()
Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= simple_rnn (SimpleRNN) (None, 32) 4256 _________________________________________________________________ dense (Dense) (None, 1) 33 ================================================================= Total params: 4,289 Trainable params: 4,289 Non-trainable params: 0 _________________________________________________________________
# Train with plain SGD and binary cross-entropy.
model.compile(optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
history = model.fit(x_train_onehot, y_train, epochs=20, batch_size=32,
                    validation_data=(x_val_onehot, y_val))
Epoch 1/20 625/625 [==============================] - 43s 64ms/step - loss: 0.6996 - accuracy: 0.5222 - val_loss: 0.6971 - val_accuracy: 0.5308 Epoch 2/20 625/625 [==============================] - 39s 63ms/step - loss: 0.6868 - accuracy: 0.5508 - val_loss: 0.6801 - val_accuracy: 0.5710 Epoch 3/20 625/625 [==============================] - 38s 62ms/step - loss: 0.6699 - accuracy: 0.5913 - val_loss: 0.6643 - val_accuracy: 0.6034 Epoch 4/20 625/625 [==============================] - 38s 61ms/step - loss: 0.6506 - accuracy: 0.6248 - val_loss: 0.6474 - val_accuracy: 0.6338 Epoch 5/20 625/625 [==============================] - 38s 61ms/step - loss: 0.6348 - accuracy: 0.6483 - val_loss: 0.6165 - val_accuracy: 0.6604 Epoch 6/20 625/625 [==============================] - 38s 62ms/step - loss: 0.6159 - accuracy: 0.6679 - val_loss: 0.6430 - val_accuracy: 0.6274 Epoch 7/20 625/625 [==============================] - 38s 61ms/step - loss: 0.6072 - accuracy: 0.6737 - val_loss: 0.6151 - val_accuracy: 0.6564 Epoch 8/20 625/625 [==============================] - 38s 61ms/step - loss: 0.5986 - accuracy: 0.6808 - val_loss: 0.6088 - val_accuracy: 0.6750 Epoch 9/20 625/625 [==============================] - 38s 61ms/step - loss: 0.5936 - accuracy: 0.6875 - val_loss: 0.6133 - val_accuracy: 0.6696 Epoch 10/20 625/625 [==============================] - 38s 61ms/step - loss: 0.5864 - accuracy: 0.6896 - val_loss: 0.5766 - val_accuracy: 0.6986 Epoch 11/20 625/625 [==============================] - 38s 61ms/step - loss: 0.5841 - accuracy: 0.6949 - val_loss: 0.5897 - val_accuracy: 0.6816 Epoch 12/20 625/625 [==============================] - 38s 61ms/step - loss: 0.5825 - accuracy: 0.6995 - val_loss: 0.5783 - val_accuracy: 0.6938 Epoch 13/20 625/625 [==============================] - 39s 62ms/step - loss: 0.5791 - accuracy: 0.6959 - val_loss: 0.6111 - val_accuracy: 0.6768 Epoch 14/20 625/625 [==============================] - 39s 62ms/step - loss: 0.5747 - accuracy: 0.7049 - val_loss: 0.5924 - 
val_accuracy: 0.6784 Epoch 15/20 625/625 [==============================] - 39s 62ms/step - loss: 0.5729 - accuracy: 0.7022 - val_loss: 0.5752 - val_accuracy: 0.7002 Epoch 16/20 625/625 [==============================] - 38s 62ms/step - loss: 0.5730 - accuracy: 0.7020 - val_loss: 0.5960 - val_accuracy: 0.6820 Epoch 17/20 625/625 [==============================] - 39s 62ms/step - loss: 0.5691 - accuracy: 0.7064 - val_loss: 0.6028 - val_accuracy: 0.6802 Epoch 18/20 625/625 [==============================] - 38s 61ms/step - loss: 0.5669 - accuracy: 0.7104 - val_loss: 0.5710 - val_accuracy: 0.7032 Epoch 19/20 625/625 [==============================] - 38s 61ms/step - loss: 0.5680 - accuracy: 0.7064 - val_loss: 0.5786 - val_accuracy: 0.6946 Epoch 20/20 625/625 [==============================] - 38s 61ms/step - loss: 0.5631 - accuracy: 0.7114 - val_loss: 0.5721 - val_accuracy: 0.6998
# Loss and accuracy curves for the Keras SimpleRNN, then its final
# validation accuracy.
for key in ('loss', 'val_loss'):
    plt.plot(history.history[key])
plt.show()
for key in ('accuracy', 'val_accuracy'):
    plt.plot(history.history[key])
plt.show()
loss, accuracy = model.evaluate(x_val_onehot, y_val, verbose=0)
print(accuracy)
0.6998000144958496
from tensorflow.keras.layers import Embedding
# Reload with a 1,000-word vocabulary and rebuild the train/validation
# split using the same shuffled indices as before.
(x_train_all, y_train_all), (x_test, y_test) = imdb.load_data(skip_top=20, num_words=1000)
for i in range(len(x_train_all)):
    x_train_all[i] = [w for w in x_train_all[i] if w > 2]  # drop reserved tokens
x_train = x_train_all[random_index[:20000]]
y_train = y_train_all[random_index[:20000]]
x_val = x_train_all[random_index[20000:]]
y_val = y_train_all[random_index[20000:]]
maxlen=100
x_train_seq = sequence.pad_sequences(x_train, maxlen=maxlen)
x_val_seq = sequence.pad_sequences(x_val, maxlen=maxlen)
# A learned Embedding (1000 words -> 32 dims) replaces one-hot encoding,
# followed by a small 8-cell SimpleRNN.
model_ebd = Sequential()
model_ebd.add(Embedding(1000, 32))
model_ebd.add(SimpleRNN(8))
model_ebd.add(Dense(1, activation='sigmoid'))
model_ebd.summary()
Model: "sequential_1" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= embedding (Embedding) (None, None, 32) 32000 _________________________________________________________________ simple_rnn_1 (SimpleRNN) (None, 8) 328 _________________________________________________________________ dense_1 (Dense) (None, 1) 9 ================================================================= Total params: 32,337 Trainable params: 32,337 Non-trainable params: 0 _________________________________________________________________
# Train the embedding model with Adam on the integer sequences directly.
model_ebd.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
history = model_ebd.fit(x_train_seq, y_train, epochs=10, batch_size=32,
                        validation_data=(x_val_seq, y_val))
Epoch 1/10 625/625 [==============================] - 61s 95ms/step - loss: 0.5373 - accuracy: 0.7305 - val_loss: 0.4704 - val_accuracy: 0.7860 Epoch 2/10 625/625 [==============================] - 58s 93ms/step - loss: 0.4016 - accuracy: 0.8282 - val_loss: 0.4096 - val_accuracy: 0.8234 Epoch 3/10 625/625 [==============================] - 58s 93ms/step - loss: 0.3492 - accuracy: 0.8560 - val_loss: 0.3966 - val_accuracy: 0.8322 Epoch 4/10 625/625 [==============================] - 58s 92ms/step - loss: 0.3245 - accuracy: 0.8676 - val_loss: 0.4025 - val_accuracy: 0.8290 Epoch 5/10 625/625 [==============================] - 58s 93ms/step - loss: 0.3012 - accuracy: 0.8783 - val_loss: 0.4075 - val_accuracy: 0.8208 Epoch 6/10 625/625 [==============================] - 58s 92ms/step - loss: 0.2806 - accuracy: 0.8874 - val_loss: 0.4429 - val_accuracy: 0.8240 Epoch 7/10 625/625 [==============================] - 57s 92ms/step - loss: 0.2685 - accuracy: 0.8913 - val_loss: 0.4307 - val_accuracy: 0.8166 Epoch 8/10 625/625 [==============================] - 57s 92ms/step - loss: 0.2562 - accuracy: 0.8992 - val_loss: 0.4336 - val_accuracy: 0.8266 Epoch 9/10 625/625 [==============================] - 58s 92ms/step - loss: 0.2397 - accuracy: 0.9056 - val_loss: 0.4497 - val_accuracy: 0.8070 Epoch 10/10 625/625 [==============================] - 56s 90ms/step - loss: 0.2461 - accuracy: 0.9032 - val_loss: 0.4627 - val_accuracy: 0.8222
# Loss and accuracy curves for the embedding model, then its final
# validation accuracy.
for key in ('loss', 'val_loss'):
    plt.plot(history.history[key])
plt.show()
for key in ('accuracy', 'val_accuracy'):
    plt.plot(history.history[key])
plt.show()
loss, accuracy = model_ebd.evaluate(x_val_seq, y_val, verbose=0)
print(accuracy)
0.8222000002861023
# Swap the SimpleRNN for an 8-cell LSTM on the same embedding inputs.
from tensorflow.keras.layers import LSTM
model_lstm = Sequential()
model_lstm.add(Embedding(1000, 32))
model_lstm.add(LSTM(8))
model_lstm.add(Dense(1, activation='sigmoid'))
model_lstm.summary()
Model: "sequential_2" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= embedding_1 (Embedding) (None, None, 32) 32000 _________________________________________________________________ lstm (LSTM) (None, 8) 1312 _________________________________________________________________ dense_2 (Dense) (None, 1) 9 ================================================================= Total params: 33,321 Trainable params: 33,321 Non-trainable params: 0 _________________________________________________________________
# Train the LSTM model with Adam.
model_lstm.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
history = model_lstm.fit(x_train_seq, y_train, epochs=10, batch_size=32,
                         validation_data=(x_val_seq, y_val))
Epoch 1/10 625/625 [==============================] - 19s 24ms/step - loss: 0.4649 - accuracy: 0.7789 - val_loss: 0.3846 - val_accuracy: 0.8302 Epoch 2/10 625/625 [==============================] - 14s 23ms/step - loss: 0.3422 - accuracy: 0.8553 - val_loss: 0.3633 - val_accuracy: 0.8410 Epoch 3/10 625/625 [==============================] - 14s 23ms/step - loss: 0.3174 - accuracy: 0.8649 - val_loss: 0.3667 - val_accuracy: 0.8438 Epoch 4/10 625/625 [==============================] - 14s 23ms/step - loss: 0.2994 - accuracy: 0.8744 - val_loss: 0.3676 - val_accuracy: 0.8362 Epoch 5/10 625/625 [==============================] - 15s 23ms/step - loss: 0.2858 - accuracy: 0.8778 - val_loss: 0.3697 - val_accuracy: 0.8416 Epoch 6/10 625/625 [==============================] - 15s 23ms/step - loss: 0.2717 - accuracy: 0.8858 - val_loss: 0.4014 - val_accuracy: 0.8434 Epoch 7/10 625/625 [==============================] - 14s 23ms/step - loss: 0.2584 - accuracy: 0.8924 - val_loss: 0.3867 - val_accuracy: 0.8402 Epoch 8/10 625/625 [==============================] - 14s 23ms/step - loss: 0.2463 - accuracy: 0.8967 - val_loss: 0.3921 - val_accuracy: 0.8356 Epoch 9/10 625/625 [==============================] - 14s 23ms/step - loss: 0.2337 - accuracy: 0.9011 - val_loss: 0.4145 - val_accuracy: 0.8392 Epoch 10/10 625/625 [==============================] - 14s 23ms/step - loss: 0.2232 - accuracy: 0.9099 - val_loss: 0.4468 - val_accuracy: 0.8372
# Loss and accuracy curves for the LSTM model, then its final
# validation accuracy.
for key in ('loss', 'val_loss'):
    plt.plot(history.history[key])
plt.show()
for key in ('accuracy', 'val_accuracy'):
    plt.plot(history.history[key])
plt.show()
loss, accuracy = model_lstm.evaluate(x_val_seq, y_val, verbose=0)
print(accuracy)
0.8371999859809875