In this notebook, we use an LSTM to classify IMDB movie reviews by their sentiment.
from tensorflow import keras
from tensorflow.keras.datasets import imdb
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Embedding, SpatialDropout1D
from tensorflow.keras.layers import LSTM # new!
from tensorflow.keras.callbacks import ModelCheckpoint
import os
from sklearn.metrics import roc_auc_score
import matplotlib.pyplot as plt
%matplotlib inline
# output directory
output_dir = 'model_output/LSTM'
# training
epochs = 4
batch_size = 128
# vector-space embedding
n_dim = 64
n_unique_words = 10000
max_review_length = 100
pad_type = trunc_type = 'pre'
drop_embed = 0.2
# LSTM layer architecture
n_lstm = 256
drop_lstm = 0.2
# dense layer architecture
# n_dense = 256
# dropout = 0.2
(x_train, y_train), (x_valid, y_valid) = imdb.load_data(num_words=n_unique_words) # n_words_to_skip removed
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/imdb.npz
17464789/17464789 [==============================] - 2s 0us/step
x_train = pad_sequences(x_train, maxlen=max_review_length, padding=pad_type, truncating=trunc_type, value=0)
x_valid = pad_sequences(x_valid, maxlen=max_review_length, padding=pad_type, truncating=trunc_type, value=0)
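To sanity-check what pad_sequences produced, one option is to map the integer token ids back to words. A minimal sketch, assuming the standard Keras IMDB convention that ids 0, 1, and 2 are reserved for padding, start-of-sequence, and unknown tokens (the decode_review helper is ours, not part of the original notebook):

# Map integer indices back to words; Keras reserves ids 0, 1, 2, so real words are offset by 3.
word_index = imdb.get_word_index()
index_word = {index + 3: word for word, index in word_index.items()}
index_word[0], index_word[1], index_word[2] = 'PAD', 'START', 'UNK'

def decode_review(token_ids):
    """Convert a (padded) sequence of token ids back into a readable string."""
    return ' '.join(index_word.get(i, 'UNK') for i in token_ids)

print(decode_review(x_train[0]))  # first training review after padding/truncation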
model = Sequential()
model.add(Embedding(n_unique_words, n_dim, input_length=max_review_length))
model.add(SpatialDropout1D(drop_embed))
model.add(LSTM(n_lstm, dropout=drop_lstm))
# model.add(Dense(n_dense, activation='relu'))
# model.add(Dropout(dropout))
model.add(Dense(1, activation='sigmoid'))
model.summary()
Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= embedding (Embedding) (None, 100, 64) 640000 spatial_dropout1d (SpatialD (None, 100, 64) 0 ropout1D) lstm (LSTM) (None, 256) 328704 dense (Dense) (None, 1) 257 ================================================================= Total params: 968,961 Trainable params: 968,961 Non-trainable params: 0 _________________________________________________________________
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
modelcheckpoint = ModelCheckpoint(filepath=output_dir+"/weights.{epoch:02d}.hdf5")
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_valid, y_valid), callbacks=[modelcheckpoint])
Epoch 1/4
196/196 [==============================] - 13s 24ms/step - loss: 0.5580 - accuracy: 0.6909 - val_loss: 0.3781 - val_accuracy: 0.8311
Epoch 2/4
196/196 [==============================] - 4s 21ms/step - loss: 0.3123 - accuracy: 0.8682 - val_loss: 0.3426 - val_accuracy: 0.8500
Epoch 3/4
196/196 [==============================] - 4s 21ms/step - loss: 0.2442 - accuracy: 0.9048 - val_loss: 0.3634 - val_accuracy: 0.8462
Epoch 4/4
196/196 [==============================] - 4s 21ms/step - loss: 0.1989 - accuracy: 0.9231 - val_loss: 0.3946 - val_accuracy: 0.8418
<keras.callbacks.History at 0x7f44281241c0>
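Validation loss is lowest after the second epoch, which is why the epoch-2 checkpoint is loaded below. As an alternative (not used in this notebook), ModelCheckpoint can be asked to keep only the best weights, so no epoch needs to be picked by hand afterwards; the filename below is a hypothetical example:

# Sketch: save only the checkpoint with the lowest validation loss
best_checkpoint = ModelCheckpoint(
    filepath=output_dir + "/weights.best.hdf5",  # hypothetical filename
    monitor='val_loss',
    save_best_only=True,
)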
model.load_weights(output_dir+"/weights.02.hdf5")
y_hat = model.predict(x_valid)
782/782 [==============================] - 3s 4ms/step
plt.hist(y_hat)
_ = plt.axvline(x=0.5, color='orange')
"{:0.2f}".format(roc_auc_score(y_valid, y_hat)*100.0)
'92.80'
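Beyond ROC AUC, the predicted probabilities can be turned into hard class labels by thresholding at 0.5 (the orange line in the histogram) and compared directly against the validation labels. A minimal sketch, which should roughly mirror the val_accuracy reported during training:

# Threshold predictions at 0.5 and compute plain validation accuracy
import numpy as np

y_pred = (y_hat.flatten() >= 0.5).astype(int)
print("{:0.2f}".format(np.mean(y_pred == y_valid) * 100.0))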