import tensorflow as tf
print(tf.__version__)
2.0.0-alpha0
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Conv2D, Flatten, Dense, Dropout, LSTM
import cv2 #python -m pip install opencv-python
import numpy as np
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.utils import to_categorical
import math
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.callbacks import LearningRateScheduler
# Number of CIFAR-10 target classes (used for one-hot encoding and the softmax head).
num_classes = 10
def load_cifar10_data(img_rows, img_cols):
    """Load CIFAR-10, resize each image to (img_rows, img_cols), and flatten
    pixel rows into feature vectors so every image becomes a sequence.

    Args:
        img_rows: target image height (becomes the timestep axis).
        img_cols: target image width.

    Returns:
        X_train, Y_train, X_test, Y_test where X_* has shape
        (n_samples, img_rows, img_cols * 3) and dtype float32 scaled to
        [0, 1], and Y_* is one-hot encoded to `num_classes`.
    """
    # Load cifar10 training and test sets
    (X_train, Y_train), (X_test, Y_test) = cifar10.load_data()
    # Resize training images.
    # FIX: cv2.resize expects dsize=(width, height), so pass (img_cols, img_rows);
    # the original passed (img_rows, img_cols), which only worked because the
    # call site used a square 224x224 target.
    X_train = np.array([cv2.resize(img, (img_cols, img_rows)) for img in X_train])
    X_test = np.array([cv2.resize(img, (img_cols, img_rows)) for img in X_test])
    # FIX: use float32 instead of float16 — half precision loses accuracy and
    # can overflow during training; float32 is the standard Keras input dtype.
    X_train = X_train.astype('float32') / 255.0
    X_test = X_test.astype('float32') / 255.0
    # Flatten each pixel row (cols x 3 channels) into one feature vector per timestep.
    X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], X_train.shape[2] * X_train.shape[3]))
    X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], X_test.shape[2] * X_test.shape[3]))
    # Transform targets to keras compatible format
    Y_train = to_categorical(Y_train, num_classes)
    Y_test = to_categorical(Y_test, num_classes)
    print("X_train: {0}".format(X_train.shape))
    print("Y_train: {0}".format(Y_train.shape))
    print("X_test: {0}".format(X_test.shape))
    print("Y_test: {0}".format(Y_test.shape))
    return X_train, Y_train, X_test, Y_test
# Upscale CIFAR-10 to 224x224: each image becomes a 224-step sequence of 672 features.
X_train, y_train, X_test, y_test = load_cifar10_data(224, 224)
X_train: (50000, 224, 672) Y_train: (50000, 10) X_test: (10000, 224, 672) Y_test: (10000, 10)
# RNN input geometry: 224 timesteps (one per image row), 224*3 RGB values per step.
series_size=224
feature_size=224*3
# Glorot-uniform weights and a small constant bias for all initialized layers.
kernel_init = tf.keras.initializers.glorot_uniform()
bias_init = tf.keras.initializers.Constant(value=0.2)
def build_rnn_model():
    """Build a two-layer LSTM classifier over row-sequences of CIFAR-10 images.

    Input: (batch, series_size, feature_size) float tensor.
    Returns: an uncompiled tf.keras Model ending in a `num_classes` softmax.

    FIX vs. original:
    * activation='relu' inside the LSTMs is removed (default tanh restored).
      ReLU recurrent activations are unbounded and blow up over 224 timesteps —
      the training log shows loss exploding to ~69.8 in epoch 1 and then
      collapsing permanently to ln(10) ≈ 2.3026 (random guessing).
    * stateful=True is removed. Stateful RNNs require a fixed batch size
      (incompatible with batch_shape=(None, ...)) and manual state resets;
      with shuffled mini-batch fit() carrying state across batches is wrong.
    * The redundant input_shape kwarg on the first LSTM is dropped — the
      Input layer already fixes the shape.
    """
    input_layer = Input(batch_shape=(None, series_size, feature_size), name="state")
    rnn_1 = LSTM(
        units=256,
        dropout=0.5,
        kernel_initializer=kernel_init,
        bias_initializer=bias_init,
        return_sequences=True)(input_layer)
    rnn_2 = LSTM(
        units=256,
        dropout=0.5,
        kernel_initializer=kernel_init,
        bias_initializer=bias_init,
        return_sequences=False)(rnn_1)
    hidden_layer = Dense(
        units=128,
        activation='relu',
        kernel_initializer=kernel_init,
        bias_initializer=bias_init)(rnn_2)
    dropout_layer = Dropout(rate=0.5)(hidden_layer)
    softmax = Dense(units=num_classes, activation='softmax', name='softmax')(dropout_layer)
    model = Model(inputs=input_layer, outputs=softmax)
    return model
model = build_rnn_model()
W0513 00:02:10.615107 140528954898240 tf_logging.py:161] <tensorflow.python.keras.layers.recurrent.UnifiedLSTM object at 0x7fcee2bfdcc0>: Note that this layer is not optimized for performance. Please use tf.keras.layers.CuDNNLSTM for better performance on GPU. W0513 00:02:10.931151 140528954898240 tf_logging.py:161] <tensorflow.python.keras.layers.recurrent.UnifiedLSTM object at 0x7fce0c13b748>: Note that this layer is not optimized for performance. Please use tf.keras.layers.CuDNNLSTM for better performance on GPU.
# Base learning rate shared by the SGD optimizer and the step-decay schedule.
initial_lrate = 0.01

def decay(epoch, steps=100):
    """Step-decay schedule: shrink the base rate by 4% every 8 epochs.

    Args:
        epoch: current (0-based) epoch index from LearningRateScheduler.
        steps: unused; kept for signature compatibility.

    Returns:
        The learning rate to use for this epoch.
    """
    drop = 0.96
    epochs_drop = 8
    # Number of completed decay periods; integer floor-division replaces
    # math.floor since epoch is a non-negative integer.
    n_drops = (epoch + 1) // epochs_drop
    return initial_lrate * drop ** n_drops
# Apply the step-decay schedule each epoch; verbose=1 logs every rate change.
lr_sc = LearningRateScheduler(decay, verbose=1)
# SGD with Nesterov momentum, starting at the schedule's base rate.
sgd = SGD(lr=initial_lrate, momentum=0.9, nesterov=True)
# One-hot targets -> categorical cross-entropy against the softmax output.
model.compile(
    loss='categorical_crossentropy',
    optimizer=sgd,
    metrics=['accuracy']
)
epochs = 35
# Train with the held-out test set as validation data.
# NOTE(review): the model is built with stateful=True LSTMs but an unspecified
# batch size (batch_shape=(None, ...)); stateful layers require a fixed batch
# size and state resets between epochs — confirm this configuration is intended.
history = model.fit(
    x=X_train,
    y=y_train,
    validation_data=(X_test, y_test),
    epochs=epochs, batch_size=256, callbacks=[lr_sc]
)
Train on 50000 samples, validate on 10000 samples Epoch 00001: LearningRateScheduler reducing learning rate to 0.01. Epoch 1/35 50000/50000 [==============================] - 856s 17ms/sample - loss: 69.8404 - accuracy: 0.0985 - val_loss: 2.3026 - val_accuracy: 0.1000 Epoch 00002: LearningRateScheduler reducing learning rate to 0.01. Epoch 2/35 50000/50000 [==============================] - 855s 17ms/sample - loss: 2.3027 - accuracy: 0.0966 - val_loss: 2.3026 - val_accuracy: 0.1000 Epoch 00003: LearningRateScheduler reducing learning rate to 0.01. Epoch 3/35 50000/50000 [==============================] - 855s 17ms/sample - loss: 2.3027 - accuracy: 0.0962 - val_loss: 2.3026 - val_accuracy: 0.1000 Epoch 00004: LearningRateScheduler reducing learning rate to 0.01. Epoch 4/35 50000/50000 [==============================] - 860s 17ms/sample - loss: 2.3027 - accuracy: 0.0963 - val_loss: 2.3026 - val_accuracy: 0.1000 Epoch 00005: LearningRateScheduler reducing learning rate to 0.01. Epoch 5/35 50000/50000 [==============================] - 856s 17ms/sample - loss: 2.3027 - accuracy: 0.0966 - val_loss: 2.3026 - val_accuracy: 0.1000 Epoch 00006: LearningRateScheduler reducing learning rate to 0.01. Epoch 6/35 50000/50000 [==============================] - 856s 17ms/sample - loss: 2.3027 - accuracy: 0.0972 - val_loss: 2.3026 - val_accuracy: 0.1000 Epoch 00007: LearningRateScheduler reducing learning rate to 0.01. Epoch 7/35 50000/50000 [==============================] - 850s 17ms/sample - loss: 2.3027 - accuracy: 0.0992 - val_loss: 2.3026 - val_accuracy: 0.1000 Epoch 00008: LearningRateScheduler reducing learning rate to 0.0096. Epoch 8/35 50000/50000 [==============================] - 848s 17ms/sample - loss: 2.3027 - accuracy: 0.0981 - val_loss: 2.3026 - val_accuracy: 0.1000 Epoch 00009: LearningRateScheduler reducing learning rate to 0.0096. 
Epoch 9/35 50000/50000 [==============================] - 851s 17ms/sample - loss: 2.3027 - accuracy: 0.0983 - val_loss: 2.3026 - val_accuracy: 0.1000 Epoch 00010: LearningRateScheduler reducing learning rate to 0.0096. Epoch 10/35 50000/50000 [==============================] - 859s 17ms/sample - loss: 2.3027 - accuracy: 0.0993 - val_loss: 2.3026 - val_accuracy: 0.1000 Epoch 00011: LearningRateScheduler reducing learning rate to 0.0096. Epoch 11/35 50000/50000 [==============================] - 852s 17ms/sample - loss: 2.3027 - accuracy: 0.0988 - val_loss: 2.3026 - val_accuracy: 0.1000 Epoch 00012: LearningRateScheduler reducing learning rate to 0.0096. Epoch 12/35 50000/50000 [==============================] - 857s 17ms/sample - loss: 2.3027 - accuracy: 0.0980 - val_loss: 2.3026 - val_accuracy: 0.1000 Epoch 00013: LearningRateScheduler reducing learning rate to 0.0096. Epoch 13/35 50000/50000 [==============================] - 853s 17ms/sample - loss: 2.3027 - accuracy: 0.0986 - val_loss: 2.3026 - val_accuracy: 0.1000 Epoch 00014: LearningRateScheduler reducing learning rate to 0.0096. Epoch 14/35 50000/50000 [==============================] - 859s 17ms/sample - loss: 2.3027 - accuracy: 0.0972 - val_loss: 2.3026 - val_accuracy: 0.1000 Epoch 00015: LearningRateScheduler reducing learning rate to 0.0096. Epoch 15/35 50000/50000 [==============================] - 852s 17ms/sample - loss: 2.3027 - accuracy: 0.0988 - val_loss: 2.3026 - val_accuracy: 0.1000 Epoch 00016: LearningRateScheduler reducing learning rate to 0.009216. Epoch 16/35 50000/50000 [==============================] - 854s 17ms/sample - loss: 2.3027 - accuracy: 0.0989 - val_loss: 2.3026 - val_accuracy: 0.1000 Epoch 00017: LearningRateScheduler reducing learning rate to 0.009216. Epoch 17/35 50000/50000 [==============================] - 857s 17ms/sample - loss: 2.3027 - accuracy: 0.0983 - val_loss: 2.3026 - val_accuracy: 0.1000 Epoch 00018: LearningRateScheduler reducing learning rate to 0.009216. 
Epoch 18/35 50000/50000 [==============================] - 857s 17ms/sample - loss: 2.3027 - accuracy: 0.0962 - val_loss: 2.3026 - val_accuracy: 0.1000 Epoch 00019: LearningRateScheduler reducing learning rate to 0.009216. Epoch 19/35 50000/50000 [==============================] - 848s 17ms/sample - loss: 2.3027 - accuracy: 0.0984 - val_loss: 2.3026 - val_accuracy: 0.1000 Epoch 00020: LearningRateScheduler reducing learning rate to 0.009216. Epoch 20/35 50000/50000 [==============================] - 858s 17ms/sample - loss: 2.3027 - accuracy: 0.0997 - val_loss: 2.3026 - val_accuracy: 0.1000 Epoch 00021: LearningRateScheduler reducing learning rate to 0.009216. Epoch 21/35 50000/50000 [==============================] - 854s 17ms/sample - loss: 2.3027 - accuracy: 0.0982 - val_loss: 2.3026 - val_accuracy: 0.1000 Epoch 00022: LearningRateScheduler reducing learning rate to 0.009216. Epoch 22/35 50000/50000 [==============================] - 851s 17ms/sample - loss: 2.3027 - accuracy: 0.0976 - val_loss: 2.3026 - val_accuracy: 0.1000 Epoch 00023: LearningRateScheduler reducing learning rate to 0.009216. Epoch 23/35 50000/50000 [==============================] - 850s 17ms/sample - loss: 2.3027 - accuracy: 0.0974 - val_loss: 2.3026 - val_accuracy: 0.1000 Epoch 00024: LearningRateScheduler reducing learning rate to 0.008847359999999999. Epoch 24/35 50000/50000 [==============================] - 850s 17ms/sample - loss: 2.3027 - accuracy: 0.0978 - val_loss: 2.3026 - val_accuracy: 0.1000 Epoch 00025: LearningRateScheduler reducing learning rate to 0.008847359999999999. Epoch 25/35 50000/50000 [==============================] - 848s 17ms/sample - loss: 2.3027 - accuracy: 0.0981 - val_loss: 2.3026 - val_accuracy: 0.1000 Epoch 00026: LearningRateScheduler reducing learning rate to 0.008847359999999999. 
Epoch 26/35 50000/50000 [==============================] - 851s 17ms/sample - loss: 2.3027 - accuracy: 0.0986 - val_loss: 2.3026 - val_accuracy: 0.1000 Epoch 00027: LearningRateScheduler reducing learning rate to 0.008847359999999999. Epoch 27/35 50000/50000 [==============================] - 849s 17ms/sample - loss: 2.3027 - accuracy: 0.0991 - val_loss: 2.3026 - val_accuracy: 0.1000 Epoch 00028: LearningRateScheduler reducing learning rate to 0.008847359999999999. Epoch 28/35 50000/50000 [==============================] - 852s 17ms/sample - loss: 2.3027 - accuracy: 0.0986 - val_loss: 2.3026 - val_accuracy: 0.1000 Epoch 00029: LearningRateScheduler reducing learning rate to 0.008847359999999999. Epoch 29/35 50000/50000 [==============================] - 844s 17ms/sample - loss: 2.3027 - accuracy: 0.0991 - val_loss: 2.3026 - val_accuracy: 0.1000 Epoch 00030: LearningRateScheduler reducing learning rate to 0.008847359999999999. Epoch 30/35 50000/50000 [==============================] - 850s 17ms/sample - loss: 2.3027 - accuracy: 0.0982 - val_loss: 2.3026 - val_accuracy: 0.1000 Epoch 00031: LearningRateScheduler reducing learning rate to 0.008847359999999999. Epoch 31/35 50000/50000 [==============================] - 852s 17ms/sample - loss: 2.3027 - accuracy: 0.0981 - val_loss: 2.3026 - val_accuracy: 0.1000 Epoch 00032: LearningRateScheduler reducing learning rate to 0.008493465599999998. Epoch 32/35 50000/50000 [==============================] - 847s 17ms/sample - loss: 2.3027 - accuracy: 0.0988 - val_loss: 2.3026 - val_accuracy: 0.1000 Epoch 00033: LearningRateScheduler reducing learning rate to 0.008493465599999998. Epoch 33/35 50000/50000 [==============================] - 848s 17ms/sample - loss: 2.3027 - accuracy: 0.0988 - val_loss: 2.3026 - val_accuracy: 0.1000 Epoch 00034: LearningRateScheduler reducing learning rate to 0.008493465599999998. 
Epoch 34/35 50000/50000 [==============================] - 847s 17ms/sample - loss: 2.3027 - accuracy: 0.0976 - val_loss: 2.3026 - val_accuracy: 0.1000 Epoch 00035: LearningRateScheduler reducing learning rate to 0.008493465599999998. Epoch 35/35 50000/50000 [==============================] - 856s 17ms/sample - loss: 2.3027 - accuracy: 0.0982 - val_loss: 2.3026 - val_accuracy: 0.1000