# Use a fixed random seed for Keras and make TensorFlow ops deterministic
# so every run produces identical results.
import tensorflow as tf
tf.keras.utils.set_random_seed(42)
tf.config.experimental.enable_op_determinism()
from tensorflow import keras
# Download (on first use, into the Keras cache) and load Fashion MNIST:
# 28x28 grayscale clothing images with integer class labels 0-9.
(train_input, train_target), (test_input, test_target) = keras.datasets.fashion_mnist.load_data()
# output: Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-labels-idx1-ubyte.gz 29515/29515 [==============================] - 0s 0us/step Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-images-idx3-ubyte.gz 26421880/26421880 [==============================] - 0s 0us/step Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-labels-idx1-ubyte.gz 5148/5148 [==============================] - 0s 0us/step Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-images-idx3-ubyte.gz 4422102/4422102 [==============================] - 0s 0us/step
# Training set: 60,000 images of 28x28 pixels, with one label each.
print(train_input.shape, train_target.shape)
# output: (60000, 28, 28) (60000,)
# Test set: 10,000 images and labels.
print(test_input.shape, test_target.shape)
# output: (10000, 28, 28) (10000,)
import matplotlib.pyplot as plt

# Display the first 10 training images in a row, using inverted grayscale
# so dark pixels appear on a white background.
fig, axs = plt.subplots(1, 10, figsize=(10, 10))
for ax, image in zip(axs, train_input[:10]):
    ax.imshow(image, cmap='gray_r')
    ax.axis('off')
plt.show()
# Labels of the 10 samples shown above (class ids 0-9).
print(list(train_target[:10]))
# output: [9, 0, 0, 3, 0, 2, 7, 2, 5, 5]
import numpy as np

# Count samples per class — the training set is balanced (6,000 per label).
class_counts = np.unique(train_target, return_counts=True)
print(class_counts)
# output: (array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=uint8), array([6000, 6000, 6000, 6000, 6000, 6000, 6000, 6000, 6000, 6000]))
# Scale pixel values from [0, 255] to [0, 1] and flatten each
# 28x28 image into a 784-element feature vector.
train_scaled = (train_input / 255.0).reshape(-1, 28 * 28)
print(train_scaled.shape)
# output: (60000, 784)
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import cross_validate

# Baseline: logistic regression fit by SGD, scored with cross-validation
# (scikit-learn's default 5-fold). max_iter=5 keeps training short.
sc = SGDClassifier(loss='log_loss', max_iter=5, random_state=42)
scores = cross_validate(sc, train_scaled, train_target, n_jobs=-1)
print(scores['test_score'].mean())
# output: 0.8196000000000001
import tensorflow as tf
from tensorflow import keras
from sklearn.model_selection import train_test_split

# Hold out 20% of the training data as a validation set for the
# neural network (the Keras fit below does not cross-validate).
train_scaled, val_scaled, train_target, val_target = train_test_split(
    train_scaled,
    train_target,
    test_size=0.2,
    random_state=42,
)
print(train_scaled.shape, train_target.shape)
# output: (48000, 784) (48000,)
# Remaining 20% forms the validation set.
print(val_scaled.shape, val_target.shape)
# output: (12000, 784) (12000,)
# Single-layer network: 10 softmax units over the 784 input pixels,
# producing one probability per clothing class.
dense = keras.layers.Dense(10, input_shape=(784,), activation='softmax')
model = keras.Sequential([dense])
# Labels are plain integers (not one-hot), so use the sparse variant
# of categorical cross-entropy; also track classification accuracy.
model.compile(loss='sparse_categorical_crossentropy', metrics=['accuracy'])
print(train_target[:10])
# output: [7 3 5 8 6 9 3 3 9 9]
# Train for 5 epochs on the 48,000-sample training split.
model.fit(train_scaled, train_target, epochs=5)
# output: Epoch 1/5 1500/1500 [==============================] - 8s 2ms/step - loss: 0.6069 - accuracy: 0.7947 Epoch 2/5 1500/1500 [==============================] - 5s 3ms/step - loss: 0.4742 - accuracy: 0.8382 Epoch 3/5 1500/1500 [==============================] - 4s 3ms/step - loss: 0.4508 - accuracy: 0.8474 Epoch 4/5 1500/1500 [==============================] - 3s 2ms/step - loss: 0.4367 - accuracy: 0.8527 Epoch 5/5 1500/1500 [==============================] - 3s 2ms/step - loss: 0.4280 - accuracy: 0.8555
# output: <keras.src.callbacks.History at 0x7c5dd1bbbd30>
# Report loss and accuracy on the held-out validation set.
model.evaluate(val_scaled, val_target)
# output: 375/375 [==============================] - 1s 2ms/step - loss: 0.4526 - accuracy: 0.8465
# output: [0.45262548327445984, 0.8464999794960022]