You can view this notebook in the Jupyter Notebook Viewer (nbviewer.jupyter.org) or run it in Google Colab (colab.research.google.com).
View in Jupyter Notebook Viewer | Run in Google Colab
Running this notebook requires TensorFlow 2.0.0-alpha0 or later.
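Since everything below relies on the TF 2.x API, it is worth confirming the installed version first (a minimal check, not part of the original workflow):
import tensorflow as tf
print(tf.__version__)  # should print a 2.x version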
import numpy as np
w = np.array([2, 1, 5, 3])
x = np.array([2, 8, 3, 7, 1, 2, 0, 4, 5])
w_r = np.flip(w)
print(w_r)
[3 5 1 2]
w[0:4:2]
array([2, 5])
for i in range(6):
    print(np.dot(x[i:i+4], w_r))
63
48
49
28
21
20
from scipy.signal import convolve
convolve(x, w, mode='valid')
array([63, 48, 49, 28, 21, 20])
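As a quick sanity check (a minimal sketch, not in the original), scipy's 'valid' convolution should reproduce the reversed-kernel dot products computed in the loop above:
# The loop result and scipy's convolve should be identical.
manual = np.array([np.dot(x[i:i+4], w_r) for i in range(6)])
print(np.array_equal(manual, convolve(x, w, mode='valid')))  # True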
from scipy.signal import correlate
correlate(x, w, mode='valid')
array([48, 57, 24, 25, 16, 39])
correlate(x, w, mode='full')
array([ 6, 34, 51, 48, 57, 24, 25, 16, 39, 29, 13, 10])
correlate(x, w, mode='same')
array([34, 51, 48, 57, 24, 25, 16, 39, 29])
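The 'full' mode can be reproduced by zero-padding x with len(w) - 1 zeros on each side and sliding w across it without flipping; a minimal sketch using the same x and w as above:
x_pad = np.pad(x, len(w) - 1)  # pad 3 zeros on each end
print(np.array([np.dot(x_pad[i:i+4], w) for i in range(len(x_pad) - 3)]))
# [ 6 34 51 48 57 24 25 16 39 29 13 10]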
x = np.array([[1, 2, 3],
              [4, 5, 6],
              [7, 8, 9]])
w = np.array([[2, 0], [0, 0]])
from scipy.signal import correlate2d
correlate2d(x, w, mode='valid')
array([[ 2,  4],
       [ 8, 10]])
np.flip(w)
array([[0, 0],
       [0, 2]])
from scipy.signal import convolve2d
convolve2d(x, w, mode='valid')
array([[10, 12],
       [16, 18]])
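This confirms that convolve2d flips the kernel before sliding it, so it should agree with correlate2d applied to the flipped kernel (a quick check, not in the original):
print(np.array_equal(convolve2d(x, w, mode='valid'),
                     correlate2d(x, np.flip(w), mode='valid')))  # True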
correlate2d(x, w, mode='same')
array([[ 2,  4,  6],
       [ 8, 10, 12],
       [14, 16, 18]])
import tensorflow as tf
x_4d = x.astype(float).reshape(1, 3, 3, 1)
w_4d = w.reshape(2, 2, 1, 1)
c_out = tf.nn.conv2d(x_4d, w_4d, strides=1, padding='SAME')
c_out.numpy().reshape(3, 3)
array([[ 2.,  4.,  6.],
       [ 8., 10., 12.],
       [14., 16., 18.]])
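Note that tf.nn.conv2d computes cross-correlation (no kernel flip), which is why its 'SAME' output matches correlate2d above. A minimal NumPy sketch of what 'SAME' padding does here, assuming TF's convention of putting the extra padding for an even-sized kernel on the bottom and right:
x_pad = np.pad(x, ((0, 1), (0, 1)))  # zero-pad bottom and right
out = np.zeros((3, 3))
for i in range(3):
    for j in range(3):
        out[i, j] = np.sum(x_pad[i:i+2, j:j+2] * w)
print(out)  # same values as c_out above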
x = np.array([[1, 2, 3, 4],
              [5, 6, 7, 8],
              [9, 10, 11, 12],
              [13, 14, 15, 16]])
x = x.reshape(1, 4, 4, 1)
p_out = tf.nn.max_pool2d(x, ksize=2, strides=2, padding='VALID')
p_out.numpy().reshape(2, 2)
array([[ 6.,  8.],
       [14., 16.]], dtype=float32)
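Max pooling is easy to reproduce in plain NumPy as well; a minimal sketch assuming the height and width are divisible by the pool size:
def max_pool2d_np(m, size=2):
    # Split each axis into (blocks, size) and take the max within each block.
    h, w = m.shape
    return m.reshape(h // size, size, w // size, size).max(axis=(1, 3))

print(max_pool2d_np(np.arange(1, 17).reshape(4, 4)))  # [[ 6  8] [14 16]]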
def relu(x):
    return np.maximum(x, 0)
x = np.array([-1, 2, -3, 4, -5])
relu(x)
array([0, 2, 0, 4, 0])
r_out = tf.nn.relu(x)
r_out.numpy()
array([0, 2, 0, 4, 0])
import tensorflow as tf
class ConvolutionNetwork:

    def __init__(self, n_kernels=10, units=10, batch_size=32, learning_rate=0.1):
        self.n_kernels = n_kernels    # number of convolution kernels
        self.kernel_size = 3          # kernel size
        self.optimizer = None         # optimizer
        self.conv_w = None            # weights of the convolutional layer
        self.conv_b = None            # bias of the convolutional layer
        self.units = units            # number of neurons in the hidden layer
        self.batch_size = batch_size  # batch size
        self.w1 = None                # weights of the hidden layer
        self.b1 = None                # bias of the hidden layer
        self.w2 = None                # weights of the output layer
        self.b2 = None                # bias of the output layer
        self.a1 = None                # activation output of the hidden layer
        self.losses = []              # training losses
        self.val_losses = []          # validation losses
        self.lr = learning_rate      # learning rate

    def forpass(self, x):
        # Apply the 3x3 convolution.
        c_out = tf.nn.conv2d(x, self.conv_w, strides=1, padding='SAME') + self.conv_b
        # Apply the ReLU activation function.
        r_out = tf.nn.relu(c_out)
        # Apply 2x2 max pooling.
        p_out = tf.nn.max_pool2d(r_out, ksize=2, strides=2, padding='VALID')
        # Flatten the output, keeping the first (batch) dimension.
        f_out = tf.reshape(p_out, [x.shape[0], -1])
        z1 = tf.matmul(f_out, self.w1) + self.b1  # linear computation of the first dense layer
        a1 = tf.nn.relu(z1)                       # apply the activation function
        z2 = tf.matmul(a1, self.w2) + self.b2     # linear computation of the second dense layer
        return z2

    def init_weights(self, input_shape, n_classes):
        g = tf.initializers.glorot_uniform()
        self.conv_w = tf.Variable(g((3, 3, 1, self.n_kernels)))
        self.conv_b = tf.Variable(np.zeros(self.n_kernels), dtype=float)
        n_features = 14 * 14 * self.n_kernels
        self.w1 = tf.Variable(g((n_features, self.units)))         # (n_features, units)
        self.b1 = tf.Variable(np.zeros(self.units), dtype=float)   # units
        self.w2 = tf.Variable(g((self.units, n_classes)))          # (units, n_classes)
        self.b2 = tf.Variable(np.zeros(n_classes), dtype=float)    # n_classes

    def fit(self, x, y, epochs=100, x_val=None, y_val=None):
        self.init_weights(x.shape, y.shape[1])  # initialize the convolutional, hidden, and output layer weights
        self.optimizer = tf.optimizers.SGD(learning_rate=self.lr)
        # Repeat for the given number of epochs.
        for i in range(epochs):
            print('Epoch', i, end=' ')
            # Iterate over the mini-batches returned by the generator function.
            batch_losses = []
            for x_batch, y_batch in self.gen_batch(x, y):
                print('.', end='')
                self.training(x_batch, y_batch)
                # Record the batch loss.
                batch_losses.append(self.get_loss(x_batch, y_batch))
            print()
            # Average the batch losses and store the result as the training loss.
            self.losses.append(np.mean(batch_losses))
            # Compute the loss on the validation set.
            self.val_losses.append(self.get_loss(x_val, y_val))

    # Mini-batch generator function
    def gen_batch(self, x, y):
        bins = len(x) // self.batch_size  # number of mini-batches
        indexes = np.random.permutation(np.arange(len(x)))  # shuffle the indices
        x = x[indexes]
        y = y[indexes]
        for i in range(bins):
            start = self.batch_size * i
            end = self.batch_size * (i + 1)
            yield x[start:end], y[start:end]  # slice out batch_size samples

    def training(self, x, y):
        m = len(x)  # number of samples
        with tf.GradientTape() as tape:
            z = self.forpass(x)  # forward pass
            # Compute the loss.
            loss = tf.nn.softmax_cross_entropy_with_logits(y, z)
            loss = tf.reduce_mean(loss)
        weights_list = [self.conv_w, self.conv_b,
                        self.w1, self.b1, self.w2, self.b2]
        # Compute the gradients with respect to the weights.
        grads = tape.gradient(loss, weights_list)
        # Update the weights.
        self.optimizer.apply_gradients(zip(grads, weights_list))

    def predict(self, x):
        z = self.forpass(x)  # forward pass
        return np.argmax(z.numpy(), axis=1)  # return the index of the largest value

    def score(self, x, y):
        # Compare predictions with the targets and return the fraction of matches.
        return np.mean(self.predict(x) == np.argmax(y, axis=1))

    def get_loss(self, x, y):
        z = self.forpass(x)  # forward pass
        # Compute and return the loss.
        loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y, z))
        return loss.numpy()
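The hard-coded n_features = 14 * 14 * self.n_kernels in init_weights assumes 28x28 inputs: the 'SAME' 3x3 convolution preserves the 28x28 spatial size, and 2x2 max pooling with stride 2 halves it to 14x14. A quick shape check with a dummy batch (a minimal sketch, not part of the original class):
dummy = tf.zeros((1, 28, 28, 1))   # one 28x28 grayscale image
kernel = tf.zeros((3, 3, 1, 10))   # ten 3x3 kernels
c = tf.nn.conv2d(dummy, kernel, strides=1, padding='SAME')
p = tf.nn.max_pool2d(c, ksize=2, strides=2, padding='VALID')
print(c.shape, p.shape)  # (1, 28, 28, 10) (1, 14, 14, 10)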
x = tf.Variable(np.array([1.0, 2.0, 3.0]))
with tf.GradientTape() as tape:
    y = x ** 3 + 2 * x + 5
# Compute the gradient.
print(tape.gradient(y, x))
tf.Tensor([ 5. 14. 29.], shape=(3,), dtype=float64)
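The result matches the analytic derivative: for y = x³ + 2x + 5, dy/dx = 3x² + 2, which evaluates to [5, 14, 29] at [1, 2, 3]:
x_np = np.array([1.0, 2.0, 3.0])
print(3 * x_np ** 2 + 2)  # [ 5. 14. 29.]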
x = tf.Variable(np.array([1.0, 2.0, 3.0]))
with tf.GradientTape() as tape:
    y = tf.nn.softmax(x)
# Compute the gradient.
print(tape.gradient(y, x))
tf.Tensor([9.99540153e-18 2.71703183e-17 7.38565826e-17], shape=(3,), dtype=float64)
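The near-zero values are expected: when the target of tape.gradient is a non-scalar tensor, TensorFlow differentiates the sum of its elements, and sum(softmax(x)) is identically 1 for any x, so the true gradient is zero; the ~1e-17 entries are floating-point round-off. A quick check:
print(tf.reduce_sum(tf.nn.softmax(np.array([1.0, 2.0, 3.0]))).numpy())  # 1.0, up to round-off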
(x_train_all, y_train_all), (x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-labels-idx1-ubyte.gz
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-images-idx3-ubyte.gz
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-labels-idx1-ubyte.gz
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-images-idx3-ubyte.gz
(download progress bars omitted)
from sklearn.model_selection import train_test_split
x_train, x_val, y_train, y_val = train_test_split(x_train_all, y_train_all, stratify=y_train_all,
test_size=0.2, random_state=42)
y_train_encoded = tf.keras.utils.to_categorical(y_train)
y_val_encoded = tf.keras.utils.to_categorical(y_val)
x_train = x_train.reshape(-1, 28, 28, 1)
x_val = x_val.reshape(-1, 28, 28, 1)
x_train.shape
(48000, 28, 28, 1)
x_train = x_train / 255
x_val = x_val / 255
cn = ConvolutionNetwork(n_kernels=10, units=100, batch_size=128, learning_rate=0.01)
cn.fit(x_train, y_train_encoded,
x_val=x_val, y_val=y_val_encoded, epochs=20)
Epoch 0 ... Epoch 19: one progress dot is printed per mini-batch (375 per epoch); the dot output is omitted here.
import matplotlib.pyplot as plt
plt.plot(cn.losses)
plt.plot(cn.val_losses)
plt.ylabel('loss')
plt.xlabel('iteration')
plt.legend(['train_loss', 'val_loss'])
plt.show()
cn.score(x_val, y_val_encoded)
0.8745833333333334
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense
conv1 = tf.keras.Sequential()
conv1.add(Conv2D(10, (3, 3), activation='relu', padding='same', input_shape=(28, 28, 1)))
conv1.add(MaxPooling2D((2, 2)))
conv1.add(Flatten())
conv1.add(Dense(100, activation='relu'))
conv1.add(Dense(10, activation='softmax'))
conv1.summary()
Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d (Conv2D) (None, 28, 28, 10) 100 _________________________________________________________________ max_pooling2d (MaxPooling2D) (None, 14, 14, 10) 0 _________________________________________________________________ flatten (Flatten) (None, 1960) 0 _________________________________________________________________ dense (Dense) (None, 100) 196100 _________________________________________________________________ dense_1 (Dense) (None, 10) 1010 ================================================================= Total params: 197,210 Trainable params: 197,210 Non-trainable params: 0 _________________________________________________________________
conv1.compile(optimizer='adam', loss='categorical_crossentropy',
metrics=['accuracy'])
history = conv1.fit(x_train, y_train_encoded, epochs=20,
validation_data=(x_val, y_val_encoded))
Epoch 1/20
1500/1500 [==============================] - 8s 5ms/step - loss: 0.4359 - accuracy: 0.8482 - val_loss: 0.3272 - val_accuracy: 0.8832
Epoch 2/20
1500/1500 [==============================] - 6s 4ms/step - loss: 0.2964 - accuracy: 0.8919 - val_loss: 0.2807 - val_accuracy: 0.8992
Epoch 3/20
1500/1500 [==============================] - 6s 4ms/step - loss: 0.2556 - accuracy: 0.9076 - val_loss: 0.2605 - val_accuracy: 0.9043
Epoch 4/20
1500/1500 [==============================] - 7s 5ms/step - loss: 0.2269 - accuracy: 0.9162 - val_loss: 0.2555 - val_accuracy: 0.9064
Epoch 5/20
1500/1500 [==============================] - 6s 4ms/step - loss: 0.2061 - accuracy: 0.9238 - val_loss: 0.2682 - val_accuracy: 0.9051
Epoch 6/20
1500/1500 [==============================] - 7s 5ms/step - loss: 0.1851 - accuracy: 0.9315 - val_loss: 0.2357 - val_accuracy: 0.9176
Epoch 7/20
1500/1500 [==============================] - 7s 5ms/step - loss: 0.1678 - accuracy: 0.9381 - val_loss: 0.2510 - val_accuracy: 0.9147
Epoch 8/20
1500/1500 [==============================] - 7s 5ms/step - loss: 0.1524 - accuracy: 0.9443 - val_loss: 0.2571 - val_accuracy: 0.9152
Epoch 9/20
1500/1500 [==============================] - 7s 5ms/step - loss: 0.1383 - accuracy: 0.9481 - val_loss: 0.2554 - val_accuracy: 0.9174
Epoch 10/20
1500/1500 [==============================] - 6s 4ms/step - loss: 0.1228 - accuracy: 0.9550 - val_loss: 0.2503 - val_accuracy: 0.9204
Epoch 11/20
1500/1500 [==============================] - 6s 4ms/step - loss: 0.1111 - accuracy: 0.9595 - val_loss: 0.2748 - val_accuracy: 0.9140
Epoch 12/20
1500/1500 [==============================] - 7s 5ms/step - loss: 0.0995 - accuracy: 0.9640 - val_loss: 0.2701 - val_accuracy: 0.9198
Epoch 13/20
1500/1500 [==============================] - 6s 4ms/step - loss: 0.0871 - accuracy: 0.9688 - val_loss: 0.2857 - val_accuracy: 0.9183
Epoch 14/20
1500/1500 [==============================] - 6s 4ms/step - loss: 0.0770 - accuracy: 0.9729 - val_loss: 0.2975 - val_accuracy: 0.9185
Epoch 15/20
1500/1500 [==============================] - 7s 5ms/step - loss: 0.0712 - accuracy: 0.9752 - val_loss: 0.3074 - val_accuracy: 0.9188
Epoch 16/20
1500/1500 [==============================] - 6s 4ms/step - loss: 0.0598 - accuracy: 0.9783 - val_loss: 0.3323 - val_accuracy: 0.9190
Epoch 17/20
1500/1500 [==============================] - 6s 4ms/step - loss: 0.0565 - accuracy: 0.9790 - val_loss: 0.3530 - val_accuracy: 0.9154
Epoch 18/20
1500/1500 [==============================] - 6s 4ms/step - loss: 0.0477 - accuracy: 0.9832 - val_loss: 0.3603 - val_accuracy: 0.9172
Epoch 19/20
1500/1500 [==============================] - 6s 4ms/step - loss: 0.0434 - accuracy: 0.9851 - val_loss: 0.3734 - val_accuracy: 0.9203
Epoch 20/20
1500/1500 [==============================] - 7s 5ms/step - loss: 0.0401 - accuracy: 0.9858 - val_loss: 0.3936 - val_accuracy: 0.9163
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train_loss', 'val_loss'])
plt.show()
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train_accuracy', 'val_accuracy'])
plt.show()
loss, accuracy = conv1.evaluate(x_val, y_val_encoded, verbose=0)
print(accuracy)
0.9163333177566528
from tensorflow.keras.layers import Dropout
conv2 = tf.keras.Sequential()
conv2.add(Conv2D(10, (3, 3), activation='relu', padding='same', input_shape=(28, 28, 1)))
conv2.add(MaxPooling2D((2, 2)))
conv2.add(Flatten())
conv2.add(Dropout(0.5))
conv2.add(Dense(100, activation='relu'))
conv2.add(Dense(10, activation='softmax'))
conv2.summary()
Model: "sequential_1" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d_1 (Conv2D) (None, 28, 28, 10) 100 _________________________________________________________________ max_pooling2d_1 (MaxPooling2 (None, 14, 14, 10) 0 _________________________________________________________________ flatten_1 (Flatten) (None, 1960) 0 _________________________________________________________________ dropout (Dropout) (None, 1960) 0 _________________________________________________________________ dense_2 (Dense) (None, 100) 196100 _________________________________________________________________ dense_3 (Dense) (None, 10) 1010 ================================================================= Total params: 197,210 Trainable params: 197,210 Non-trainable params: 0 _________________________________________________________________
conv2.compile(optimizer='adam', loss='categorical_crossentropy',
metrics=['accuracy'])
history = conv2.fit(x_train, y_train_encoded, epochs=20,
validation_data=(x_val, y_val_encoded))
Epoch 1/20
1500/1500 [==============================] - 8s 5ms/step - loss: 0.5372 - accuracy: 0.8059 - val_loss: 0.3691 - val_accuracy: 0.8695
Epoch 2/20
1500/1500 [==============================] - 7s 5ms/step - loss: 0.3853 - accuracy: 0.8607 - val_loss: 0.3214 - val_accuracy: 0.8850
Epoch 3/20
1500/1500 [==============================] - 7s 5ms/step - loss: 0.3390 - accuracy: 0.8758 - val_loss: 0.2837 - val_accuracy: 0.8957
Epoch 4/20
1500/1500 [==============================] - 7s 5ms/step - loss: 0.3064 - accuracy: 0.8865 - val_loss: 0.2682 - val_accuracy: 0.9050
Epoch 5/20
1500/1500 [==============================] - 6s 4ms/step - loss: 0.2885 - accuracy: 0.8927 - val_loss: 0.2631 - val_accuracy: 0.9039
Epoch 6/20
1500/1500 [==============================] - 7s 5ms/step - loss: 0.2646 - accuracy: 0.9005 - val_loss: 0.2458 - val_accuracy: 0.9132
Epoch 7/20
1500/1500 [==============================] - 7s 5ms/step - loss: 0.2518 - accuracy: 0.9065 - val_loss: 0.2495 - val_accuracy: 0.9120
Epoch 8/20
1500/1500 [==============================] - 6s 4ms/step - loss: 0.2426 - accuracy: 0.9086 - val_loss: 0.2488 - val_accuracy: 0.9085
Epoch 9/20
1500/1500 [==============================] - 6s 4ms/step - loss: 0.2323 - accuracy: 0.9134 - val_loss: 0.2335 - val_accuracy: 0.9156
Epoch 10/20
1500/1500 [==============================] - 6s 4ms/step - loss: 0.2247 - accuracy: 0.9155 - val_loss: 0.2332 - val_accuracy: 0.9170
Epoch 11/20
1500/1500 [==============================] - 7s 5ms/step - loss: 0.2151 - accuracy: 0.9195 - val_loss: 0.2357 - val_accuracy: 0.9178
Epoch 12/20
1500/1500 [==============================] - 6s 4ms/step - loss: 0.2115 - accuracy: 0.9205 - val_loss: 0.2319 - val_accuracy: 0.9172
Epoch 13/20
1500/1500 [==============================] - 7s 5ms/step - loss: 0.2019 - accuracy: 0.9237 - val_loss: 0.2303 - val_accuracy: 0.9183
Epoch 14/20
1500/1500 [==============================] - 7s 5ms/step - loss: 0.1977 - accuracy: 0.9257 - val_loss: 0.2280 - val_accuracy: 0.9193
Epoch 15/20
1500/1500 [==============================] - 7s 5ms/step - loss: 0.1936 - accuracy: 0.9261 - val_loss: 0.2407 - val_accuracy: 0.9160
Epoch 16/20
1500/1500 [==============================] - 7s 5ms/step - loss: 0.1892 - accuracy: 0.9272 - val_loss: 0.2283 - val_accuracy: 0.9199
Epoch 17/20
1500/1500 [==============================] - 7s 5ms/step - loss: 0.1829 - accuracy: 0.9308 - val_loss: 0.2273 - val_accuracy: 0.9221
Epoch 18/20
1500/1500 [==============================] - 7s 5ms/step - loss: 0.1776 - accuracy: 0.9331 - val_loss: 0.2313 - val_accuracy: 0.9207
Epoch 19/20
1500/1500 [==============================] - 7s 5ms/step - loss: 0.1761 - accuracy: 0.9340 - val_loss: 0.2326 - val_accuracy: 0.9184
Epoch 20/20
1500/1500 [==============================] - 7s 5ms/step - loss: 0.1705 - accuracy: 0.9357 - val_loss: 0.2297 - val_accuracy: 0.9207
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train_loss', 'val_loss'])
plt.show()
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train_accuracy', 'val_accuracy'])
plt.show()
loss, accuracy = conv2.evaluate(x_val, y_val_encoded, verbose=0)
print(accuracy)
0.9206666946411133