import numpy as np
import matplotlib.pyplot as plt
# Ensure results are reproducible
from numpy.random import seed
seed(1)
from tensorflow import set_random_seed
set_random_seed(2)
def mnist_load_data(path='mnist.npz'):
    with np.load(path) as f:
        x_train, y_train = f['x_train'], f['y_train']
        x_test, y_test = f['x_test'], f['y_test']
    return (x_train, y_train), (x_test, y_test)
(X_train, y_train), (X_test, y_test) = mnist_load_data(path='../input/mnist.npz')
X_train = X_train.astype('float32') / 255.
X_test = X_test.astype('float32') / 255.
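# Sanity check (an illustrative addition, not part of the original run):
# after scaling, pixel values lie in [0, 1], matching the sigmoid output
# layer used further below.
print(X_train.min(), X_train.max())  # expected: 0.0 1.0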
plt.imshow(X_test[0])
from keras.layers import Input, Dense
from keras.models import Model
# this is the size of our encoded representations
encoding_dim = 32  # 32 floats -> a compression factor of 24.5, since the input is 784 floats
# this is our input placeholder
input_img = Input(shape=(784,))
# "encoded" is the encoded representation of the input
encoded = Dense(encoding_dim, activation='relu')(input_img)
# "decoded" is the lossy reconstruction of the input
decoded = Dense(784, activation='sigmoid')(encoded)
# this model maps an input to its reconstruction
autoencoder = Model(input_img, decoded)
from keras.utils import plot_model
plot_model(autoencoder, to_file='model.png', show_shapes=True)
plt.figure(figsize=(10,10))
plt.imshow(plt.imread('model.png'))
# this model maps an input to its encoded representation
encoder = Model(input_img, encoded)
# create a placeholder for an encoded (32-dimensional) input
encoded_input = Input(shape=(encoding_dim,))
# retrieve the last layer of the autoencoder model
decoder_layer = autoencoder.layers[-1]
# create the decoder model
decoder = Model(encoded_input, decoder_layer(encoded_input))
X_train_flat = X_train.reshape((len(X_train), np.prod(X_train.shape[1:])))
X_test_flat = X_test.reshape((len(X_test), np.prod(X_test.shape[1:])))
print(X_train_flat.shape)
print(X_test_flat.shape)
(60000, 784)
(10000, 784)
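# Quick sanity check (an illustrative addition, not part of the original run):
# encoder and decoder reuse the autoencoder's own layers, so chaining them
# should reproduce the end-to-end model's output.
sample = X_test_flat[:1]
assert np.allclose(decoder.predict(encoder.predict(sample)),
                   autoencoder.predict(sample), atol=1e-6)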
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
autoencoder.fit(X_train_flat, X_train_flat,
                epochs=50,
                batch_size=256,
                shuffle=True,
                validation_data=(X_test_flat, X_test_flat))
Train on 60000 samples, validate on 10000 samples
Epoch 1/50
60000/60000 [==============================] - 3s 46us/step - loss: 0.3558 - val_loss: 0.2710
Epoch 2/50
60000/60000 [==============================] - 2s 35us/step - loss: 0.2642 - val_loss: 0.2538
...
Epoch 49/50
60000/60000 [==============================] - 1s 21us/step - loss: 0.1059 - val_loss: 0.1041
Epoch 50/50
60000/60000 [==============================] - 1s 20us/step - loss: 0.1055 - val_loss: 0.1037
original = np.expand_dims(X_test_flat[0], 0)  # the first test digit, a handwritten 7
seven = autoencoder.predict(original)
seven = seven.reshape(1, 28, 28)
original = original.reshape(1, 28, 28)
fig = plt.figure(figsize=(7, 10))
a = fig.add_subplot(1, 2, 1)
a.set_title('Original')
imgplot = plt.imshow(original[0, :, :])
b = fig.add_subplot(1, 2, 2)
b.set_title('Autoencoder')
imgplot = plt.imshow(seven[0, :, :])
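# Optional follow-up (illustrative): one number for reconstruction quality
# over the whole test set, rather than eyeballing a single digit.
reconstructions = autoencoder.predict(X_test_flat)
print('test-set MSE: %.4f' % np.mean((reconstructions - X_test_flat) ** 2))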
import numpy as np
import matplotlib.pyplot as plt
from keras.layers import Input, Dense, Lambda
from keras.models import Model
from keras import backend as K
from keras import metrics
batch_size = 100
original_dim = 784
latent_dim = 32
intermediate_dim = 256
epochs = 50
epsilon_std = 1.0
x = Input(shape=(original_dim,))
h = Dense(intermediate_dim, activation='relu')(x)
# two heads on the same hidden layer: one predicts the latent means, the
# other the latent log-variances (log space keeps the implied variance positive)
z_mean = Dense(latent_dim)(h)
z_log_var = Dense(latent_dim)(h)
def sampling(args):
    """Reparameterisation trick: z = mu + sigma * eps with eps ~ N(0, 1),
    keeping the stochastic node differentiable w.r.t. mean and log-variance."""
    z_mean, z_log_var = args
    epsilon = K.random_normal(shape=(K.shape(z_mean)[0], latent_dim), mean=0.,
                              stddev=epsilon_std)
    # exp(z_log_var / 2) turns the log-variance into a standard deviation
    return z_mean + K.exp(z_log_var / 2) * epsilon
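# Side check of the reparameterisation trick (illustrative, a plain-NumPy
# mirror of the backend expression above): over many samples, z has
# mean ~ mu and variance ~ exp(log_var).
mu, log_var = 1.0, 0.5
eps = np.random.normal(size=100000)
z_np = mu + np.exp(log_var / 2) * eps
print(z_np.mean(), z_np.var())  # roughly 1.0 and exp(0.5) ~= 1.65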
# note that "output_shape" isn't necessary with the TensorFlow backend
z = Lambda(sampling)([z_mean, z_log_var])
# we instantiate these layers separately so as to reuse them later
h_decoded = Dense(intermediate_dim, activation='relu')(z)
x_decoded = Dense(original_dim, activation='sigmoid')(h_decoded)
# instantiate VAE model
vae = Model(x, x_decoded)
# Compute VAE loss
reconstruction_loss = original_dim * metrics.binary_crossentropy(x, x_decoded)
kl_loss = -0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
vae_loss = K.mean(reconstruction_loss + kl_loss)
vae.add_loss(vae_loss)
vae.compile(optimizer='rmsprop')
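# Side calculation (illustrative, plain NumPy): the KL term above is the
# closed-form KL divergence from N(mu, exp(log_var)) to N(0, 1), summed over
# latent units; it vanishes when the posterior equals the prior and grows as
# the mean or variance drifts away.
mu = np.array([0.0, 1.0])
log_var = np.array([0.0, 0.0])
print(-0.5 * (1 + log_var - mu ** 2 - np.exp(log_var)))  # 0.0 for the prior, 0.5 for a unit shift in the mean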
vae.summary()
__________________________________________________________________________________________________
Layer (type)                    Output Shape         Param #     Connected to
==================================================================================================
input_8 (InputLayer)            (None, 784)          0
__________________________________________________________________________________________________
dense_12 (Dense)                (None, 256)          200960      input_8[0][0]
__________________________________________________________________________________________________
dense_13 (Dense)                (None, 32)           8224        dense_12[0][0]
__________________________________________________________________________________________________
dense_14 (Dense)                (None, 32)           8224        dense_12[0][0]
__________________________________________________________________________________________________
lambda_2 (Lambda)               (None, 32)           0           dense_13[0][0]
                                                                 dense_14[0][0]
__________________________________________________________________________________________________
dense_15 (Dense)                (None, 256)          8448        lambda_2[0][0]
__________________________________________________________________________________________________
dense_16 (Dense)                (None, 784)          201488      dense_15[0][0]
==================================================================================================
Total params: 427,344
Trainable params: 427,344
Non-trainable params: 0
__________________________________________________________________________________________________
vae.fit(X_train_flat,
        shuffle=True,
        epochs=epochs,
        batch_size=batch_size,
        validation_data=(X_test_flat, None))
Train on 60000 samples, validate on 10000 samples
Epoch 1/50
60000/60000 [==============================] - 4s 64us/step - loss: 159.8752 - val_loss: 130.9053
Epoch 2/50
60000/60000 [==============================] - 4s 70us/step - loss: 126.9431 - val_loss: 120.5206
...
Epoch 49/50
60000/60000 [==============================] - 3s 56us/step - loss: 104.4392 - val_loss: 105.1005
Epoch 50/50
60000/60000 [==============================] - 3s 55us/step - loss: 104.4380 - val_loss: 104.2996
one_seven = X_test_flat[0]
one_seven = np.expand_dims(one_seven, 0)
one_seven.shape
(1, 784)
one_seven = one_seven.repeat(4, axis=0)
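# The Lambda layer draws a fresh epsilon for every row, so these four
# identical inputs come back as four slightly different reconstructions.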
s = vae.predict(one_seven)
s.shape
(4, 784)
s = s.reshape(4, 28, 28)
fig = plt.figure(figsize=(8, 8))
columns = 2
rows = 2
for i in range(1, columns * rows + 1):
    img = s[i - 1]
    fig.add_subplot(rows, columns, i)
    plt.imshow(img)
plt.show()
# build a model to project inputs on the latent space
encoder = Model(x, z_mean)
# display a 2D plot of the digit classes, using the first two of the 32 latent dimensions
x_test_encoded = encoder.predict(X_test_flat, batch_size=batch_size)
plt.figure(figsize=(6, 6))
plt.scatter(x_test_encoded[:, 0], x_test_encoded[:, 1], c=y_test)
plt.colorbar()
plt.show()
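# A generator can be built the same way as the plain autoencoder's decoder
# earlier: reuse the VAE's last two Dense layers on a fresh latent input.
# This is a sketch and assumes the layer ordering produced by the Model above
# (vae.layers[-2] is the Dense(256), vae.layers[-1] the Dense(784)).
decoder_input = Input(shape=(latent_dim,))
_h_decoded = vae.layers[-2](decoder_input)
_x_decoded = vae.layers[-1](_h_decoded)
generator = Model(decoder_input, _x_decoded)
# decode a random point drawn from the prior into a new digit-like image
random_digit = generator.predict(np.random.normal(size=(1, latent_dim)))
plt.imshow(random_digit.reshape(28, 28))
plt.show()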