Python Machine Learning 3rd Edition by Sebastian Raschka & Vahid Mirjalili, Packt Publishing Ltd. 2019
Code Repository: https://github.com/rasbt/python-machine-learning-book-3rd-edition
Code License: MIT License
Note that the optional watermark extension is a small IPython notebook plugin that I developed to make the code reproducible. You can just skip the following line(s).
%load_ext watermark
%watermark -a "Sebastian Raschka & Vahid Mirjalili" -u -d -p numpy,scipy,matplotlib,tensorflow,tensorflow_datasets
Sebastian Raschka & Vahid Mirjalili
last updated: 2019-11-06

numpy 1.17.2
scipy 1.2.1
matplotlib 3.1.0
tensorflow 2.0.0
tensorflow_datasets 1.3.0
from IPython.display import Image
%matplotlib inline
## For running on Google Colab
# ! pip install -q tensorflow-gpu==2.0.0
# from google.colab import drive
# drive.mount('/content/drive/')
import tensorflow as tf
import tensorflow_datasets as tfds
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
print(tf.__version__)
print("GPU Available:", tf.test.is_gpu_available())
if tf.test.is_gpu_available():
    device_name = tf.test.gpu_device_name()
else:
    device_name = 'cpu:0'
print(device_name)
2.0.0
GPU Available: True
/device:GPU:0
Image(filename='images/17_12.png', width=700)
def make_dcgan_generator(
        z_size=100,
        output_size=(28, 28, 1),
        n_filters=64):
    hidden_size = (7, 7)
    model = tf.keras.Sequential()

    # z_size ==> 7*7*n_filters units (3136 for n_filters=64) ==> reshaped to 7x7x64
    model.add(tf.keras.layers.Dense(
        units=n_filters*np.prod(hidden_size), use_bias=False))
    model.add(tf.keras.layers.BatchNormalization())
    model.add(tf.keras.layers.LeakyReLU(alpha=0.0001))
    model.add(tf.keras.layers.Reshape(
        target_shape=(hidden_size[0], hidden_size[1], n_filters)))

    # 7x7x64 ==> 14x14x32
    model.add(tf.keras.layers.Conv2DTranspose(
        filters=n_filters//2, kernel_size=(3, 3), strides=(2, 2),
        padding='same', use_bias=False, activation=None))
    model.add(tf.keras.layers.BatchNormalization())
    model.add(tf.keras.layers.LeakyReLU(alpha=0.0001))
    model.add(tf.keras.layers.Dropout(0.5))

    # 14x14x32 ==> 28x28x16
    model.add(tf.keras.layers.Conv2DTranspose(
        filters=n_filters//4, kernel_size=(3, 3), strides=(2, 2),
        padding='same', use_bias=False, activation=None))
    model.add(tf.keras.layers.BatchNormalization())
    model.add(tf.keras.layers.LeakyReLU(alpha=0.0001))
    model.add(tf.keras.layers.Dropout(0.5))

    # 28x28x16 ==> 28x28x8
    model.add(tf.keras.layers.Conv2DTranspose(
        filters=n_filters//8, kernel_size=(3, 3), strides=(1, 1),
        padding='same', use_bias=False, activation=None))
    model.add(tf.keras.layers.BatchNormalization())
    model.add(tf.keras.layers.LeakyReLU(alpha=0.0001))
    model.add(tf.keras.layers.Dropout(0.5))

    # 28x28x8 ==> 28x28x1
    model.add(tf.keras.layers.Conv2DTranspose(
        filters=1, kernel_size=(3, 3), strides=(1, 1),
        padding='same', use_bias=False, activation='tanh'))

    return model
gen_model = make_dcgan_generator()
gen_model.build(input_shape=(None, 20))
gen_model.summary()
Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= dense (Dense) multiple 62720 _________________________________________________________________ batch_normalization (BatchNo multiple 12544 _________________________________________________________________ leaky_re_lu (LeakyReLU) multiple 0 _________________________________________________________________ reshape (Reshape) multiple 0 _________________________________________________________________ conv2d_transpose (Conv2DTran multiple 18432 _________________________________________________________________ batch_normalization_1 (Batch multiple 128 _________________________________________________________________ leaky_re_lu_1 (LeakyReLU) multiple 0 _________________________________________________________________ dropout (Dropout) multiple 0 _________________________________________________________________ conv2d_transpose_1 (Conv2DTr multiple 4608 _________________________________________________________________ batch_normalization_2 (Batch multiple 64 _________________________________________________________________ leaky_re_lu_2 (LeakyReLU) multiple 0 _________________________________________________________________ dropout_1 (Dropout) multiple 0 _________________________________________________________________ conv2d_transpose_2 (Conv2DTr multiple 1152 _________________________________________________________________ batch_normalization_3 (Batch multiple 32 _________________________________________________________________ leaky_re_lu_3 (LeakyReLU) multiple 0 _________________________________________________________________ dropout_2 (Dropout) multiple 0 _________________________________________________________________ conv2d_transpose_3 (Conv2DTr multiple 72 ================================================================= Total params: 99,752 Trainable params: 93,368 Non-trainable params: 6,384 _________________________________________________________________
Image(filename='images/17_13.png', width=700)
def make_dcgan_discriminator(
        input_size=(28, 28, 1),
        n_filters=64):
    hidden_size = (7, 7)
    model = tf.keras.Sequential()
    model.add(tf.keras.layers.Reshape(
        target_shape=(input_size[0], input_size[1], input_size[2])))

    # 28x28x1 ==> 14x14x8
    model.add(tf.keras.layers.Conv2D(
        filters=n_filters//8, kernel_size=(3, 3), strides=(2, 2),
        padding='same', use_bias=False, activation=None))
    model.add(tf.keras.layers.BatchNormalization())
    model.add(tf.keras.layers.LeakyReLU(alpha=0.0001))
    model.add(tf.keras.layers.Dropout(0.5))

    # 14x14x8 ==> 7x7x32
    model.add(tf.keras.layers.Conv2D(
        filters=n_filters//2, kernel_size=(3, 3), strides=(2, 2),
        padding='same', use_bias=False, activation=None))
    model.add(tf.keras.layers.BatchNormalization())
    model.add(tf.keras.layers.LeakyReLU(alpha=0.0001))
    model.add(tf.keras.layers.Dropout(0.5))

    # 7x7x32 ==> flattened (1568) ==> single logit
    model.add(tf.keras.layers.Reshape(
        target_shape=(np.prod([input_size[0]//4, input_size[1]//4, n_filters//2]),)))
    model.add(tf.keras.layers.Dense(
        units=1, use_bias=False))

    return model
disc_model = make_dcgan_discriminator()
disc_model.build(input_shape=(None, 28, 28, 1))
disc_model.summary()
Model: "sequential_1" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= reshape_1 (Reshape) multiple 0 _________________________________________________________________ conv2d (Conv2D) multiple 72 _________________________________________________________________ batch_normalization_4 (Batch multiple 32 _________________________________________________________________ leaky_re_lu_4 (LeakyReLU) multiple 0 _________________________________________________________________ dropout_3 (Dropout) multiple 0 _________________________________________________________________ conv2d_1 (Conv2D) multiple 2304 _________________________________________________________________ batch_normalization_5 (Batch multiple 128 _________________________________________________________________ leaky_re_lu_5 (LeakyReLU) multiple 0 _________________________________________________________________ dropout_4 (Dropout) multiple 0 _________________________________________________________________ reshape_2 (Reshape) multiple 0 _________________________________________________________________ dense_1 (Dense) multiple 1568 ================================================================= Total params: 4,104 Trainable params: 4,024 Non-trainable params: 80 _________________________________________________________________
mnist_bldr = tfds.builder('mnist')
mnist_bldr.download_and_prepare()
mnist = mnist_bldr.as_dataset(shuffle_files=False)
def preprocess(ex, mode='uniform'):
    image = ex['image']
    image = tf.image.convert_image_dtype(image, tf.float32)
    image = image*2 - 1.0

    if mode == 'uniform':
        input_z = tf.random.uniform(
            shape=(z_size,), minval=-1.0, maxval=1.0)
    elif mode == 'normal':
        input_z = tf.random.normal(shape=(z_size,))

    return input_z, image
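The scaling step image*2 - 1.0 maps the pixel intensities from [0, 1] to [-1, 1], so that the real images live in the same range as the generator's tanh output. A minimal check (not part of the original code):
## Verify the [-1, 1] rescaling used in preprocess():
x = tf.constant([0.0, 0.5, 1.0])
print((x*2 - 1.0).numpy())   # [-1.  0.  1.]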
num_epochs = 100
batch_size = 64
image_size = (28, 28)
z_size = 20
mode_z = 'uniform'
#gen_hidden_layers = 1
#gen_hidden_size = 100
#disc_hidden_layers = 1
#disc_hidden_size = 100
tf.random.set_seed(1)
np.random.seed(1)
if mode_z == 'uniform':
    fixed_z = tf.random.uniform(
        shape=(batch_size, z_size),
        minval=-1, maxval=1)
elif mode_z == 'normal':
    fixed_z = tf.random.normal(
        shape=(batch_size, z_size))
def create_samples(g_model, input_z):
    g_output = g_model(input_z, training=False)
    images = tf.reshape(g_output, (batch_size, *image_size))
    return (images+1)/2.0
## Set-up the dataset
mnist_trainset = mnist['train']
mnist_trainset = mnist_trainset.map(
lambda ex: preprocess(ex, mode=mode_z))
mnist_trainset = mnist_trainset.shuffle(10000)
#mnist_trainset = mnist_trainset.batch(
# batch_size, drop_remainder=True)
mnist_trainset = mnist_trainset.batch(
batch_size, drop_remainder=True).prefetch(tf.data.experimental.AUTOTUNE)
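As a hedged sanity check (not in the original notebook), pulling a single batch from the finished pipeline confirms the shapes the training loop expects, assuming z_size=20 and batch_size=64 as set above:
## Inspect one batch of (latent vector, real image) pairs:
for tmp_z, tmp_img in mnist_trainset.take(1):
    print(tmp_z.shape, tmp_img.shape)   # (64, 20) and (64, 28, 28, 1)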
import time
# Delete the previously instantiated
# objects that we have defined
# for printing the model summaries
del gen_model
del disc_model
## Set-up the model
with tf.device(device_name):
    gen_model = make_dcgan_generator()
    disc_model = make_dcgan_discriminator()
## Loss function and optimizers:
loss_fn = tf.keras.losses.BinaryCrossentropy(from_logits=True)
g_optimizer = tf.keras.optimizers.Adam()
d_optimizer = tf.keras.optimizers.Adam()
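Since BinaryCrossentropy is constructed with from_logits=True, the sigmoid is applied internally; with all-ones targets it reduces to -mean(log(sigmoid(logit))), i.e. the non-saturating generator loss used below. A small numeric illustration (my own, not from the book):
## BCE-from-logits with targets of 1 equals -mean(log(sigmoid(logits))):
tmp_logits = tf.constant([[2.0], [-1.0]])
bce_val = loss_fn(tf.ones_like(tmp_logits), tmp_logits)
manual_val = -tf.reduce_mean(tf.math.log(tf.sigmoid(tmp_logits)))
print(bce_val.numpy(), manual_val.numpy())   # both approx. 0.7201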
all_losses = []
all_d_vals = []
epoch_samples = []
start_time = time.time()
for epoch in range(1, num_epochs+1):
    epoch_losses, epoch_d_vals = [], []
    for i, (input_z, input_real) in enumerate(mnist_trainset):  # .take(4)

        ## Compute generator's loss
        with tf.GradientTape() as g_tape:
            g_output = gen_model(input_z)
            d_logits_fake = disc_model(g_output, training=True)
            g_loss = loss_fn(y_true=tf.ones_like(d_logits_fake),
                             y_pred=d_logits_fake)

        ## Compute the gradients of g_loss
        g_grads = g_tape.gradient(g_loss, gen_model.trainable_variables)
        g_optimizer.apply_gradients(
            grads_and_vars=zip(g_grads, gen_model.trainable_variables))

        ## Compute discriminator's loss
        with tf.GradientTape() as d_tape:
            d_logits_real = disc_model(input_real, training=True)
            d_logits_fake = disc_model(g_output, training=True)
            d_loss_real = loss_fn(y_true=tf.ones_like(d_logits_real),
                                  y_pred=d_logits_real)
            d_loss_fake = loss_fn(y_true=tf.zeros_like(d_logits_fake),
                                  y_pred=d_logits_fake)
            d_loss = d_loss_real + d_loss_fake

        ## Compute the gradients of d_loss
        d_grads = d_tape.gradient(d_loss, disc_model.trainable_variables)
        d_optimizer.apply_gradients(
            grads_and_vars=zip(d_grads, disc_model.trainable_variables))

        epoch_losses.append(
            (g_loss.numpy(), d_loss.numpy(),
             d_loss_real.numpy(), d_loss_fake.numpy()))

        d_probs_real = tf.reduce_mean(tf.sigmoid(d_logits_real))
        d_probs_fake = tf.reduce_mean(tf.sigmoid(d_logits_fake))
        epoch_d_vals.append((d_probs_real.numpy(), d_probs_fake.numpy()))

    all_losses.append(epoch_losses)
    all_d_vals.append(epoch_d_vals)
    print(
        'Epoch {:03d} | ET {:.2f} min | Avg Losses >>'
        ' G/D {:.4f}/{:.4f} [D-Real: {:.4f} D-Fake: {:.4f}]'
        .format(
            epoch, (time.time() - start_time)/60,
            *list(np.mean(all_losses[-1], axis=0))))

    epoch_samples.append(
        create_samples(gen_model, fixed_z).numpy())
Epoch 001 | ET 1.17 min | Avg Losses >> G/D 1.4423/1.2245 [D-Real: 0.6096 D-Fake: 0.6149]
Epoch 002 | ET 2.30 min | Avg Losses >> G/D 1.5769/0.9223 [D-Real: 0.4605 D-Fake: 0.4618]
Epoch 003 | ET 3.44 min | Avg Losses >> G/D 2.2352/0.5379 [D-Real: 0.2684 D-Fake: 0.2696]
Epoch 004 | ET 4.58 min | Avg Losses >> G/D 1.4573/1.0198 [D-Real: 0.5179 D-Fake: 0.5020]
Epoch 005 | ET 5.73 min | Avg Losses >> G/D 1.2001/1.0968 [D-Real: 0.5515 D-Fake: 0.5453]
Epoch 006 | ET 6.86 min | Avg Losses >> G/D 1.0188/1.2351 [D-Real: 0.6220 D-Fake: 0.6131]
Epoch 007 | ET 8.01 min | Avg Losses >> G/D 0.9194/1.3020 [D-Real: 0.6565 D-Fake: 0.6454]
Epoch 008 | ET 9.15 min | Avg Losses >> G/D 0.8732/1.3115 [D-Real: 0.6674 D-Fake: 0.6441]
Epoch 009 | ET 10.30 min | Avg Losses >> G/D 0.8083/1.3442 [D-Real: 0.6789 D-Fake: 0.6653]
Epoch 010 | ET 11.45 min | Avg Losses >> G/D 0.7985/1.3474 [D-Real: 0.6790 D-Fake: 0.6684]
...
Epoch 096 | ET 110.14 min | Avg Losses >> G/D 0.6992/1.3852 [D-Real: 0.6958 D-Fake: 0.6893]
Epoch 097 | ET 111.29 min | Avg Losses >> G/D 0.7088/1.3778 [D-Real: 0.6881 D-Fake: 0.6898]
Epoch 098 | ET 112.44 min | Avg Losses >> G/D 0.7067/1.3890 [D-Real: 0.7023 D-Fake: 0.6868]
Epoch 099 | ET 113.59 min | Avg Losses >> G/D 0.6960/1.3856 [D-Real: 0.6938 D-Fake: 0.6918]
Epoch 100 | ET 114.74 min | Avg Losses >> G/D 0.6959/1.3857 [D-Real: 0.6939 D-Fake: 0.6918]
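The loop above runs eagerly, which makes the per-batch Python overhead visible in the epoch times. As a sketch only (this is not how the book trains the model), the two updates could be wrapped in a tf.function-decorated train step that reuses the gen_model, disc_model, loss_fn, and optimizers defined above:
## Optional sketch: graph-compiled train step (essentially the same logic as the loop above).
@tf.function
def train_step(input_z, input_real):
    ## Generator update
    with tf.GradientTape() as g_tape:
        g_output = gen_model(input_z, training=True)
        d_logits_fake = disc_model(g_output, training=True)
        g_loss = loss_fn(tf.ones_like(d_logits_fake), d_logits_fake)
    g_grads = g_tape.gradient(g_loss, gen_model.trainable_variables)
    g_optimizer.apply_gradients(zip(g_grads, gen_model.trainable_variables))
    ## Discriminator update
    with tf.GradientTape() as d_tape:
        d_logits_real = disc_model(input_real, training=True)
        d_logits_fake = disc_model(g_output, training=True)
        d_loss = (loss_fn(tf.ones_like(d_logits_real), d_logits_real) +
                  loss_fn(tf.zeros_like(d_logits_fake), d_logits_fake))
    d_grads = d_tape.gradient(d_loss, disc_model.trainable_variables)
    d_optimizer.apply_gradients(zip(d_grads, disc_model.trainable_variables))
    return g_loss, d_loss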
#import pickle
#pickle.dump({'all_losses':all_losses,
# 'all_d_vals':all_d_vals,
# 'samples':epoch_samples},
# open('/content/drive/My Drive/Colab Notebooks/PyML-3rd-edition/ch17-dcgan-learning.pkl', 'wb'))
#gen_model.save('/content/drive/My Drive/Colab Notebooks/PyML-3rd-edition/ch17-dcgan_gen.h5')
#disc_model.save('/content/drive/My Drive/Colab Notebooks/PyML-3rd-edition/ch17-dcgan_disc.h5')
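If the commented-out save calls are enabled, the trained generator can later be restored and sampled from again; the example below is hedged and uses a hypothetical local path, and is kept commented because no file is saved by default.
## Hypothetical reload of a saved generator (path is an example only):
# reloaded_gen = tf.keras.models.load_model('ch17-dcgan_gen.h5')
# new_z = tf.random.uniform(shape=(batch_size, z_size), minval=-1, maxval=1)
# new_images = create_samples(reloaded_gen, new_z)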
import itertools
fig = plt.figure(figsize=(16, 6))
## Plotting the losses
ax = fig.add_subplot(1, 2, 1)
g_losses = [item[0] for item in itertools.chain(*all_losses)]
d_losses = [item[1]/2.0 for item in itertools.chain(*all_losses)]
plt.plot(g_losses, label='Generator loss', alpha=0.95)
plt.plot(d_losses, label='Discriminator loss', alpha=0.95)
plt.legend(fontsize=20)
ax.set_xlabel('Iteration', size=15)
ax.set_ylabel('Loss', size=15)
epochs = np.arange(1, 101)
epoch2iter = lambda e: e*len(all_losses[-1])
epoch_ticks = [1, 20, 40, 60, 80, 100]
newpos = [epoch2iter(e) for e in epoch_ticks]
ax2 = ax.twiny()
ax2.set_xticks(newpos)
ax2.set_xticklabels(epoch_ticks)
ax2.xaxis.set_ticks_position('bottom')
ax2.xaxis.set_label_position('bottom')
ax2.spines['bottom'].set_position(('outward', 60))
ax2.set_xlabel('Epoch', size=15)
ax2.set_xlim(ax.get_xlim())
ax.tick_params(axis='both', which='major', labelsize=15)
ax2.tick_params(axis='both', which='major', labelsize=15)
## Plotting the outputs of the discriminator
ax = fig.add_subplot(1, 2, 2)
d_vals_real = [item[0] for item in itertools.chain(*all_d_vals)]
d_vals_fake = [item[1] for item in itertools.chain(*all_d_vals)]
plt.plot(d_vals_real, alpha=0.75, label=r'Real: $D(\mathbf{x})$')
plt.plot(d_vals_fake, alpha=0.75, label=r'Fake: $D(G(\mathbf{z}))$')
plt.legend(fontsize=20)
ax.set_xlabel('Iteration', size=15)
ax.set_ylabel('Discriminator output', size=15)
ax2 = ax.twiny()
ax2.set_xticks(newpos)
ax2.set_xticklabels(epoch_ticks)
ax2.xaxis.set_ticks_position('bottom')
ax2.xaxis.set_label_position('bottom')
ax2.spines['bottom'].set_position(('outward', 60))
ax2.set_xlabel('Epoch', size=15)
ax2.set_xlim(ax.get_xlim())
ax.tick_params(axis='both', which='major', labelsize=15)
ax2.tick_params(axis='both', which='major', labelsize=15)
#plt.savefig('images/ch17-dcgan-learning-curve.pdf')
plt.show()
selected_epochs = [1, 2, 4, 10, 50, 100]
fig = plt.figure(figsize=(10, 14))
for i, e in enumerate(selected_epochs):
    for j in range(5):
        ax = fig.add_subplot(6, 5, i*5+j+1)
        ax.set_xticks([])
        ax.set_yticks([])
        if j == 0:
            ax.text(
                -0.06, 0.5, 'Epoch {}'.format(e),
                rotation=90, size=18, color='red',
                horizontalalignment='right',
                verticalalignment='center',
                transform=ax.transAxes)
        image = epoch_samples[e-1][j]
        ax.imshow(image, cmap='gray_r')
#plt.savefig('images/ch17-dcgan-samples.pdf')
plt.show()
! python ../.convert_notebook_to_script.py --input ch17_optional_DCGAN.ipynb --output ch17_optional_DCGAN.py
[NbConvertApp] Converting notebook ch17_optional_DCGAN.ipynb to script [NbConvertApp] Writing 15401 bytes to ch17_optional_DCGAN.py