# for tf 2.0
#!pip install -U tensorflow-gpu
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import os
import PIL
import time
from skimage.io import imshow
from IPython.display import display
# --- Notebook transcript: the bare-expression lines below are followed by an
# --- un-commented echo of their output (e.g. '2.0.0', the AxesImage repr).
# --- Those echo lines are cell-output residue, not executable statements, and
# --- would not parse if this transcript were run as a .py script.
tf.__version__
'2.0.0'
# Load MNIST: 60k train / 10k test grayscale digits, each 28x28 uint8.
(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.mnist.load_data()
train_images.dtype, train_images.shape
(dtype('uint8'), (60000, 28, 28))
# Sanity check: display the first training digit.
imshow(train_images[0])
<matplotlib.image.AxesImage at 0x7f91d40f8198>
def img_to_float(img):
    """Scale a uint8 image (0..255) into float32 in [-1, 1] and append a
    trailing channel axis (..., 1), matching the generator's tanh range."""
    with_channel = np.float32(img)[..., None]
    return (with_channel - 127.5) / 127.5
def img_to_uint8(img):
    """Invert img_to_float: map floats in [-1, 1] back to uint8 pixels (0..255)
    and drop the trailing channel axis.

    Fix: clip BEFORE the uint8 cast. The original cast first and clipped after,
    so any value outside [0, 255] had already wrapped modulo 256 (e.g. an input
    of -2.0 produced 129, not 0), making the clip a no-op. In-range inputs are
    unaffected by this change.
    """
    return np.uint8((img * 127.5 + 128).clip(0, 255))[..., 0]
# Normalize the whole training set to float32 in [-1, 1] (matches the
# generator's tanh output range), then round-trip one image as a sanity check.
train_img_f32 = img_to_float(train_images)
imshow(img_to_uint8(train_img_f32[0]))
<matplotlib.image.AxesImage at 0x7f91c07ef0f0>
# Shuffle buffer spans the full training set, giving a perfect shuffle.
BUFFER_SIZE = train_img_f32.shape[0]
BATCH_SIZE = 32

# Pair each image with a one-hot label, shuffle across the whole set, batch.
train_dataset_x = tf.data.Dataset.from_tensor_slices(train_img_f32)
train_dataset_y = tf.data.Dataset.from_tensor_slices(train_labels).map(
    lambda label: tf.one_hot(label, 10)
)
train_dataset = (
    tf.data.Dataset.zip((train_dataset_x, train_dataset_y))
    .shuffle(BUFFER_SIZE)
    .batch(BATCH_SIZE)
)
from tensorflow.keras.layers import Dense, BatchNormalization, LeakyReLU, Reshape, Conv2DTranspose, Input, concatenate
from tensorflow.keras import Model
latent_dim = 100

# Generator: (noise vector, one-hot class) -> 28x28x1 image in [-1, 1].
# Upsampling path: 7x7x256 -> 7x7x128 -> 14x14x64 -> 28x28x1 (tanh).
# Fix: dropped the misleading `input_shape=(latent_dim,)` kwarg on the first
# Dense layer — in the functional API the layer's input shape comes from the
# tensor it is called on (the concatenation, latent_dim + 10 = 110 wide), so
# the kwarg was ignored and wrong.
noise_in = Input((latent_dim,))
label_in = Input((10,))
x = concatenate([noise_in, label_in])
x = Dense(7 * 7 * 256, use_bias=False)(x)
x = BatchNormalization()(x)
x = LeakyReLU()(x)
x = Reshape((7, 7, 256))(x)
x = Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False)(x)
x = BatchNormalization()(x)
x = LeakyReLU()(x)
x = Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False)(x)
x = BatchNormalization()(x)
x = LeakyReLU()(x)
# Final layer: tanh keeps outputs in [-1, 1], matching img_to_float's scaling.
x = Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='tanh')(x)
generator = Model(inputs=[noise_in, label_in], outputs=x)
from tensorflow.keras.layers import Conv2D, Dropout, Flatten
# Discriminator: 28x28x1 image -> (real/fake logit, 10 class logits).
# Two strided conv stages downsample 28x28 -> 14x14 -> 7x7 before the heads.
img_in = Input((28, 28, 1))
h = Conv2D(64, (5, 5), strides=(2, 2), padding='same')(img_in)
h = LeakyReLU()(h)
h = Dropout(0.3)(h)
h = Conv2D(128, (5, 5), strides=(2, 2), padding='same')(h)
h = LeakyReLU()(h)
h = Dropout(0.3)(h)
h = Flatten()(h)
# Both heads emit raw logits; the losses below use from_logits=True.
real_logit = Dense(1)(h)
class_logits = Dense(10)(h)
discriminator = Model(inputs=img_in, outputs=[real_logit, class_logits])
# Both discriminator heads output raw logits (no activation on the final
# Dense layers), hence from_logits=True on both loss objects.
BCE = tf.keras.losses.BinaryCrossentropy(from_logits=True)
CCE = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
def generator_loss(generated_output, labels):
    """Generator objective: fool the real/fake head AND hit the right class.

    generated_output: (real/fake logits, class logits) — the discriminator's
    two outputs on generated images.
    labels: one-hot class conditions the images were generated with.
    """
    d_logits, c_logits = generated_output
    # The generator wants its fakes scored as real (target = all ones)...
    adversarial = BCE(tf.ones_like(d_logits), d_logits)
    # ...and classified as the class it was conditioned on.
    classification = CCE(labels, c_logits)
    return adversarial + classification
def discriminator_loss(real_output, generated_output, labels):
    """Discriminator objective: score real images as 1 with the correct class,
    and generated images as 0.

    NOTE(review): no class loss is applied to the generated images here; some
    AC-GAN formulations include one — confirm this is intentional.
    """
    # Real images: target 1 on the real/fake head, plus the class term.
    real_d, real_c = real_output
    loss_on_real = BCE(tf.ones_like(real_d), real_d) + CCE(labels, real_c)
    # Generated images: target 0 on the real/fake head only.
    fake_d, _fake_c = generated_output
    loss_on_fake = BCE(tf.zeros_like(fake_d), fake_d)
    return loss_on_real + loss_on_fake
# Separate Adam optimizers (small LR) for the two networks.
generator_optimizer = tf.keras.optimizers.Adam(1e-4)
discriminator_optimizer = tf.keras.optimizers.Adam(1e-4)
# NOTE(review): EPOCHS is never used — the training loop below hard-codes
# range(15); confirm which count is intended.
EPOCHS = 50
num_examples_to_generate = 20
# We'll re-use this random vector used to seed the generator so
# it will be easier to see the improvement over time.
random_vector_for_generation = tf.random.normal([num_examples_to_generate,
latent_dim])
# Fixed class conditions: digits 0-9 twice, matching the 20 seed vectors.
condition_vector_generation = tf.one_hot(list(range(10))+list(range(10)), 10)
@tf.function
def train_step(images, labels):
    """One simultaneous gradient update of both networks on a single batch.

    images: batch of real images in [-1, 1]; labels: matching one-hot classes.
    Both forward passes run under their own tape so each network's gradients
    are computed from the same generated batch.
    """
    # Fresh latent noise for this batch.
    z = tf.random.normal([BATCH_SIZE, latent_dim])
    with tf.GradientTape() as g_tape, tf.GradientTape() as d_tape:
        fakes = generator([z, labels], training=True)
        real_out = discriminator(images, training=True)
        fake_out = discriminator(fakes, training=True)
        g_loss = generator_loss(fake_out, labels)
        d_loss = discriminator_loss(real_out, fake_out, labels)
    g_grads = g_tape.gradient(g_loss, generator.trainable_variables)
    d_grads = d_tape.gradient(d_loss, discriminator.trainable_variables)
    generator_optimizer.apply_gradients(zip(g_grads, generator.trainable_variables))
    discriminator_optimizer.apply_gradients(zip(d_grads, discriminator.trainable_variables))
# Training loop: one full pass over train_dataset per epoch, then render a
# fixed preview grid (same seeds and class conditions each epoch) so progress
# is visually comparable across epochs.
# NOTE(review): runs 15 epochs, ignoring the EPOCHS = 50 constant defined
# earlier — confirm which count is intended.
for epoch in range(15):
start_time = time.time()
for images, labels in train_dataset:
train_step(images, labels)
# training=False puts BatchNorm into inference mode for the preview.
fake = generator([random_vector_for_generation, condition_vector_generation], training=False)
# (20, 28, 28) -> transpose to (28, 20, 28) -> reshape into one 28x560 strip.
fake_concat = np.transpose(img_to_uint8(fake), [1,0,2]).reshape((28,-1))
print(epoch, time.time()-start_time)
display(PIL.Image.fromarray(fake_concat))
0 16.534207582473755
1 11.346590757369995
2 11.369076013565063
3 11.440730094909668
4 11.184944868087769
5 11.39745545387268
6 11.733301401138306
7 11.54923415184021
8 11.390678405761719
9 11.645015954971313
10 11.33017086982727
11 11.41501784324646
12 11.527951955795288
13 11.528112411499023
14 11.430882453918457