%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from time import time
from keras.models import Model, Sequential
from keras.optimizers import Adam
import keras.backend as K
from keras.utils.generic_utils import Progbar
from model import *
/home/mathlab115/anaconda3/lib/python3.6/site-packages/h5py/__init__.py:34: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`. from ._conv import register_converters as _register_converters Using TensorFlow backend.
# Limit TensorFlow GPU memory usage: allocate memory incrementally
# (allow_growth) instead of grabbing all GPU memory up front, then
# register the configured session as the Keras backend session.
import tensorflow as tf
config = tf.ConfigProto()
config.gpu_options.allow_growth=True
sess = tf.Session(config=config)
K.set_session(sess)
# Load CIFAR-10 and merge the test and train splits into a single pool
# of 60000 32x32 RGB images. Labels are discarded — this is unconditional
# GAN training, so only the images are used.
from keras.datasets import cifar100, cifar10
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
X = np.concatenate((x_test,x_train))
X.shape
(60000, 32, 32, 3)
# Sanity check: display one arbitrary sample from the merged dataset.
plt.imshow(X[9487])
<matplotlib.image.AxesImage at 0x7f7904346780>
# Hyperparameters
BATCHSIZE=64
LEARNING_RATE = 0.0002  # Adam learning rate shared by generator and discriminator
TRAINING_RATIO = 1  # discriminator updates per generator update
BETA_1 = 0.0  # Adam beta_1
BETA_2 = 0.9  # Adam beta_2
EPOCHS = 500
# NOTE(review): "MIMENTUM" is a typo for "MOMENTUM"; name kept because it
# is referenced when building the generator below.
BN_MIMENTUM = 0.9  # batch-norm momentum passed to the generator
BN_EPSILON = 0.00002  # batch-norm epsilon passed to the generator
SAVE_DIR = 'img/generated_img_CIFAR10_ResNet/'  # output directory for sample grids
GENERATE_ROW_NUM = 8  # sample grid is GENERATE_ROW_NUM x GENERATE_ROW_NUM images
GENERATE_BATCHSIZE = GENERATE_ROW_NUM*GENERATE_ROW_NUM
def wasserstein_loss(y_true, y_pred):
    """Wasserstein (critic) loss: mean of label-weighted critic scores.

    With labels of +1 for one distribution and -1 for the other,
    minimizing this pushes the critic's mean outputs apart.
    """
    weighted_scores = y_true * y_pred
    return K.mean(weighted_scores)
# Build the two networks (defined in model.py) and the stacked model used
# to train the generator: noise -> generator -> discriminator -> score.
generator = BuildGenerator(bn_momentum=BN_MIMENTUM, bn_epsilon=BN_EPSILON)
discriminator = BuildDiscriminator()
Noise_input_for_training_generator = Input(shape=(128,))
Generated_image = generator(Noise_input_for_training_generator)
Discriminator_output = discriminator(Generated_image)
model_for_training_generator = Model(Noise_input_for_training_generator, Discriminator_output)
print("model_for_training_generator")
# Freeze the discriminator BEFORE compiling: Keras snapshots trainable
# flags at compile time, so this model only updates generator weights
# (confirmed by the summary: 4,276,739 trainable of 5,367,172 total).
discriminator.trainable = False
model_for_training_generator.summary()
model_for_training_generator.compile(optimizer=Adam(LEARNING_RATE, beta_1=BETA_1, beta_2=BETA_2), loss=wasserstein_loss)
Generator _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= input_1 (InputLayer) (None, 128) 0 _________________________________________________________________ dense_1 (Dense) (None, 4096) 528384 _________________________________________________________________ reshape_1 (Reshape) (None, 4, 4, 256) 0 _________________________________________________________________ Generator_resblock_1 (Model) (None, 8, 8, 256) 1248000 _________________________________________________________________ Generator_resblock_2 (Model) (None, 16, 16, 256) 1248000 _________________________________________________________________ Generator_resblock_3 (Model) (None, 32, 32, 256) 1248000 _________________________________________________________________ batch_normalization_7 (Batch (None, 32, 32, 256) 1024 _________________________________________________________________ activation_7 (Activation) (None, 32, 32, 256) 0 _________________________________________________________________ conv2d_10 (Conv2D) (None, 32, 32, 3) 6915 ================================================================= Total params: 4,280,323 Trainable params: 4,276,739 Non-trainable params: 3,584 _________________________________________________________________ Discriminator Spectral Normalization: True _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= input_5 (InputLayer) (None, 32, 32, 3) 0 _________________________________________________________________ Discriminator_resblock_Down_ (None, 16, 16, 128) 151680 _________________________________________________________________ Discriminator_resblock_Down_ (None, 8, 8, 128) 311680 _________________________________________________________________ Discriminator_resblock_1 (Mo (None, 8, 8, 128) 311680 
_________________________________________________________________ Discriminator_resblock_2 (Mo (None, 8, 8, 128) 311680 _________________________________________________________________ activation_16 (Activation) (None, 8, 8, 128) 0 _________________________________________________________________ global_average_pooling2d_1 ( (None, 128) 0 _________________________________________________________________ dense_sn_1 (DenseSN) (None, 1) 129 ================================================================= Total params: 1,086,849 Trainable params: 1,086,849 Non-trainable params: 0 _________________________________________________________________ model_for_training_generator _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= input_10 (InputLayer) (None, 128) 0 _________________________________________________________________ Generator (Model) (None, 32, 32, 3) 4280323 _________________________________________________________________ Discriminator (Model) (None, 1) 1086849 ================================================================= Total params: 5,367,172 Trainable params: 4,276,739 Non-trainable params: 1,090,433 _________________________________________________________________
# Stacked model used to train the discriminator: it scores a batch of
# real images and a batch of freshly generated fakes in one pass, with
# one wasserstein_loss term per output.
Real_image = Input(shape=(32,32,3))
Noise_input_for_training_discriminator = Input(shape=(128,))
Fake_image = generator(Noise_input_for_training_discriminator)
Discriminator_output_for_real = discriminator(Real_image)
Discriminator_output_for_fake = discriminator(Fake_image)
model_for_training_discriminator = Model([Real_image,
                                          Noise_input_for_training_discriminator],
                                         [Discriminator_output_for_real,
                                          Discriminator_output_for_fake])
print("model_for_training_discriminator")
# Mirror of the generator model: freeze the generator before compiling so
# only discriminator weights are updated (trainable flags are fixed at
# compile time; the summary shows 1,086,849 trainable params).
generator.trainable = False
discriminator.trainable = True
model_for_training_discriminator.compile(optimizer=Adam(LEARNING_RATE, beta_1=BETA_1, beta_2=BETA_2), loss=[wasserstein_loss, wasserstein_loss])
model_for_training_discriminator.summary()
model_for_training_discriminator ____________________________________________________________________________________________________ Layer (type) Output Shape Param # Connected to ==================================================================================================== input_12 (InputLayer) (None, 128) 0 ____________________________________________________________________________________________________ input_11 (InputLayer) (None, 32, 32, 3) 0 ____________________________________________________________________________________________________ Generator (Model) (None, 32, 32, 3) 4280323 input_12[0][0] ____________________________________________________________________________________________________ Discriminator (Model) (None, 1) 1086849 input_11[0][0] Generator[2][0] ==================================================================================================== Total params: 5,367,172 Trainable params: 1,086,849 Non-trainable params: 4,280,323 ____________________________________________________________________________________________________
# Wasserstein labels: +1 for real samples, -1 for fakes.
real_y = np.ones((BATCHSIZE, 1), dtype=np.float32)
fake_y = -real_y
# Rescale pixels from [0, 255] to [-1, 1] to match the generator's output
# range (presumably tanh — confirm in model.py).
X = X/255*2-1
# Fixed noise reused every epoch so sample grids are comparable over time.
# NOTE(review): GENERATE_BATCHSIZE (64) happens to equal BATCHSIZE; the
# evaluate() calls in the loop below pair test_noise with real_y/fake_y and
# silently depend on that coincidence.
test_noise = np.random.randn(GENERATE_BATCHSIZE, 128)
W_loss = []
discriminator_loss = []
generator_loss = []
for epoch in range(EPOCHS):
    # Shuffle the full dataset in place each epoch.
    np.random.shuffle(X)
    print("epoch {} of {}".format(epoch+1, EPOCHS))
    num_batches = int(X.shape[0] // BATCHSIZE)  # NOTE(review): computed but never used
    print("number of batches: {}".format(int(X.shape[0] // (BATCHSIZE))))
    progress_bar = Progbar(target=int(X.shape[0] // (BATCHSIZE * TRAINING_RATIO)))
    # Each outer step consumes TRAINING_RATIO discriminator minibatches
    # followed by one generator update.
    minibatches_size = BATCHSIZE * TRAINING_RATIO
    start_time = time()
    for index in range(int(X.shape[0] // (BATCHSIZE * TRAINING_RATIO))):
        progress_bar.update(index)
        discriminator_minibatches = X[index * minibatches_size:(index + 1) * minibatches_size]
        for j in range(TRAINING_RATIO):
            image_batch = discriminator_minibatches[j * BATCHSIZE : (j + 1) * BATCHSIZE]
            noise = np.random.randn(BATCHSIZE, 128).astype(np.float32)
            # NOTE(review): flipping .trainable after compile() has no effect
            # in Keras until the model is recompiled — the flags captured at
            # compile time above are what actually govern the updates. These
            # toggles appear to be no-ops; confirm for the Keras version used.
            discriminator.trainable = True
            generator.trainable = False
            discriminator_loss.append(model_for_training_discriminator.train_on_batch([image_batch, noise],
                                                                                      [real_y, fake_y]))
        discriminator.trainable = False
        generator.trainable = True
        # Generator update: fresh noise, labeled +1 so the generator drives
        # the (frozen) discriminator's score toward "real".
        generator_loss.append(model_for_training_generator.train_on_batch(np.random.randn(BATCHSIZE, 128), real_y))
    print('\nepoch time: {}'.format(time()-start_time))
    # NOTE(review): W_real and W_fake evaluate the SAME test_noise with +1
    # and -1 labels, so W_fake = -W_real and their sum is ~0 by construction
    # (the logged values confirm this) — this is probably not the intended
    # Wasserstein estimate, which should compare real images against fakes.
    W_real = model_for_training_generator.evaluate(test_noise, real_y)
    print(W_real)
    W_fake = model_for_training_generator.evaluate(test_noise, fake_y)
    print(W_fake)
    W_l = W_real+W_fake
    print('wasserstein_loss: {}'.format(W_l))
    W_loss.append(W_l)
    # Generate a GENERATE_ROW_NUM x GENERATE_ROW_NUM grid of samples from
    # the fixed noise and save it as one PNG per epoch.
    generated_image = generator.predict(test_noise)
    generated_image = (generated_image+1)/2  # map [-1, 1] back to [0, 1] for imsave
    for i in range(GENERATE_ROW_NUM):
        # Stack one row's images vertically, then concatenate rows sideways.
        new = generated_image[i*GENERATE_ROW_NUM:i*GENERATE_ROW_NUM+GENERATE_ROW_NUM].reshape(32*GENERATE_ROW_NUM,32,3)
        if i!=0:
            old = np.concatenate((old,new),axis=1)
        else:
            old = new
    print('plot generated_image')
    plt.imsave('{}/SN_epoch_{}.png'.format(SAVE_DIR, epoch), old)
epoch 1 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.94880771636963 32/64 [==============>...............] - ETA: 0s1.4451154470443726 32/64 [==============>...............] - ETA: 0s-1.4451155066490173 wasserstein_loss: -5.960464477539063e-08 plot generated_image epoch 2 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 132.20580124855042 32/64 [==============>...............] - ETA: 0s-0.1759192794561386 32/64 [==============>...............] - ETA: 0s0.17591925710439682 wasserstein_loss: -2.2351741790771484e-08 plot generated_image epoch 3 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 131.93839740753174 32/64 [==============>...............] - ETA: 0s3.2876545190811157 32/64 [==============>...............] - ETA: 0s-3.2876555919647217 wasserstein_loss: -1.0728836059570312e-06 plot generated_image epoch 4 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 133.41933369636536 32/64 [==============>...............] - ETA: 0s-0.14881636202335358 32/64 [==============>...............] - ETA: 0s0.14881636202335358 wasserstein_loss: 0.0 plot generated_image epoch 5 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.3112678527832 32/64 [==============>...............] - ETA: 0s-0.028686107136309147 32/64 [==============>...............] - ETA: 0s0.028686104342341423 wasserstein_loss: -2.7939677238464355e-09 plot generated_image epoch 6 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.40845894813538 32/64 [==============>...............] - ETA: 0s-0.09372971951961517 32/64 [==============>...............] - ETA: 0s0.09372971206903458 wasserstein_loss: -7.450580596923828e-09 plot generated_image epoch 7 of 500 number of batches: 937 936/937 [============================>.] 
- ETA: 0s epoch time: 135.31502103805542 32/64 [==============>...............] - ETA: 0s-0.04031790792942047 32/64 [==============>...............] - ETA: 0s0.04031790792942047 wasserstein_loss: 0.0 plot generated_image epoch 8 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.19004201889038 32/64 [==============>...............] - ETA: 0s-0.03722946532070637 32/64 [==============>...............] - ETA: 0s0.03722946345806122 wasserstein_loss: -1.862645149230957e-09 plot generated_image epoch 9 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.34114503860474 32/64 [==============>...............] - ETA: 0s-0.07468081265687943 32/64 [==============>...............] - ETA: 0s0.07468081265687943 wasserstein_loss: 0.0 plot generated_image epoch 10 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.26677441596985 32/64 [==============>...............] - ETA: 0s-0.06679900735616684 32/64 [==============>...............] - ETA: 0s0.06679900735616684 wasserstein_loss: 0.0 plot generated_image epoch 11 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.4104232788086 32/64 [==============>...............] - ETA: 0s-0.07402388378977776 32/64 [==============>...............] - ETA: 0s0.07402388378977776 wasserstein_loss: 0.0 plot generated_image epoch 12 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.35877871513367 32/64 [==============>...............] - ETA: 0s-0.06437696516513824 32/64 [==============>...............] - ETA: 0s0.06437696143984795 wasserstein_loss: -3.725290298461914e-09 plot generated_image epoch 13 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.47368812561035 32/64 [==============>...............] - ETA: 0s-0.06496286764740944 32/64 [==============>...............] 
- ETA: 0s0.06496286764740944 wasserstein_loss: 0.0 plot generated_image epoch 14 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.35207867622375 32/64 [==============>...............] - ETA: 0s-0.06923170387744904 32/64 [==============>...............] - ETA: 0s0.06923170387744904 wasserstein_loss: 0.0 plot generated_image epoch 15 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.4222342967987 32/64 [==============>...............] - ETA: 0s-0.07500187680125237 32/64 [==============>...............] - ETA: 0s0.07500187307596207 wasserstein_loss: -3.725290298461914e-09 plot generated_image epoch 16 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.20164108276367 32/64 [==============>...............] - ETA: 0s-0.07157521322369576 32/64 [==============>...............] - ETA: 0s0.07157521322369576 wasserstein_loss: 0.0 plot generated_image epoch 17 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.54905438423157 32/64 [==============>...............] - ETA: 0s-0.06725597754120827 32/64 [==============>...............] - ETA: 0s0.06725598871707916 wasserstein_loss: 1.1175870895385742e-08 plot generated_image epoch 18 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.36335039138794 32/64 [==============>...............] - ETA: 0s-0.06717506051063538 32/64 [==============>...............] - ETA: 0s0.06717505678534508 wasserstein_loss: -3.725290298461914e-09 plot generated_image epoch 19 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.7008762359619 32/64 [==============>...............] - ETA: 0s-0.06350045651197433 32/64 [==============>...............] 
- ETA: 0s0.06350045651197433 wasserstein_loss: 0.0 plot generated_image epoch 20 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.35547423362732 32/64 [==============>...............] - ETA: 0s-0.06133383326232433 32/64 [==============>...............] - ETA: 0s0.061333831399679184 wasserstein_loss: -1.862645149230957e-09 plot generated_image epoch 21 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.38607931137085 32/64 [==============>...............] - ETA: 0s-0.05890892446041107 32/64 [==============>...............] - ETA: 0s0.05890892446041107 wasserstein_loss: 0.0 plot generated_image epoch 22 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.52250981330872 32/64 [==============>...............] - ETA: 0s-0.05670524388551712 32/64 [==============>...............] - ETA: 0s0.05670524388551712 wasserstein_loss: 0.0 plot generated_image epoch 23 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.33417344093323 32/64 [==============>...............] - ETA: 0s-0.053800102323293686 32/64 [==============>...............] - ETA: 0s0.053800102323293686 wasserstein_loss: 0.0 plot generated_image epoch 24 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.33403158187866 32/64 [==============>...............] - ETA: 0s-0.05269063822925091 32/64 [==============>...............] - ETA: 0s0.05269063822925091 wasserstein_loss: 0.0 plot generated_image epoch 25 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.1869478225708 32/64 [==============>...............] - ETA: 0s-0.052681947126984596 32/64 [==============>...............] - ETA: 0s0.05268194153904915 wasserstein_loss: -5.587935447692871e-09 plot generated_image epoch 26 of 500 number of batches: 937 936/937 [============================>.] 
- ETA: 0s epoch time: 135.18537831306458 32/64 [==============>...............] - ETA: 0s-0.05041646212339401 32/64 [==============>...............] - ETA: 0s0.05041646212339401 wasserstein_loss: 0.0 plot generated_image epoch 27 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.37776684761047 32/64 [==============>...............] - ETA: 0s-0.05045543052256107 32/64 [==============>...............] - ETA: 0s0.05045543052256107 wasserstein_loss: 0.0 plot generated_image epoch 28 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.38412523269653 32/64 [==============>...............] - ETA: 0s-0.04955735802650452 32/64 [==============>...............] - ETA: 0s0.04955735802650452 wasserstein_loss: 0.0 plot generated_image epoch 29 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.40204763412476 32/64 [==============>...............] - ETA: 0s-0.0485646091401577 32/64 [==============>...............] - ETA: 0s0.04856461472809315 wasserstein_loss: 5.587935447692871e-09 plot generated_image epoch 30 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.36450219154358 32/64 [==============>...............] - ETA: 0s-0.04919157549738884 32/64 [==============>...............] - ETA: 0s0.04919157549738884 wasserstein_loss: 0.0 plot generated_image epoch 31 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.43105340003967 32/64 [==============>...............] - ETA: 0s-0.04786474257707596 32/64 [==============>...............] - ETA: 0s0.04786474257707596 wasserstein_loss: 0.0 plot generated_image epoch 32 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.46417713165283 32/64 [==============>...............] - ETA: 0s-0.04680074378848076 32/64 [==============>...............] 
- ETA: 0s0.04680074378848076 wasserstein_loss: 0.0 plot generated_image epoch 33 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.5649073123932 32/64 [==============>...............] - ETA: 0s-0.046741463243961334 32/64 [==============>...............] - ETA: 0s0.04674146696925163 wasserstein_loss: 3.725290298461914e-09 plot generated_image epoch 34 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.27696180343628 32/64 [==============>...............] - ETA: 0s-0.046080177649855614 32/64 [==============>...............] - ETA: 0s0.046080177649855614 wasserstein_loss: 0.0 plot generated_image epoch 35 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.30296730995178 32/64 [==============>...............] - ETA: 0s-0.04417840763926506 32/64 [==============>...............] - ETA: 0s0.04417840950191021 wasserstein_loss: 1.862645149230957e-09 plot generated_image epoch 36 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.5395781993866 32/64 [==============>...............] - ETA: 0s-0.04403984174132347 32/64 [==============>...............] - ETA: 0s0.04403984174132347 wasserstein_loss: 0.0 plot generated_image epoch 37 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.24879908561707 32/64 [==============>...............] - ETA: 0s-0.043628064915537834 32/64 [==============>...............] - ETA: 0s0.043628064915537834 wasserstein_loss: 0.0 plot generated_image epoch 38 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.36790990829468 32/64 [==============>...............] - ETA: 0s-0.043931981548666954 32/64 [==============>...............] - ETA: 0s0.043931981548666954 wasserstein_loss: 0.0 plot generated_image epoch 39 of 500 number of batches: 937 936/937 [============================>.] 
- ETA: 0s epoch time: 135.54706454277039 32/64 [==============>...............] - ETA: 0s-0.043369974941015244 32/64 [==============>...............] - ETA: 0s0.043369974941015244 wasserstein_loss: 0.0 plot generated_image epoch 40 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.45085310935974 32/64 [==============>...............] - ETA: 0s-0.04330463334918022 32/64 [==============>...............] - ETA: 0s0.04330463334918022 wasserstein_loss: 0.0 plot generated_image epoch 41 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.13854098320007 32/64 [==============>...............] - ETA: 0s-0.04290587082505226 32/64 [==============>...............] - ETA: 0s0.04290587082505226 wasserstein_loss: 0.0 plot generated_image epoch 42 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.40039730072021 32/64 [==============>...............] - ETA: 0s-0.04214660823345184 32/64 [==============>...............] - ETA: 0s0.04214660823345184 wasserstein_loss: 0.0 plot generated_image epoch 43 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.34528827667236 32/64 [==============>...............] - ETA: 0s-0.04079541377723217 32/64 [==============>...............] - ETA: 0s0.04079541377723217 wasserstein_loss: 0.0 plot generated_image epoch 44 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.2051465511322 32/64 [==============>...............] - ETA: 0s-0.04228292405605316 32/64 [==============>...............] - ETA: 0s0.04228292591869831 wasserstein_loss: 1.862645149230957e-09 plot generated_image epoch 45 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.26617288589478 32/64 [==============>...............] - ETA: 0s-0.0420759841799736 32/64 [==============>...............] 
- ETA: 0s0.0420759841799736 wasserstein_loss: 0.0 plot generated_image epoch 46 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.5399990081787 32/64 [==============>...............] - ETA: 0s-0.0409205537289381 32/64 [==============>...............] - ETA: 0s0.04092055559158325 wasserstein_loss: 1.862645149230957e-09 plot generated_image epoch 47 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.26917552947998 32/64 [==============>...............] - ETA: 0s-0.04017970524728298 32/64 [==============>...............] - ETA: 0s0.04017970524728298 wasserstein_loss: 0.0 plot generated_image epoch 48 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.30178689956665 32/64 [==============>...............] - ETA: 0s-0.03962242044508457 32/64 [==============>...............] - ETA: 0s0.03962242044508457 wasserstein_loss: 0.0 plot generated_image epoch 49 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.654381275177 32/64 [==============>...............] - ETA: 0s-0.03883266821503639 32/64 [==============>...............] - ETA: 0s0.03883267007768154 wasserstein_loss: 1.862645149230957e-09 plot generated_image epoch 50 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.28032326698303 32/64 [==============>...............] - ETA: 0s-0.038484446704387665 32/64 [==============>...............] - ETA: 0s0.038484446704387665 wasserstein_loss: 0.0 plot generated_image epoch 51 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.2165331840515 32/64 [==============>...............] - ETA: 0s-0.03752958960831165 32/64 [==============>...............] - ETA: 0s0.03752958960831165 wasserstein_loss: 0.0 plot generated_image epoch 52 of 500 number of batches: 937 936/937 [============================>.] 
- ETA: 0s epoch time: 135.60667252540588 32/64 [==============>...............] - ETA: 0s-0.03707117587327957 32/64 [==============>...............] - ETA: 0s0.03707117587327957 wasserstein_loss: 0.0 plot generated_image epoch 53 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.48254561424255 32/64 [==============>...............] - ETA: 0s-0.03780117630958557 32/64 [==============>...............] - ETA: 0s0.03780117630958557 wasserstein_loss: 0.0 plot generated_image epoch 54 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.3475034236908 32/64 [==============>...............] - ETA: 0s-0.03709513880312443 32/64 [==============>...............] - ETA: 0s0.03709513694047928 wasserstein_loss: -1.862645149230957e-09 plot generated_image epoch 55 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.36211466789246 32/64 [==============>...............] - ETA: 0s-0.035701872780919075 32/64 [==============>...............] - ETA: 0s0.035701872780919075 wasserstein_loss: 0.0 plot generated_image epoch 56 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.38477110862732 32/64 [==============>...............] - ETA: 0s-0.03565170802175999 32/64 [==============>...............] - ETA: 0s0.03565170802175999 wasserstein_loss: 0.0 plot generated_image epoch 57 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.4325511455536 32/64 [==============>...............] - ETA: 0s-0.03558463603258133 32/64 [==============>...............] - ETA: 0s0.03558463603258133 wasserstein_loss: 0.0 plot generated_image epoch 58 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.16646671295166 32/64 [==============>...............] - ETA: 0s-0.035412872210145 32/64 [==============>...............] 
- ETA: 0s0.03541287034749985 wasserstein_loss: -1.862645149230957e-09 plot generated_image epoch 59 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.04189801216125 32/64 [==============>...............] - ETA: 0s-0.03521161526441574 32/64 [==============>...............] - ETA: 0s0.03521161526441574 wasserstein_loss: 0.0 plot generated_image epoch 60 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.45459127426147 32/64 [==============>...............] - ETA: 0s-0.03505341894924641 32/64 [==============>...............] - ETA: 0s0.03505341894924641 wasserstein_loss: 0.0 plot generated_image epoch 61 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.5339035987854 32/64 [==============>...............] - ETA: 0s-0.034686196595430374 32/64 [==============>...............] - ETA: 0s0.034686196595430374 wasserstein_loss: 0.0 plot generated_image epoch 62 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.39927077293396 32/64 [==============>...............] - ETA: 0s-0.03436245024204254 32/64 [==============>...............] - ETA: 0s0.03436245210468769 wasserstein_loss: 1.862645149230957e-09 plot generated_image epoch 63 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.6504819393158 32/64 [==============>...............] - ETA: 0s-0.033955931663513184 32/64 [==============>...............] - ETA: 0s0.033955931663513184 wasserstein_loss: 0.0 plot generated_image epoch 64 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.54109740257263 32/64 [==============>...............] - ETA: 0s-0.034003784880042076 32/64 [==============>...............] - ETA: 0s0.034003784880042076 wasserstein_loss: 0.0 plot generated_image epoch 65 of 500 number of batches: 937 936/937 [============================>.] 
- ETA: 0s epoch time: 135.44626569747925 32/64 [==============>...............] - ETA: 0s-0.03345862403512001 32/64 [==============>...............] - ETA: 0s0.03345862403512001 wasserstein_loss: 0.0 plot generated_image epoch 66 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.22644710540771 32/64 [==============>...............] - ETA: 0s-0.03287150710821152 32/64 [==============>...............] - ETA: 0s0.03287150710821152 wasserstein_loss: 0.0 plot generated_image epoch 67 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.53603744506836 32/64 [==============>...............] - ETA: 0s-0.02548270206898451 32/64 [==============>...............] - ETA: 0s0.025482701137661934 wasserstein_loss: -9.313225746154785e-10 plot generated_image epoch 247 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.54178643226624 32/64 [==============>...............] - ETA: 0s-0.025010794401168823 32/64 [==============>...............] - ETA: 0s0.025010794401168823 wasserstein_loss: 0.0 plot generated_image epoch 248 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.50311303138733 32/64 [==============>...............] - ETA: 0s-0.02553188055753708 32/64 [==============>...............] - ETA: 0s0.02553188055753708 wasserstein_loss: 0.0 plot generated_image epoch 249 of 500 number of batches: 937 936/937 [============================>.] - ETA: 0s epoch time: 135.25214552879333 32/64 [==============>...............] - ETA: 0s-0.025545545853674412 32/64 [==============>...............] - ETA: 0s0.025545545853674412 wasserstein_loss: 0.0 plot generated_image epoch 250 of 500 number of batches: 937 418/937 [============>.................] - ETA: 75s
--------------------------------------------------------------------------- KeyboardInterrupt Traceback (most recent call last) <ipython-input-13-9c2c40cad8e3> in <module>() 25 generator.trainable = False 26 discriminator_loss.append(model_for_training_discriminator.train_on_batch([image_batch, noise], ---> 27 [real_y, fake_y])) 28 discriminator.trainable = False 29 generator.trainable = True /home/mathlab115/anaconda3/lib/python3.6/site-packages/keras/engine/training.py in train_on_batch(self, x, y, sample_weight, class_weight) 1760 ins = x + y + sample_weights 1761 self._make_train_function() -> 1762 outputs = self.train_function(ins) 1763 if len(outputs) == 1: 1764 return outputs[0] /home/mathlab115/anaconda3/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py in __call__(self, inputs) 2271 updated = session.run(self.outputs + [self.updates_op], 2272 feed_dict=feed_dict, -> 2273 **self.session_kwargs) 2274 return updated[:len(self.outputs)] 2275 /home/mathlab115/anaconda3/lib/python3.6/site-packages/tensorflow/python/client/session.py in run(self, fetches, feed_dict, options, run_metadata) 776 try: 777 result = self._run(None, fetches, feed_dict, options_ptr, --> 778 run_metadata_ptr) 779 if run_metadata: 780 proto_data = tf_session.TF_GetBuffer(run_metadata_ptr) /home/mathlab115/anaconda3/lib/python3.6/site-packages/tensorflow/python/client/session.py in _run(self, handle, fetches, feed_dict, options, run_metadata) 980 if final_fetches or final_targets: 981 results = self._do_run(handle, final_targets, final_fetches, --> 982 feed_dict_string, options, run_metadata) 983 else: 984 results = [] /home/mathlab115/anaconda3/lib/python3.6/site-packages/tensorflow/python/client/session.py in _do_run(self, handle, target_list, fetch_list, feed_dict, options, run_metadata) 1030 if handle is None: 1031 return self._do_call(_run_fn, self._session, feed_dict, fetch_list, -> 1032 target_list, options, run_metadata) 1033 else: 1034 return self._do_call(_prun_fn, 
self._session, handle, feed_dict, /home/mathlab115/anaconda3/lib/python3.6/site-packages/tensorflow/python/client/session.py in _do_call(self, fn, *args) 1037 def _do_call(self, fn, *args): 1038 try: -> 1039 return fn(*args) 1040 except errors.OpError as e: 1041 message = compat.as_text(e.message) /home/mathlab115/anaconda3/lib/python3.6/site-packages/tensorflow/python/client/session.py in _run_fn(session, feed_dict, fetch_list, target_list, options, run_metadata) 1019 return tf_session.TF_Run(session, options, 1020 feed_dict, fetch_list, target_list, -> 1021 status, run_metadata) 1022 1023 def _prun_fn(session, handle, feed_dict, fetch_list): KeyboardInterrupt: