import os, sys
print(sys.version)
import re
from pathlib import Path
import math
3.5.2 (default, Nov 23 2017, 16:37:01) [GCC 5.4.0 20160609]
# OPTIONAL: Load the "autoreload" extension so that code can change
%load_ext autoreload
# OPTIONAL: always reload modules so that as you change code in src, it gets loaded
%autoreload 2
# To control reloading manually, use %aimport; with no arguments it lists which modules will be reloaded.
%aimport
# ref: https://ipython.org/ipython-doc/3/config/extensions/autoreload.html
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
import sklearn
from sklearn import datasets
import tqdm
import csv
import pandas as pd
seed = None
np.random.seed(seed=seed)
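# Note: seed=None seeds NumPy from OS entropy, so results differ from run to run.
# For a reproducible experiment you could pass a fixed integer instead, e.g.
#   np.random.seed(seed=42)
# (TensorFlow keeps its own graph-level seed, tf.set_random_seed, not set here.)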
print("numpy ver: {}".format(np.__version__))
print("scikit-learn ver: {}".format(sklearn.__version__))
print("pandas ver: {}".format(pd.__version__))
Modules to reload:
all-except-skipped

Modules to skip:

numpy ver: 1.15.1
scikit-learn ver: 0.19.2
pandas ver: 0.23.4
#____________________________________________________________________________________________________
# TensorFlow and Keras GPU configures
##________________________________________________________________________________
## OPTIONAL : set a GPU viewed by TensorFlow
###____________________________________________________________
### - https://stackoverflow.com/questions/37893755/tensorflow-set-cuda-visible-devices-within-jupyter
import os
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"]="0"
##________________________________________________________________________________
##________________________________________________________________________________
## TensorFlow
###____________________________________________________________
import tensorflow as tf
print("tensorflow ver: {}".format(tf.__version__))
### eager mode
#tf.enable_eager_execution()
print("tf.executing_eagerly(): {}".format(tf.executing_eagerly()))
# You can double check that you have the correct devices visible to TF
# - https://stackoverflow.com/questions/37893755/tensorflow-set-cuda-visible-devices-within-jupyter
from tensorflow.python.client import device_lib
print("""
________________________________________
Visible GPUs from TensorFlow
________________________________________""")
for _device in device_lib.list_local_devices():
match = re.search(pattern=r'name: "/device:(?P<name>[A-Z]{3}):(?P<device_num>\d+)',
string=str(_device))
if match is None:
print("Not Match")
continue
if match.group("name") == "CPU":
name, device_num = match.group("name", "device_num")
print()
print("({}:{})".format(name, device_num))
continue
name, device_num = match.group("name", "device_num")
match = re.search(pattern=r'.*pci bus id: (?P<pci_bus_id>\d{4}:\d{2}:\d{2}\.\d).*',
string=str(_device))
if match is None:
print("No GPUs")
continue
print("({}:{}: pci_bus_id: {})".format(name, device_num, match.group("pci_bus_id")))
print("________________________________________")
###____________________________________________________________
### session
global _SESSION
config = tf.ConfigProto(allow_soft_placement=True,
log_device_placement=True)
config.gpu_options.allow_growth = True
#_SESSION = tf.Session(config=config)
###____________________________________________________________
##________________________________________________________________________________
#____________________________________________________________________________________________________
tensorflow ver: 1.10.1
tf.executing_eagerly(): False

________________________________________
Visible GPUs from TensorFlow
________________________________________

(CPU:0)
(GPU:0: pci_bus_id: 0000:01:00.0)
________________________________________
HOME = Path(os.getcwd()).parent
print(HOME)
/home/pollenjp/workdir/git/article_script/20180914__semi-supervised-deeplearning-ladder-networks__in_kabuku
path_list = []
data_Path = HOME / "data"
path_list.append(data_Path)
raw_Path = data_Path / "raw"
path_list.append(raw_Path)
plot_images_Path = data_Path / "plot_images"
path_list.append(plot_images_Path)
src_Path = HOME / "src"
path_list.append(src_Path)
for _Path in path_list:
_path = str(_Path)
if not os.path.exists(_path):
os.makedirs(name=_path)
print("make a directory: \n\t", _path)
else:
print(os.path.exists(_path), ": ", _path)
True :  /home/pollenjp/workdir/git/article_script/20180914__semi-supervised-deeplearning-ladder-networks__in_kabuku/data
True :  /home/pollenjp/workdir/git/article_script/20180914__semi-supervised-deeplearning-ladder-networks__in_kabuku/data/raw
True :  /home/pollenjp/workdir/git/article_script/20180914__semi-supervised-deeplearning-ladder-networks__in_kabuku/data/plot_images
True :  /home/pollenjp/workdir/git/article_script/20180914__semi-supervised-deeplearning-ladder-networks__in_kabuku/src
sys.path.append(str(src_Path))
from data import mnist_input_data
from utils_tensorflow.tensorflow_graph_in_jupyer import show_computational_graph
tf.reset_default_graph()
layer_sizes = [784, 1000, 500, 250, 250, 250, 10]
L = len(layer_sizes) - 1 # number of layers
num_examples = 60000
num_epochs = 150
num_labeled = 100
starter_learning_rate = 0.02
decay_after = 15 # epoch after which to begin learning rate decay
batch_size = 100
num_iter = (num_examples//batch_size) * num_epochs # number of loop iterations
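# Sanity check of the iteration budget: 60000 // 100 = 600 iterations per epoch,
# and 600 * 150 epochs = 90000 iterations in total.
assert num_iter == 600 * 150 == 90000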
with tf.name_scope(name="PLACEHOLDER"):
inputs = tf.placeholder(tf.float32, shape=(None, layer_sizes[0]))
outputs = tf.placeholder(tf.float32)
def b_i(inits, size, name):
return tf.Variable(inits * tf.ones([size]), name=name)
def w_i(shape, name):
# random-normal initialization scaled by 1/sqrt(fan_in)
return tf.Variable(tf.random_normal(shape, name=name)) / math.sqrt(shape[0])
shapes = list(zip(list(layer_sizes)[:-1], list(layer_sizes[1:])))
weights = {
'W': [w_i(s, "W") for s in shapes], # Encoder weights
'V': [w_i(s[::-1], "V") for s in shapes], # Decoder weights
# batch normalization parameter to shift the normalized value
'beta': [b_i(0.0, layer_sizes[l+1], "beta") for l in range(L)],
# batch normalization parameter to scale the normalized value
'gamma': [b_i(1.0, layer_sizes[l+1], "gamma") for l in range(L)],
}
noise_std = 0.3 # scaling factor for noise used in corrupted encoder
# hyperparameters that denote the importance of each layer
denoising_cost = [1000.0, 10.0, 0.10, 0.10, 0.10, 0.10, 0.10]
join = lambda l, u: tf.concat([l, u], 0)
labeled = lambda x : tf.slice(x, [0, 0], [batch_size, -1]) if x is not None else x
unlabeled = lambda x : tf.slice(x, [batch_size, 0], [-1, -1]) if x is not None else x
split_lu = lambda x : (labeled(x), unlabeled(x))
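# How these helpers are used: every mini-batch fed to the graph stacks labeled
# rows first and unlabeled rows after them, so `labeled`/`unlabeled` slice the
# two groups apart and `join` concatenates them back along axis 0. A minimal
# sketch with hypothetical shapes:
#   x_demo = tf.ones([batch_size + 50, 784])
#   l_demo, u_demo = split_lu(x_demo)  # shapes (100, 784) and (50, 784)
#   x_back = join(l_demo, u_demo)      # shape (150, 784) again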
training = tf.placeholder(tf.bool)
ewma = tf.train.ExponentialMovingAverage(decay=0.99) # to calculate the moving averages of mean and variance
bn_assigns = [] # this list stores the updates to be made to average mean and variance
def batch_normalization(batch, mean=None, var=None):
"""
Parameters
----------
batch : tf.Tensor, mini-batch to normalize
mean : tf.Tensor, per-feature mean (computed from the batch if None)
var : tf.Tensor, per-feature variance (computed from the batch if None)
Returns
-------
normalized batch
"""
if mean is None or var is None: # mean and variance have not been computed yet
mean, var = tf.nn.moments(batch, axes=[0])
return (batch - mean) / tf.sqrt(var + tf.constant(1e-10))
# average mean and variance of all layers
running_mean = [tf.Variable(tf.constant(0.0, shape=[l]), trainable=False) for l in layer_sizes[1:]]
running_var = [tf.Variable(tf.constant(1.0, shape=[l]), trainable=False) for l in layer_sizes[1:]]
def update_batch_normalization(batch, l):
"""
batch normalize + update average mean and variance of layer l
Parameters
----------
batch : tf.Tensor, mini-batch to normalize
l : int, layer index (1-based)
Globals
-------
running_mean, running_var : list, these lists store the average mean and variance of all layers
ewma : tf.train.ExponentialMovingAverage, calculates the moving averages of mean and variance
bn_assigns : list, this list stores the updates to be made to average mean and variance
Returns
-------
normalized batch
"""
mean, var = tf.nn.moments(batch, axes=[0])
assign_mean = running_mean[l-1].assign(mean) # Update
assign_var = running_var[l-1].assign(var) # Update
bn_assigns.append(ewma.apply([running_mean[l-1], running_var[l-1]])) # Store moving averages
with tf.control_dependencies([assign_mean, assign_var]): # return after assign
return (batch - mean) / tf.sqrt(var + 1e-10)
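# A minimal pure-Python sketch (hypothetical values) of the update rule that
# tf.train.ExponentialMovingAverage applies to each shadow variable:
decay_demo, shadow_demo = 0.99, 0.0
for value_demo in [1.0, 1.0]:
    shadow_demo = decay_demo * shadow_demo + (1.0 - decay_demo) * value_demo
print("ewma demo:", shadow_demo)  # ~0.0199 after two observations of 1.0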
def encoder(inputs, noise_std):
"""
Parameters
----------
inputs : tf.Tensor, input batch (labeled rows first, then unlabeled)
noise_std : float,
noise_std != 0.0 --> Corrupted Encoder
noise_std == 0.0 --> Clean Encoder
Globals
-------
split_lu : func
layer_sizes : list
weights : dict
join : func
batch_normalization : func
running_mean, running_var : list, these lists store the average mean and variance of all layers
Returns
-------
h : tf.Tensor, output of the final layer
d : dict, per-layer pre-activations, activations, means and variances, stored separately for labeled and unlabeled examples
"""
h = inputs + tf.random_normal(tf.shape(inputs)) * noise_std # add noise to input
d = {} # to store the pre-activation, activation, mean and variance for each layer
# The data for labeled and unlabeled examples are stored separately
d['labeled'] = {'z': {}, 'm': {}, 'v': {}, 'h': {}} # m=mean, v=variance
d['unlabeled'] = {'z': {}, 'm': {}, 'v': {}, 'h': {}} # m=mean, v=variance
d['labeled']['z'][0], d['unlabeled']['z'][0] = split_lu(h)
for l in range(1, L+1):
print( "Layer {:>3}: {:>5} -> {:>5}".format(l,layer_sizes[l-1], layer_sizes[l]) )
d['labeled']['h'][l-1], d['unlabeled']['h'][l-1] = split_lu(h)
z_pre = tf.matmul(h, weights['W'][l-1]) # pre-activation
z_pre_l, z_pre_u = split_lu(z_pre) # split labeled and unlabeled examples
m, v = tf.nn.moments(z_pre_u, axes=[0]) # compute mean, variance using twice later (efficiency)
#----------------------------------------
# if training:
def training_batch_norm():
# Training batch normalization
# batch normalization for labeled and unlabeled examples is performed separately
if noise_std > 0: # Corrupted Encoder
# Corrupted encoder
# batch normalization + noise
z = join(batch_normalization(z_pre_l), batch_normalization(z_pre_u, m, v))
z += tf.random_normal(tf.shape(z_pre)) * noise_std
else: # Clean Encoder
# Clean encoder
# batch normalization + update the average mean and variance using batch mean and variance of labeled examples
z = join(update_batch_normalization(z_pre_l, l), batch_normalization(z_pre_u, m, v))
return z
# else:
def eval_batch_norm():
# Evaluation batch normalization
# obtain average mean and variance and use it to normalize the batch
mean, var = ewma.average(running_mean[l-1]), ewma.average(running_var[l-1])
z = batch_normalization(z_pre, mean, var)
# Instead of the above statement, the use of the following 2 statements containing a typo
# consistently produces a 0.2% higher accuracy for unclear reasons.
# m_l, v_l = tf.nn.moments(z_pre_l, axes=[0])
# z = join(batch_normalization(z_pre_l, m_l, mean, var), batch_normalization(z_pre_u, mean, var))
return z
# perform batch normalization according to value of boolean "training" placeholder:
z = tf.cond(pred=training, true_fn=training_batch_norm, false_fn=eval_batch_norm)
#----------------------------------------
if l == L:
# use softmax activation in output layer
h = tf.nn.softmax(weights['gamma'][l-1] * (z + weights["beta"][l-1]))
else:
# use ReLU activation in hidden layers
h = tf.nn.relu(z + weights["beta"][l-1])
d['labeled']['z'][l] , d['unlabeled']['z'][l] = split_lu(z)
d['unlabeled']['m'][l], d['unlabeled']['v'][l] = m, v # save mean and variance of unlabeled examples for decoding
d['labeled']['h'][l], d['unlabeled']['h'][l] = split_lu(h)
return h, d
with tf.name_scope(name="Corrupted_Encoder"):
print( "=== Corrupted Encoder ===")
y_c, corr = encoder(inputs, noise_std)
with tf.name_scope(name="Clean_Encoder"):
print( "=== Clean Encoder ===" )
y, clean = encoder(inputs, 0.0) # 0.0 -> do not add noise
=== Corrupted Encoder ===
Layer   1:   784 ->  1000
Layer   2:  1000 ->   500
Layer   3:   500 ->   250
Layer   4:   250 ->   250
Layer   5:   250 ->   250
Layer   6:   250 ->    10
=== Clean Encoder ===
Layer   1:   784 ->  1000
Layer   2:  1000 ->   500
Layer   3:   500 ->   250
Layer   4:   250 ->   250
Layer   5:   250 ->   250
Layer   6:   250 ->    10
def g_gauss(z_c, u, size):
"""
Gaussian denoising function proposed in the original paper.
Parameters
----------
z_c : tf.Tensor, corrupted lateral signal z~(l) at this layer
u : tf.Tensor, batch-normalized top-down signal (l = 0, ..., L)
size : int, width of the layer
Returns
-------
z_est : tf.Tensor, denoised estimate of z
"""
w_i = lambda inits, name: tf.Variable(inits * tf.ones([size]), name=name)
a1 = w_i(0., 'a1')
a2 = w_i(1., 'a2')
a3 = w_i(0., 'a3')
a4 = w_i(0., 'a4')
a5 = w_i(0., 'a5')
a6 = w_i(0., 'a6')
a7 = w_i(1., 'a7')
a8 = w_i(0., 'a8')
a9 = w_i(0., 'a9')
a10 = w_i(0., 'a10')
mu = a1 * tf.sigmoid(a2 * u + a3) + a4 * u + a5
v = a6 * tf.sigmoid(a7 * u + a8) + a9 * u + a10
z_est = (z_c - mu) * v + mu
return z_est
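# In the paper's notation g_gauss computes z_hat = (z_c - mu(u)) * v(u) + mu(u),
# where mu(u) = a1*sigmoid(a2*u + a3) + a4*u + a5 and v(u) is the analogous
# expression in a6..a10; the a-parameters are learned per unit, so each layer
# learns its own denoising function conditioned on the top-down signal u.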
# Decoder
print( "=== Decoder ===" )
with tf.name_scope(name="Decoder"):
z_est = {}
d_cost = [] # to store the denoising cost of all layers
for l in range(L, -1, -1):
print( "Layer {:>2}: {:>5} -> {:>5}, denoising cost: {:>7.1f}".format(l, layer_sizes[l+1] if l+1 < len(layer_sizes) else "None", layer_sizes[l], denoising_cost[l]))
z, z_c = clean['unlabeled']['z'][l], corr['unlabeled']['z'][l]
m, v = clean['unlabeled']['m'].get(l, 0), clean['unlabeled']['v'].get(l, 1-1e-10)
if l == L:
u = unlabeled(y_c)
else:
u = tf.matmul(z_est[l+1], weights['V'][l])
u = batch_normalization(u)
z_est[l] = g_gauss(z_c, u, layer_sizes[l])
z_est_bn = (z_est[l] - m) / v
# append the cost of this layer to d_cost
d_cost.append((tf.reduce_mean(tf.reduce_sum(tf.square(z_est_bn - z), 1)) / layer_sizes[l]) * denoising_cost[l])
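# Each layer l therefore contributes
#   denoising_cost[l] * mean_over_batch( ||z_est_bn - z||^2 ) / layer_sizes[l]
# to the unsupervised cost; the heavy weights on layers 0 and 1 (1000.0 and
# 10.0) make the input-side reconstructions dominate in this 100-label setup.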
=== Decoder ===
Layer  6:  None ->    10, denoising cost:     0.1
Layer  5:    10 ->   250, denoising cost:     0.1
Layer  4:   250 ->   250, denoising cost:     0.1
Layer  3:   250 ->   250, denoising cost:     0.1
Layer  2:   250 ->   500, denoising cost:     0.1
Layer  1:   500 ->  1000, denoising cost:    10.0
Layer  0:  1000 ->   784, denoising cost:  1000.0
# calculate total unsupervised cost by adding the denoising cost of all layers
with tf.name_scope(name="Cost"):
u_cost = tf.add_n(d_cost)
y_N = labeled(y_c)
cost = -tf.reduce_mean(tf.reduce_sum(outputs*tf.log(y_N), 1)) # supervised cost
loss = cost + u_cost # total cost
with tf.name_scope(name="pred_cost"):
pred_cost = -tf.reduce_mean(tf.reduce_sum(outputs*tf.log(y), 1)) # cost used for prediction
with tf.name_scope(name="accuracy"):
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(outputs, 1)) # no of correct predictions
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float")) * tf.constant(100.0)
with tf.name_scope(name="Optimizer"):
learning_rate = tf.Variable(starter_learning_rate, trainable=False)
train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss)
# add the updates of batch normalization statistics to train_step
bn_updates = tf.group(*bn_assigns)
with tf.control_dependencies([train_step]):
train_step = tf.group(bn_updates)
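# The control dependency makes the moving-average assignments run only after the
# optimizer step, so a single sess.run(train_step, ...) updates both the weights
# and the batch-norm statistics. The same pattern in isolation (hypothetical op
# names):
#   with tf.control_dependencies([optimizer_step]):
#       train_op = tf.group(*update_ops)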
show_computational_graph(tf.get_default_graph())
print( "=== Loading Data ===" )
mnist = mnist_input_data.read_data_sets(train_dir=str(raw_Path / "MNIST_data"), n_labeled=num_labeled,
fake_data=False, one_hot=True)
=== Loading Data ===
Extracting /home/pollenjp/workdir/git/article_script/20180914__semi-supervised-deeplearning-ladder-networks__in_kabuku/data/raw/MNIST_data/train-images-idx3-ubyte.gz
Extracting /home/pollenjp/workdir/git/article_script/20180914__semi-supervised-deeplearning-ladder-networks__in_kabuku/data/raw/MNIST_data/train-labels-idx1-ubyte.gz
Extracting /home/pollenjp/workdir/git/article_script/20180914__semi-supervised-deeplearning-ladder-networks__in_kabuku/data/raw/MNIST_data/t10k-images-idx3-ubyte.gz
Extracting /home/pollenjp/workdir/git/article_script/20180914__semi-supervised-deeplearning-ladder-networks__in_kabuku/data/raw/MNIST_data/t10k-labels-idx1-ubyte.gz
saver = tf.train.Saver()
print( "=== Starting Session ===" )
sess = tf.Session()
=== Starting Session ===
i_iter = 0
ckpt = tf.train.get_checkpoint_state('checkpoints/') # get latest checkpoint (if any)
if ckpt and ckpt.model_checkpoint_path:
# if checkpoint exists, restore the parameters and set epoch_n and i_iter
saver.restore(sess, ckpt.model_checkpoint_path)
epoch_n = int(ckpt.model_checkpoint_path.split('-')[1])
i_iter = (epoch_n+1) * (num_examples//batch_size)
print( "Restored Epoch ", epoch_n )
else:
# no checkpoint exists. create checkpoints directory if it does not exist.
if not os.path.exists('checkpoints'):
os.makedirs('checkpoints')
init = tf.global_variables_initializer()
sess.run(init)
print( "=== Training ===" )
print( "Initial Accuracy: ", sess.run(accuracy,
feed_dict={
inputs: mnist.semi_test.images,
outputs: mnist.semi_test.labels,
training: False}), "%" )
=== Training ===
Initial Accuracy:  9.42 %
for i in tqdm.tqdm(range(i_iter, num_iter)):
images, labels = mnist.semi_train.next_batch(batch_size)
sess.run(train_step, feed_dict={inputs: images,
outputs: labels,
training: True})
if (i > 1) and ((i+1) % (num_iter//num_epochs) == 0):
epoch_n = i//(num_examples//batch_size)
if (epoch_n+1) >= decay_after:
# decay learning rate
# learning_rate = starter_learning_rate * ((num_epochs - epoch_n) / (num_epochs - decay_after))
ratio = 1.0 * (num_epochs - (epoch_n+1)) # epoch_n + 1 because learning rate is set for next epoch
ratio = max(0, ratio / (num_epochs - decay_after))
sess.run(learning_rate.assign(starter_learning_rate * ratio))
print( "iter {}: test_acc:{}%".format(i, sess.run(accuracy,
feed_dict={ inputs: mnist.semi_test.images,
outputs: mnist.semi_test.labels,
training: False}) ) )
#saver.save(sess, 'checkpoints/model.ckpt', epoch_n)
# print( "Epoch ", epoch_n, ", Accuracy: ", sess.run(accuracy, feed_dict={inputs: mnist.test.images, outputs:mnist.test.labels, training: False}), "%" )
#with open('train_log', 'a') as train_log:
# # write test accuracy to file "train_log"
# train_log_w = csv.writer(train_log)
# log_i = [epoch_n] + sess.run([accuracy], feed_dict={inputs: mnist.test.images, outputs: mnist.test.labels, training: False})
# train_log_w.writerow(log_i)
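# The decay above is linear in the epoch index. A small pure-Python sketch of
# the resulting schedule (same constants as the loop; ep stands for epoch_n+1):
for ep in [15, 75, 149]:
    r = max(0, (num_epochs - ep) / (num_epochs - decay_after))
    print("epoch {:>3}: lr = {:.5f}".format(ep, starter_learning_rate * r))
# epoch  15: lr = 0.02000
# epoch  75: lr = 0.01111
# epoch 149: lr = 0.00015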
iter 599: test_acc:87.15999603271484%
iter 1199: test_acc:89.31999969482422%
iter 1799: test_acc:91.6199951171875%
iter 2399: test_acc:93.66000366210938%
iter 2999: test_acc:94.9000015258789%
iter 3599: test_acc:95.2300033569336%
iter 4199: test_acc:95.81000518798828%
iter 4799: test_acc:96.4800033569336%
iter 5399: test_acc:96.55999755859375%
iter 5999: test_acc:96.31999969482422%
iter 6599: test_acc:97.15999603271484%
iter 7199: test_acc:97.11000061035156%
iter 7799: test_acc:97.37999725341797%
iter 8399: test_acc:97.20999908447266%
iter 8999: test_acc:97.48999786376953%
iter 9599: test_acc:97.45999908447266%
iter 10199: test_acc:97.65999603271484%
iter 10799: test_acc:97.5%
iter 11399: test_acc:97.58999633789062%
iter 11999: test_acc:97.8499984741211%
iter 12599: test_acc:97.69999694824219%
iter 13199: test_acc:97.63999938964844%
iter 13799: test_acc:97.68000030517578%
iter 14399: test_acc:97.5199966430664%
iter 14999: test_acc:97.90999603271484%
iter 15599: test_acc:96.98999786376953%
iter 16199: test_acc:97.77999877929688%
iter 16799: test_acc:97.83999633789062%
iter 17399: test_acc:97.81999969482422%
iter 17999: test_acc:97.79999542236328%
iter 18599: test_acc:97.87999725341797%
iter 19199: test_acc:97.83999633789062%
iter 19799: test_acc:98.0%
iter 20399: test_acc:97.47999572753906%
iter 20999: test_acc:97.86000061035156%
iter 21599: test_acc:97.87999725341797%
iter 22199: test_acc:97.52999877929688%
iter 22799: test_acc:98.11000061035156%
iter 23399: test_acc:98.12999725341797%
iter 23999: test_acc:98.0999984741211%
iter 24599: test_acc:97.79000091552734%
iter 25199: test_acc:97.79000091552734%
iter 25799: test_acc:98.25%
iter 26399: test_acc:97.97999572753906%
iter 26999: test_acc:98.13999938964844%
iter 27599: test_acc:98.18000030517578%
iter 28199: test_acc:98.3699951171875%
iter 28799: test_acc:98.13999938964844%
iter 29399: test_acc:98.18999481201172%
iter 29999: test_acc:98.0199966430664%
iter 30599: test_acc:98.18000030517578%
iter 31199: test_acc:98.30999755859375%
iter 31799: test_acc:98.15999603271484%
iter 32399: test_acc:98.13999938964844%
iter 32999: test_acc:98.08999633789062%
iter 33599: test_acc:98.16999816894531%
iter 34199: test_acc:98.32999420166016%
iter 34799: test_acc:98.25999450683594%
iter 35399: test_acc:98.40999603271484%
iter 35999: test_acc:98.18999481201172%
iter 36599: test_acc:98.20999908447266%
iter 37199: test_acc:98.19999694824219%
iter 37799: test_acc:98.22999572753906%
iter 38399: test_acc:98.25999450683594%
iter 38999: test_acc:98.3499984741211%
iter 39599: test_acc:98.50999450683594%
iter 40199: test_acc:98.37999725341797%
iter 40799: test_acc:98.31999969482422%
iter 41399: test_acc:98.4000015258789%
iter 41999: test_acc:98.22999572753906%
iter 42599: test_acc:98.43000030517578%
iter 43199: test_acc:98.3699951171875%
iter 43799: test_acc:98.38999938964844%
iter 44399: test_acc:98.37999725341797%
iter 44999: test_acc:98.3699951171875%
iter 45599: test_acc:98.47999572753906%
iter 46199: test_acc:98.36000061035156%
iter 46799: test_acc:98.36000061035156%
iter 47399: test_acc:98.33999633789062%
iter 47999: test_acc:98.37999725341797%
iter 48599: test_acc:98.44999694824219%
iter 49199: test_acc:98.40999603271484%
iter 49799: test_acc:98.56999969482422%
iter 50399: test_acc:98.52999877929688%
iter 50999: test_acc:98.44999694824219%
iter 51599: test_acc:98.5%
iter 52199: test_acc:98.52999877929688%
iter 52799: test_acc:98.32999420166016%
iter 53399: test_acc:98.47000122070312%
iter 53999: test_acc:98.44999694824219%
iter 54599: test_acc:98.47000122070312%
iter 55199: test_acc:98.5%
iter 55799: test_acc:98.5999984741211%
iter 56399: test_acc:98.48999786376953%
iter 56999: test_acc:98.58999633789062%
iter 57599: test_acc:98.4000015258789%
iter 58199: test_acc:98.47000122070312%
iter 58799: test_acc:98.43999481201172%
iter 59399: test_acc:98.47999572753906%
iter 59999: test_acc:98.54000091552734%
iter 60599: test_acc:98.55999755859375%
iter 61199: test_acc:98.55999755859375%
iter 61799: test_acc:98.55999755859375%
iter 62399: test_acc:98.54000091552734%
iter 62999: test_acc:98.58999633789062%
iter 63599: test_acc:98.54000091552734%
iter 64199: test_acc:98.61000061035156%
iter 64799: test_acc:98.58999633789062%
iter 65399: test_acc:98.66999816894531%
iter 65999: test_acc:98.5999984741211%
iter 66599: test_acc:98.72000122070312%
iter 67199: test_acc:98.68999481201172%
iter 67799: test_acc:98.5999984741211%
iter 68399: test_acc:98.66999816894531%
iter 68999: test_acc:98.65999603271484%
iter 69599: test_acc:98.63999938964844%
iter 70199: test_acc:98.62999725341797%
iter 70799: test_acc:98.6500015258789%
iter 71399: test_acc:98.68000030517578%
iter 71999: test_acc:98.68999481201172%
iter 72599: test_acc:98.66999816894531%
iter 73199: test_acc:98.62999725341797%
iter 73799: test_acc:98.68000030517578%
iter 74399: test_acc:98.58999633789062%
iter 74999: test_acc:98.61000061035156%
iter 75599: test_acc:98.72999572753906%
iter 76199: test_acc:98.63999938964844%
iter 76799: test_acc:98.73999786376953%
iter 77399: test_acc:98.72999572753906%
iter 77999: test_acc:98.75%
iter 78599: test_acc:98.70999908447266%
iter 79199: test_acc:98.76000213623047%
iter 79799: test_acc:98.75%
iter 80399: test_acc:98.79999542236328%
iter 80999: test_acc:98.6500015258789%
iter 81599: test_acc:98.72999572753906%
iter 82199: test_acc:98.69999694824219%
iter 82799: test_acc:98.65999603271484%
iter 83399: test_acc:98.73999786376953%
iter 83999: test_acc:98.72000122070312%
iter 84599: test_acc:98.75%
iter 85199: test_acc:98.66999816894531%
iter 85799: test_acc:98.72000122070312%
iter 86399: test_acc:98.79000091552734%
iter 86999: test_acc:98.7699966430664%
iter 87599: test_acc:98.79000091552734%
iter 88199: test_acc:98.76000213623047%
iter 88799: test_acc:98.7699966430664%
iter 89399: test_acc:98.79999542236328%
iter 89999: test_acc:98.79000091552734%
100%|██████████| 90000/90000 [23:09<00:00, 64.79it/s]
print( "Final Accuracy: {}".format(sess.run(accuracy,
feed_dict={
inputs: mnist.semi_test.images,
outputs: mnist.semi_test.labels,
training: False}), "%" ) )
sess.close()
Final Accuracy: 98.79000091552734 %
show_computational_graph(tf.get_default_graph())