"""
Convolutional Neural Network (CNN) with Custom Data + vgg finetune
@Sungjoon Choi (sungjoon.choi@cpslab.snu.ac.kr)
"""
import sys
# Legacy path hack from the original environment; kept so existing deployments
# that rely on dist-packages being visible keep working.
sys.path.append("/usr/local/lib/python2.7/dist-packages")
import datetime
import os
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import scipy.misc
import scipy.io
from tensorflow.examples.tutorials.mnist import input_data
# %matplotlib inline   <- IPython magic: only valid inside a notebook cell,
# left here as a comment so the file parses as plain Python.
print("Packages loaded.")
# Wall-clock start for the total-runtime report at the end of training.
start_time = current_time = datetime.datetime.now()
# Load data
# data4vgg.npz holds flattened 112x112x3 RGB images plus one-hot labels.
cwd = "/home/ubuntu/work/data/tensor101"
loadpath = cwd + "/data4vgg.npz"
l = np.load(loadpath)
# Show files stored in the archive
print(l.files)
# Parse data
trainimg = l['trainimg']
trainlabel = l['trainlabel']
testimg = l['testimg']
testlabel = l['testlabel']
print(trainimg[0])
ntrain = trainimg.shape[0]
nclass = testlabel.shape[1]
dim = trainimg.shape[1]
ntest = testimg.shape[0]
print("%d train images loaded" % (ntrain))
print("%d test images loaded" % (ntest))
print("%d dimensional input" % (dim))
print("%d classes" % (nclass))
# (notebook cell output removed: file list, first image, dataset sizes)
# Why 37632? 112 * 112 * 3 — each flat row is one RGB image (H x W x C),
# analogous to the three-plate RGB composite:
# --> https://en.wikipedia.org/wiki/RGB_color_model#/media/File:Rgb-compose-Alim_Khan.jpg
trainimg_tensor = np.ndarray((ntrain, 112, 112, 3))
print(trainimg[1, :])
for i in range(ntrain):
    currimg = trainimg[i, :]
    # reshape: reinterpret the 37632-dim vector as a (112, 112, 3) image
    currimg = np.reshape(currimg, [112, 112, 3])
    trainimg_tensor[i, :, :, :] = currimg
print("shape of trainimg_tensor is %s" % (trainimg_tensor.shape,))
print(trainimg_tensor)
testimg_tensor = np.ndarray((ntest, 112, 112, 3))
for i in range(ntest):
    currimg = testimg[i, :]
    currimg = np.reshape(currimg, [112, 112, 3])
    testimg_tensor[i, :, :, :] = currimg
print("shape of testimg_tensor is %s" % (testimg_tensor.shape,))
# (notebook cell output removed: tensor dumps and shapes)
def net(data_path, input_image, data):
    """Build the VGG-19 convolutional stack from matconvnet weights.

    data_path   : path to the .mat model file (currently unused; kept for
                  interface compatibility with existing callers)
    input_image : float32 tensor of shape (batch, H, W, 3)
    data        : dict returned by scipy.io.loadmat on the VGG-19 .mat file

    Returns (net, mean_pixel) where `net` maps each layer name to its output
    tensor and `mean_pixel` is the per-channel mean of the normalization image.
    """
    layers = (
        'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',
        'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',
        'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3',
        'relu3_3', 'conv3_4', 'relu3_4', 'pool3',
        'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3',
        'relu4_3', 'conv4_4', 'relu4_4', 'pool4',
        'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3',
        'relu5_3', 'conv5_4', 'relu5_4'
    )
    print(data.keys())
    mean = data['normalization'][0][0][0]
    mean_pixel = np.mean(mean, axis=(0, 1))
    weights = data['layers'][0]
    net = {}
    current = input_image
    for i, name in enumerate(layers):
        # Layer kind is encoded in the first 4 characters of the name.
        kind = name[:4]
        if kind == 'conv':
            kernels, bias = weights[i][0][0][0][0]
            # matconvnet: weights are [width, height, in_channels, out_channels]
            # tensorflow: weights are [height, width, in_channels, out_channels]
            kernels = np.transpose(kernels, (1, 0, 2, 3))
            bias = bias.reshape(-1)
            current = _conv_layer(current, kernels, bias)
        elif kind == 'relu':
            current = tf.nn.relu(current)
        elif kind == 'pool':
            current = _pool_layer(current)
        net[name] = current
    assert len(net) == len(layers)
    return net, mean_pixel
def _conv_layer(input, weights, bias):
    """Stride-1, SAME-padded 2D convolution with a bias add.

    `weights` and `bias` are numpy arrays; they are baked into the graph
    as constants (the VGG stack is frozen, not trained).
    """
    conv = tf.nn.conv2d(input, tf.constant(weights), strides=(1, 1, 1, 1),
                        padding='SAME')
    return tf.nn.bias_add(conv, bias)
def _pool_layer(input):
    """2x2 max-pooling with stride 2 (SAME padding), halving spatial dims."""
    return tf.nn.max_pool(input, ksize=(1, 2, 2, 1), strides=(1, 2, 2, 1),
                          padding='SAME')
# def preprocess(image, mean_pixel):
#     return image - mean_pixel
def unprocess(image, mean_pixel):
    """Undo mean-pixel centering by adding the per-channel mean back."""
    return image + mean_pixel
# Preprocess: run both datasets through the frozen VGG-19 stack once and
# keep the relu5_4 activations as fixed features for the trainable head.
# Pre-trained model (matconvnet format):
# url=http://www.vlfeat.org/matconvnet/pretrained/
cwd = "/home/ubuntu/work/data/tensor101"
VGG_PATH = cwd + "/imagenet-vgg-verydeep-19.mat"
# Load the .mat model as a dict of numpy arrays.
data = scipy.io.loadmat(VGG_PATH)
with tf.Graph().as_default(), tf.Session() as sess:
    with tf.device("/gpu:0"):
        img_placeholder = tf.placeholder(tf.float32, shape=(None, 112, 112, 3))
        # NOTE: bind the result to a new name — the original code rebound
        # `net`, clobbering the net() function and breaking re-execution.
        vgg_net, mean_pixel = net(VGG_PATH, img_placeholder, data)
        train_features = vgg_net['relu5_4'].eval(feed_dict={img_placeholder: trainimg_tensor})
        test_features = vgg_net['relu5_4'].eval(feed_dict={img_placeholder: testimg_tensor})
print("Done")
# (notebook cell output removed)
# Vectorize: flatten each relu5_4 feature map into one row for the FC head.
# Why 7*7*512? After four 2x2 poolings 112 -> 7 spatially, with 512 channels,
# so each feature map is W x H x D = 7 x 7 x 512.
train_vectorized = np.ndarray((ntrain, 7*7*512))
test_vectorized = np.ndarray((ntest, 7*7*512))
for i in range(ntrain):
    curr_feat = train_features[i, :, :, :]
    curr_feat_vec = np.reshape(curr_feat, (1, -1))
    print(curr_feat_vec)
    train_vectorized[i, :] = curr_feat_vec
for i in range(ntest):
    curr_feat = test_features[i, :, :, :]
    curr_feat_vec = np.reshape(curr_feat, (1, -1))
    test_vectorized[i, :] = curr_feat_vec
# (notebook cell output removed)
# Parameters
# about learning_rate : https://github.com/seojey79/eml/blob/master/MNIST_perf.xlsx
learning_rate = 0.0001
training_epochs = 100
batch_size = 100
display_step = 10
# Network
# tf.device pins op placement on the GPU
# (measured average epoch time: 88 sec CPU vs 20 sec GPU).
with tf.device("/gpu:0"):
    n_input = dim
    n_output = nclass
    # Trainable head: two fully-connected layers on top of the frozen
    # 7*7*512 VGG features.
    weights = {
        'wd1': tf.Variable(tf.random_normal([7*7*512, 1024], stddev=0.1)),
        'wd2': tf.Variable(tf.random_normal([1024, n_output], stddev=0.1))
    }
    biases = {
        'bd1': tf.Variable(tf.random_normal([1024], stddev=0.1)),
        'bd2': tf.Variable(tf.random_normal([n_output], stddev=0.1))
    }
def conv_basic(_input, _w, _b, _keepratio):
    """Fully-connected classification head over flattened VGG features.

    _input     : (batch, 7*7*512) feature tensor (or anything reshapeable to it)
    _w, _b     : weight/bias dicts with keys 'wd1'/'wd2' and 'bd1'/'bd2'
    _keepratio : dropout keep probability (scalar placeholder)

    Returns a dict of every intermediate tensor; logits are under 'out'.
    """
    # Input
    _input_r = _input
    # Vectorize: flatten to (batch, 7*7*512) so it matches wd1's input dim.
    _dense1 = tf.reshape(_input_r, [-1, _w['wd1'].get_shape().as_list()[0]])
    # Fc1
    _fc1 = tf.nn.relu(tf.add(tf.matmul(_dense1, _w['wd1']), _b['bd1']))
    _fc_dr1 = tf.nn.dropout(_fc1, _keepratio)
    # Fc2 (logits; softmax is applied inside the loss op)
    _out = tf.add(tf.matmul(_fc_dr1, _w['wd2']), _b['bd2'])
    # Return everything
    out = {'input_r': _input_r, 'dense1': _dense1,
           'fc1': _fc1, 'fc_dr1': _fc_dr1, 'out': _out}
    return out
# tf Graph input
x = tf.placeholder(tf.float32, [None, 7*7*512])
y = tf.placeholder(tf.float32, [None, n_output])
keepratio = tf.placeholder(tf.float32)
# Functions!
with tf.device("/gpu:0"):
    _pred = conv_basic(x, weights, biases, keepratio)['out']
    # NOTE(review): positional-argument softmax_cross_entropy_with_logits and
    # initialize_all_variables are TF 0.x APIs; kept for compatibility with
    # the TensorFlow version this script targets.
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(_pred, y))
    optm = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
    _corr = tf.equal(tf.argmax(_pred,1), tf.argmax(y,1)) # Count corrects
    accr = tf.reduce_mean(tf.cast(_corr, tf.float32)) # Accuracy
    init = tf.initialize_all_variables()
print("Network Ready to Go!")
# (notebook cell output removed)
# Launch the graph
sess = tf.Session()
sess.run(init)
# Training cycle
for epoch in range(training_epochs):
    avg_cost = 0.
    # +1 so the tiny dataset (ntrain=69 < batch_size=100) still yields a batch;
    # batches are sampled randomly with replacement below.
    num_batch = int(ntrain/batch_size)+1
    # Loop over all batches
    for i in range(num_batch):
        randidx = np.random.randint(ntrain, size=batch_size)
        batch_xs = train_vectorized[randidx, :]
        batch_ys = trainlabel[randidx, :]
        # Fit training using batch data (dropout active)
        sess.run(optm, feed_dict={x: batch_xs, y: batch_ys, keepratio:0.7})
        # Compute average loss (dropout disabled for evaluation)
        avg_cost += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys, keepratio:1.})/num_batch
    # Display logs per epoch step
    if epoch % display_step == 0:
        print("Epoch: %03d/%03d cost: %.9f" % (epoch, training_epochs, avg_cost))
        train_acc = sess.run(accr, feed_dict={x: batch_xs, y: batch_ys, keepratio:1.})
        print(" Training accuracy: %.3f" % (train_acc))
        test_acc = sess.run(accr, feed_dict={x: test_vectorized, y: testlabel, keepratio:1.})
        print(" Test accuracy: %.3f" % (test_acc))
end_time = current_time = datetime.datetime.now()
print("Optimization Finished!, spent time : ", (end_time - start_time).total_seconds())
# (notebook cell output removed)