# Standard imports: NumPy for array data, TensorFlow (1.x graph API) for the tutorial.
import numpy as np
import tensorflow as tf
print ("PACKAGES LOADED")
PACKAGES LOADED
A session is the basic building block for running machine-learning and TensorFlow operations.
Here we create a new session called sess.
# Open a TF 1.x session; every graph operation below is executed through it.
sess = tf.Session()
print("OPEN SESSION")
OPEN SESSION
Now that we have a session, we can define some TensorFlow operations. The simplest is the constant.
# The simplest graph node: a constant string tensor. Note the difference between
# the symbolic tensor (printed directly) and its value (obtained via sess.run).
hello = tf.constant("Hello World")
for item in (type(hello), hello, sess.run(hello)):
    print(item)
<class 'tensorflow.python.framework.ops.Tensor'> Tensor("Const:0", shape=(), dtype=string) b'Hello World'
We can also define floats and ints
# Numeric constants: dtype is inferred from the Python value
# (float -> float32, int -> int32).
a = tf.constant(1.5)
b = tf.constant(2)  # int(2) in the original is just 2
c = tf.constant(2.5)
print(a)
print(b)
print(c)
Tensor("Const_1:0", shape=(), dtype=float32) Tensor("Const_2:0", shape=(), dtype=int32) Tensor("Const_3:0", shape=(), dtype=float32)
# Python operators on tensors build graph ops; sess.run evaluates them.
print(sess.run(a * c))
d = tf.add(a, c)       # same computation as a + c
e = tf.multiply(a, c)  # same computation as a * c
print(d, a + c, e, a * c)
results = [sess.run(t) for t in (d, a + c, e, a * c)]
print(*results)
0.14293435 Tensor("Add_3:0", shape=(), dtype=float32) Tensor("add_4:0", shape=(), dtype=float32) Tensor("Mul_1:0", shape=(), dtype=float32) Tensor("mul_5:0", shape=(), dtype=float32) 1.5952896 1.5952896 0.14293435 0.14293435
We can define variables:
# A trainable variable: a 5x2 matrix drawn from N(0, 0.1^2).
# It holds no value until an initializer op is run.
weight = tf.Variable(tf.random_normal([5, 2], stddev=0.1))
print(weight)
<tf.Variable 'Variable:0' shape=(5, 2) dtype=float32_ref>
# NOTE: this raises FailedPreconditionError on purpose — the variable has not
# been initialized yet; the traceback below demonstrates why initializers exist.
print(sess.run(weight))
--------------------------------------------------------------------------- FailedPreconditionError Traceback (most recent call last) ~/anaconda3/envs/ml/lib/python3.6/site-packages/tensorflow/python/client/session.py in _do_call(self, fn, *args) 1349 try: -> 1350 return fn(*args) 1351 except errors.OpError as e: ~/anaconda3/envs/ml/lib/python3.6/site-packages/tensorflow/python/client/session.py in _run_fn(session, feed_dict, fetch_list, target_list, options, run_metadata) 1328 feed_dict, fetch_list, target_list, -> 1329 status, run_metadata) 1330 ~/anaconda3/envs/ml/lib/python3.6/site-packages/tensorflow/python/framework/errors_impl.py in __exit__(self, type_arg, value_arg, traceback_arg) 472 compat.as_text(c_api.TF_Message(self.status.status)), --> 473 c_api.TF_GetCode(self.status.status)) 474 # Delete the underlying status object from memory otherwise it stays alive FailedPreconditionError: Attempting to use uninitialized value Variable [[Node: _retval_Variable_0_0 = _Retval[T=DT_FLOAT, index=0, _device="/job:localhost/replica:0/task:0/device:CPU:0"](Variable)]] During handling of the above exception, another exception occurred: FailedPreconditionError Traceback (most recent call last) <ipython-input-63-d5c1f80f8cc2> in <module>() ----> 1 print(sess.run(weight)) ~/anaconda3/envs/ml/lib/python3.6/site-packages/tensorflow/python/client/session.py in run(self, fetches, feed_dict, options, run_metadata) 893 try: 894 result = self._run(None, fetches, feed_dict, options_ptr, --> 895 run_metadata_ptr) 896 if run_metadata: 897 proto_data = tf_session.TF_GetBuffer(run_metadata_ptr) ~/anaconda3/envs/ml/lib/python3.6/site-packages/tensorflow/python/client/session.py in _run(self, handle, fetches, feed_dict, options, run_metadata) 1126 if final_fetches or final_targets or (handle and feed_dict_tensor): 1127 results = self._do_run(handle, final_targets, final_fetches, -> 1128 feed_dict_tensor, options, run_metadata) 1129 else: 1130 results = [] 
~/anaconda3/envs/ml/lib/python3.6/site-packages/tensorflow/python/client/session.py in _do_run(self, handle, target_list, fetch_list, feed_dict, options, run_metadata) 1342 if handle is None: 1343 return self._do_call(_run_fn, self._session, feeds, fetches, targets, -> 1344 options, run_metadata) 1345 else: 1346 return self._do_call(_prun_fn, self._session, handle, feeds, fetches) ~/anaconda3/envs/ml/lib/python3.6/site-packages/tensorflow/python/client/session.py in _do_call(self, fn, *args) 1361 except KeyError: 1362 pass -> 1363 raise type(e)(node_def, op, message) 1364 1365 def _extend_graph(self): FailedPreconditionError: Attempting to use uninitialized value Variable [[Node: _retval_Variable_0_0 = _Retval[T=DT_FLOAT, index=0, _device="/job:localhost/replica:0/task:0/device:CPU:0"](Variable)]]
Variables have to be initialized.
tf.variables_initializer takes as input a list of variables and returns a function which can be run which will initialize your variables.
# Build an op that assigns initial values to the listed variables, run it,
# and only then read the variable's contents.
init = tf.variables_initializer([weight])
sess.run(init)
print(sess.run(weight))
[[-0.02364037 -0.2272053 ] [-0.08885183 0.15246117] [-0.06574409 -0.26573423] [ 0.04045184 -0.13918498] [-0.04854376 -0.04594936]]
Creates a placeholder for a variable which can be/will be inputted later. Similar to initializing variables in C++ and Fortran, but we can perform operations on them.
# A placeholder is a graph input whose value must be supplied at run time
# through feed_dict.
x = tf.placeholder(tf.float32, shape=(1024,1024))
y = tf.matmul(x,x)
# NOTE: this raises InvalidArgumentError on purpose — no value was fed for the
# placeholder; the corrected call with feed_dict follows below.
print(sess.run(y))
--------------------------------------------------------------------------- InvalidArgumentError Traceback (most recent call last) ~/anaconda3/envs/ml/lib/python3.6/site-packages/tensorflow/python/client/session.py in _do_call(self, fn, *args) 1349 try: -> 1350 return fn(*args) 1351 except errors.OpError as e: ~/anaconda3/envs/ml/lib/python3.6/site-packages/tensorflow/python/client/session.py in _run_fn(session, feed_dict, fetch_list, target_list, options, run_metadata) 1328 feed_dict, fetch_list, target_list, -> 1329 status, run_metadata) 1330 ~/anaconda3/envs/ml/lib/python3.6/site-packages/tensorflow/python/framework/errors_impl.py in __exit__(self, type_arg, value_arg, traceback_arg) 472 compat.as_text(c_api.TF_Message(self.status.status)), --> 473 c_api.TF_GetCode(self.status.status)) 474 # Delete the underlying status object from memory otherwise it stays alive InvalidArgumentError: You must feed a value for placeholder tensor 'Placeholder_5' with dtype float and shape [1024,1024] [[Node: Placeholder_5 = Placeholder[dtype=DT_FLOAT, shape=[1024,1024], _device="/job:localhost/replica:0/task:0/device:CPU:0"]()]] During handling of the above exception, another exception occurred: InvalidArgumentError Traceback (most recent call last) <ipython-input-66-712283970605> in <module>() ----> 1 print(sess.run(y)) ~/anaconda3/envs/ml/lib/python3.6/site-packages/tensorflow/python/client/session.py in run(self, fetches, feed_dict, options, run_metadata) 893 try: 894 result = self._run(None, fetches, feed_dict, options_ptr, --> 895 run_metadata_ptr) 896 if run_metadata: 897 proto_data = tf_session.TF_GetBuffer(run_metadata_ptr) ~/anaconda3/envs/ml/lib/python3.6/site-packages/tensorflow/python/client/session.py in _run(self, handle, fetches, feed_dict, options, run_metadata) 1126 if final_fetches or final_targets or (handle and feed_dict_tensor): 1127 results = self._do_run(handle, final_targets, final_fetches, -> 1128 feed_dict_tensor, options, run_metadata) 1129 else: 1130 
results = [] ~/anaconda3/envs/ml/lib/python3.6/site-packages/tensorflow/python/client/session.py in _do_run(self, handle, target_list, fetch_list, feed_dict, options, run_metadata) 1342 if handle is None: 1343 return self._do_call(_run_fn, self._session, feeds, fetches, targets, -> 1344 options, run_metadata) 1345 else: 1346 return self._do_call(_prun_fn, self._session, handle, feeds, fetches) ~/anaconda3/envs/ml/lib/python3.6/site-packages/tensorflow/python/client/session.py in _do_call(self, fn, *args) 1361 except KeyError: 1362 pass -> 1363 raise type(e)(node_def, op, message) 1364 1365 def _extend_graph(self): InvalidArgumentError: You must feed a value for placeholder tensor 'Placeholder_5' with dtype float and shape [1024,1024] [[Node: Placeholder_5 = Placeholder[dtype=DT_FLOAT, shape=[1024,1024], _device="/job:localhost/replica:0/task:0/device:CPU:0"]()]] Caused by op 'Placeholder_5', defined at: File "/home/peter/anaconda3/envs/ml/lib/python3.6/runpy.py", line 193, in _run_module_as_main "__main__", mod_spec) File "/home/peter/anaconda3/envs/ml/lib/python3.6/runpy.py", line 85, in _run_code exec(code, run_globals) File "/home/peter/anaconda3/envs/ml/lib/python3.6/site-packages/ipykernel_launcher.py", line 16, in <module> app.launch_new_instance() File "/home/peter/anaconda3/envs/ml/lib/python3.6/site-packages/traitlets/config/application.py", line 658, in launch_instance app.start() File "/home/peter/anaconda3/envs/ml/lib/python3.6/site-packages/ipykernel/kernelapp.py", line 486, in start self.io_loop.start() File "/home/peter/anaconda3/envs/ml/lib/python3.6/site-packages/zmq/eventloop/ioloop.py", line 177, in start super(ZMQIOLoop, self).start() File "/home/peter/anaconda3/envs/ml/lib/python3.6/site-packages/tornado/ioloop.py", line 888, in start handler_func(fd_obj, events) File "/home/peter/anaconda3/envs/ml/lib/python3.6/site-packages/tornado/stack_context.py", line 277, in null_wrapper return fn(*args, **kwargs) File 
"/home/peter/anaconda3/envs/ml/lib/python3.6/site-packages/zmq/eventloop/zmqstream.py", line 440, in _handle_events self._handle_recv() File "/home/peter/anaconda3/envs/ml/lib/python3.6/site-packages/zmq/eventloop/zmqstream.py", line 472, in _handle_recv self._run_callback(callback, msg) File "/home/peter/anaconda3/envs/ml/lib/python3.6/site-packages/zmq/eventloop/zmqstream.py", line 414, in _run_callback callback(*args, **kwargs) File "/home/peter/anaconda3/envs/ml/lib/python3.6/site-packages/tornado/stack_context.py", line 277, in null_wrapper return fn(*args, **kwargs) File "/home/peter/anaconda3/envs/ml/lib/python3.6/site-packages/ipykernel/kernelbase.py", line 283, in dispatcher return self.dispatch_shell(stream, msg) File "/home/peter/anaconda3/envs/ml/lib/python3.6/site-packages/ipykernel/kernelbase.py", line 233, in dispatch_shell handler(stream, idents, msg) File "/home/peter/anaconda3/envs/ml/lib/python3.6/site-packages/ipykernel/kernelbase.py", line 399, in execute_request user_expressions, allow_stdin) File "/home/peter/anaconda3/envs/ml/lib/python3.6/site-packages/ipykernel/ipkernel.py", line 208, in do_execute res = shell.run_cell(code, store_history=store_history, silent=silent) File "/home/peter/anaconda3/envs/ml/lib/python3.6/site-packages/ipykernel/zmqshell.py", line 537, in run_cell return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs) File "/home/peter/anaconda3/envs/ml/lib/python3.6/site-packages/IPython/core/interactiveshell.py", line 2728, in run_cell interactivity=interactivity, compiler=compiler, result=result) File "/home/peter/anaconda3/envs/ml/lib/python3.6/site-packages/IPython/core/interactiveshell.py", line 2850, in run_ast_nodes if self.run_code(code, result): File "/home/peter/anaconda3/envs/ml/lib/python3.6/site-packages/IPython/core/interactiveshell.py", line 2910, in run_code exec(code_obj, self.user_global_ns, self.user_ns) File "<ipython-input-65-52f68a61a488>", line 1, in <module> x = tf.placeholder(tf.float32, 
shape=(1024,1024)) File "/home/peter/anaconda3/envs/ml/lib/python3.6/site-packages/tensorflow/python/ops/array_ops.py", line 1680, in placeholder return gen_array_ops._placeholder(dtype=dtype, shape=shape, name=name) File "/home/peter/anaconda3/envs/ml/lib/python3.6/site-packages/tensorflow/python/ops/gen_array_ops.py", line 3141, in _placeholder "Placeholder", dtype=dtype, shape=shape, name=name) File "/home/peter/anaconda3/envs/ml/lib/python3.6/site-packages/tensorflow/python/framework/op_def_library.py", line 787, in _apply_op_helper op_def=op_def) File "/home/peter/anaconda3/envs/ml/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 3160, in create_op op_def=op_def) File "/home/peter/anaconda3/envs/ml/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 1625, in __init__ self._traceback = self._graph._extract_stack() # pylint: disable=protected-access InvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor 'Placeholder_5' with dtype float and shape [1024,1024] [[Node: Placeholder_5 = Placeholder[dtype=DT_FLOAT, shape=[1024,1024], _device="/job:localhost/replica:0/task:0/device:CPU:0"]()]]
Error due to not passing in values for the placeholder.
# Supply a concrete 1024x1024 array for the placeholder and evaluate the matmul.
rand_array = np.random.rand(1024, 1024)
print(sess.run(y, feed_dict={x: rand_array}))
[[256.2324 254.2128 255.95805 ... 243.20882 258.11218 256.19434] [259.64276 256.7375 257.56598 ... 248.86572 259.149 258.67688] [255.29286 254.2767 250.303 ... 240.82176 258.69873 247.91837] ... [253.73787 258.95325 254.40843 ... 242.09763 256.19852 254.54424] [251.51746 250.78815 257.00198 ... 239.89223 253.83813 248.19058] [261.3467 256.50253 260.25446 ... 250.3718 264.7775 253.50763]]
Now that we have a little experience with TensorFlow, we are going to use it to build and train a linear regression.
# Release the session's resources before starting the regression example.
sess.close()
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# Hyper-parameters for gradient descent.
learningRate = 0.01
trainingEpochs = 1000
displayStep = 50
# Training data: 17 (x, y) points for the linear fit.
xData = np.asarray([
    3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59, 2.167,
    7.042, 10.791, 5.313, 7.997, 5.654, 9.27, 3.1,
])
yData = np.asarray([
    1.7, 2.76, 2.09, 3.19, 1.694, 1.573, 3.366, 2.596, 2.53, 1.221,
    2.827, 3.465, 1.65, 2.904, 2.42, 2.94, 1.3,
])
dataPoints = xData.shape[0]
print(type(xData))
print(xData)
<class 'numpy.ndarray'> [ 3.3 4.4 5.5 6.71 6.93 4.168 9.779 6.182 7.59 2.167 7.042 10.791 5.313 7.997 5.654 9.27 3.1 ]
# ----- TensorFlow graph for the linear model y = W*x + b -----
X = tf.placeholder("float")
Y = tf.placeholder("float")
# Model parameters to be learned, randomly initialized.
W = tf.Variable(np.random.randn(), name='slope')
b = tf.Variable(np.random.randn(), name='y-intercept')
model = W * X + b
# Halved mean squared error, summed over whatever points are fed in.
cost = tf.reduce_sum(tf.pow(model - Y, 2) / (2 * dataPoints))
# One gradient-descent update step on the cost.
optimizer = tf.train.GradientDescentOptimizer(learningRate).minimize(cost)
init = tf.variables_initializer([W, b])
sess = tf.Session()
sess.run(init)  # assign the random starting values to W and b
# Train with per-point stochastic gradient descent, recording the cost every
# displayStep epochs. (Loop indentation was lost in the notebook export and is
# restored here.)
iteration = []
costs = []
for epoch in range(trainingEpochs):
    # One pass over the data: update W and b once per point.
    for (x, y) in zip(xData, yData):
        sess.run(optimizer, feed_dict={X: x, Y: y})
    if (epoch + 1) % displayStep == 0:
        # Evaluate the cost over the full data set for monitoring.
        c = sess.run(cost, feed_dict={X: xData, Y: yData})
        iteration.append(epoch + 1)
        costs.append(c)
        print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(c),
              "W=", sess.run(W), "b=", sess.run(b))
# Plot the learning curve once, after training — redrawing the whole curve and
# calling plt.show() inside the display branch re-renders it every displayStep
# epochs and stalls the loop.
plt.plot(np.asarray(iteration), np.asarray(costs))
plt.show()
print("Optimization Finished!")
Epoch: 0050 cost= 0.099404618 W= 0.33343473 b= 0.19831929
Epoch: 0100 cost= 0.096810795 W= 0.32844824 b= 0.23419142
Epoch: 0150 cost= 0.094516754 W= 0.32375845 b= 0.2679296
Epoch: 0200 cost= 0.092487819 W= 0.31934756 b= 0.29966196
Epoch: 0250 cost= 0.090693466 W= 0.31519893 b= 0.3295068
Epoch: 0300 cost= 0.089106530 W= 0.31129706 b= 0.35757622
Epoch: 0350 cost= 0.087703012 W= 0.3076271 b= 0.38397738
Epoch: 0400 cost= 0.086461850 W= 0.3041756 b= 0.40880767
Epoch: 0450 cost= 0.085364223 W= 0.30092937 b= 0.43216053
Epoch: 0500 cost= 0.084393531 W= 0.29787618 b= 0.45412517
Epoch: 0550 cost= 0.083535105 W= 0.2950045 b= 0.47478375
Epoch: 0600 cost= 0.082776025 W= 0.29230362 b= 0.49421328
Epoch: 0650 cost= 0.082104772 W= 0.2897635 b= 0.51248676
Epoch: 0700 cost= 0.081511214 W= 0.28737453 b= 0.529673
Epoch: 0750 cost= 0.080986336 W= 0.28512758 b= 0.545838
Epoch: 0800 cost= 0.080522239 W= 0.283014 b= 0.5610422
Epoch: 0850 cost= 0.080111854 W= 0.28102627 b= 0.57534194
Epoch: 0900 cost= 0.079748973 W= 0.27915674 b= 0.58879125
Epoch: 0950 cost= 0.079428151 W= 0.2773982 b= 0.6014421
Epoch: 1000 cost= 0.079144478 W= 0.2757443 b= 0.61334014 Optimization Finished!
# Final training cost and the fitted line against the raw data.
training_cost = sess.run(cost, feed_dict={X: xData, Y: yData})
slope, intercept = sess.run(W), sess.run(b)
print("Training cost=", training_cost, "W=", slope, "b=", intercept, '\n')
plt.plot(xData, yData, 'ro', label='Original data')
plt.plot(xData, slope * xData + intercept, label='Fitted line')
plt.legend()
plt.show()
Training cost= 0.07914448 W= 0.2757443 b= 0.61334014
# Close the regression session before the MNIST example builds its own.
sess.close()
We will use the MNIST handwritten-digit data to train a neural network in TensorFlow. Hooray!!! In this case we will create a neural network with 2 hidden layers: the first with 256 neurons and the second with 8 neurons.
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
%matplotlib inline
print ("PACKAGES LOADED")
PACKAGES LOADED
# Download (if needed) and load the MNIST data set with one-hot labels into ./data/.
mnist = input_data.read_data_sets('data/', one_hot=True) #Download MNIST data set
from IPython.display import Image
# Display a sample-digit image bundled alongside the notebook (IPython-only).
Image("mnistSample.png")
Extracting data/train-images-idx3-ubyte.gz Extracting data/train-labels-idx1-ubyte.gz Extracting data/t10k-images-idx3-ubyte.gz Extracting data/t10k-labels-idx1-ubyte.gz
# Training hyper-parameters.
learning_rate = 0.01
num_steps = 500
batch_size = 128
display_step = 100
# Network shape: 784 inputs -> n_hidden_1 -> n_hidden_2 -> 10 classes.
n_hidden_1 = 256  # neurons in the first hidden layer
n_hidden_2 = 8    # neurons in the second hidden layer
num_input = 784   # flattened 28*28 MNIST image
num_classes = 10  # digits 0-9
# Graph inputs: a batch of flattened images and a batch of one-hot labels.
X = tf.placeholder("float", [None, num_input])
Y = tf.placeholder("float", [None, num_classes])
# One weight matrix and bias vector per layer, randomly initialized.
weights = {
    'h1': tf.Variable(tf.random_normal([num_input, n_hidden_1])),
    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_hidden_2, num_classes])),
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'out': tf.Variable(tf.random_normal([num_classes])),
}
# Create model
def neural_net(x):
    """Forward pass of the two-hidden-layer network; returns class logits.

    x: batch of flattened images, shape [None, num_input].
    Returns: unscaled class scores, shape [None, num_classes].
    """
    # First hidden layer: n_hidden_1 (= 256) units.
    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
    # Second hidden layer: n_hidden_2 (= 8) units.
    # (The original comment claimed 256 here, contradicting n_hidden_2.)
    layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
    # NOTE(review): there is no nonlinear activation between layers, so the
    # stack collapses to a single linear map — presumably a tutorial
    # simplification; confirm before reusing this as a real classifier.
    # Output layer: one score per class.
    out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
    return out_layer
# Construct the model and training ops.
logits = neural_net(X)
# softmax_cross_entropy_with_logits_v2 only exists in newer TF 1.x releases;
# fall back to the deprecated spelling on older installs. Catch only the
# missing-attribute case — the original bare `except:` would also swallow
# unrelated errors (shape mismatches, keyboard interrupts, ...).
try:
    loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(
        logits=logits, labels=Y))
except AttributeError:
    loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
        logits=logits, labels=Y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss_op)
# Evaluate model: fraction of rows where the predicted class matches the label.
correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Initializer op for every variable in the graph.
init = tf.global_variables_initializer()
# Start training: history lists, fresh session, run the initializer.
iteration = []
costs = []
sess = tf.Session()
sess.run(init)
# Mini-batch training loop (loop indentation restored from the notebook export).
for step in range(1, num_steps + 1):
    batch_x, batch_y = mnist.train.next_batch(batch_size)
    # One backprop/optimization step on this mini-batch.
    sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})
    if step % display_step == 0 or step == 1:
        # Evaluate batch loss and accuracy for monitoring.
        loss, acc = sess.run([loss_op, accuracy],
                             feed_dict={X: batch_x, Y: batch_y})
        iteration.append(step)
        # BUG FIX: the original appended `1 - accuracy` — `accuracy` is the
        # symbolic tensor, so each append built a new graph op and stored a
        # Tensor, not a number. Append the evaluated error rate instead.
        costs.append(1 - acc)
        print("Step " + str(step) + ", Minibatch Loss= " +
              "{:.4f}".format(loss) + ", Training Accuracy= " +
              "{:.3f}".format(acc))
print("Optimization Finished!")
# Calculate accuracy on the held-out MNIST test images.
print("Testing Accuracy:",
      sess.run(accuracy, feed_dict={X: mnist.test.images,
                                    Y: mnist.test.labels}))
Step 1, Minibatch Loss= 336.8955, Training Accuracy= 0.164 Step 100, Minibatch Loss= 22.9245, Training Accuracy= 0.781 Step 200, Minibatch Loss= 7.6379, Training Accuracy= 0.883 Step 300, Minibatch Loss= 13.3900, Training Accuracy= 0.867 Step 400, Minibatch Loss= 3.4003, Training Accuracy= 0.906 Step 500, Minibatch Loss= 7.8205, Training Accuracy= 0.836 Optimization Finished! Testing Accuracy: 0.8799
# Peek at one training image: a flattened 784-vector of pixel intensities in [0, 1].
xs, ys = mnist.train.next_batch(batch_size)
print(xs[0])
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.03921569 0.5882353 0.5882353 0.83921576 0.83921576 0.9921569 1. 0.9960785 0.28627452 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.5294118 0.9921569 0.9921569 0.9921569 0.9921569 0.9921569 0.9921569 0.9921569 0.28235295 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.2784314 0.98823535 0.9921569 0.9921569 0.96470594 0.85098046 0.85098046 0.9607844 0.9921569 0.28235295 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.28627452 0.9921569 0.91372555 0.32941177 0.20784315 0. 0. 0.75294125 0.95294124 0.19215688 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.02352941 0.43921572 0.20000002 0. 0. 0. 0. 0.75294125 0.8705883 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.11764707 0.9215687 0.5882353 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.16470589 0.9921569 0.4431373 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.16470589 0.9921569 0.05882353 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.5058824 0.9921569 0.05882353 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.654902 0.9921569 0.05882353 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.15294118 0.9843138 0.7176471 0.01176471 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 
0.38823533 0.9921569 0.64705884 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.38823533 0.9921569 0.34117648 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.43921572 0.83921576 0.01568628 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.0627451 0.854902 0.8313726 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.5686275 0.9921569 0.7960785 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.00784314 0.7137255 0.9921569 0.427451 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.02352941 0.9921569 0.97647065 0.07450981 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.02352941 0.9921569 0.60784316 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.02352941 0.9921569 0.4431373 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]
def gen_image(arr):
    """Render a flattened 784-pixel image as a 28x28 grayscale plot.

    Returns the pyplot module so callers can chain .show().
    """
    pixels = (np.reshape(arr, (28, 28)) * 255).astype(np.uint8)
    plt.imshow(pixels, interpolation='nearest')
    return plt
# Run the trained network on a random test batch and compare each prediction
# with its one-hot label.
batch_xs, batch_ys = mnist.test.next_batch(batch_size)
print(batch_xs.shape[0])
for i in range(batch_xs.shape[0]):
    flat = np.reshape(batch_xs[i], (1, 784))  # model expects a [1, 784] batch
    output = sess.run(logits, feed_dict={X: flat})
    print('Image:')
    gen_image(batch_xs[i]).show()
    print('Given Value: ', np.argmax(batch_ys[i]))
    print('Predicted Value:', np.argmax(output), '\n\n\n\n')
128 Image:
Given Value: 4 Predicted Value: 9 Image:
Given Value: 1 Predicted Value: 1 Image:
Given Value: 1 Predicted Value: 1 Image:
Given Value: 2 Predicted Value: 2 Image:
Given Value: 4 Predicted Value: 4 Image:
Given Value: 4 Predicted Value: 4 Image:
Given Value: 2 Predicted Value: 2 Image:
Given Value: 3 Predicted Value: 3 Image:
Given Value: 3 Predicted Value: 5 Image:
Given Value: 5 Predicted Value: 8 Image:
Given Value: 3 Predicted Value: 3 Image:
Given Value: 2 Predicted Value: 2 Image:
Given Value: 9 Predicted Value: 9 Image:
Given Value: 2 Predicted Value: 2 Image:
Given Value: 8 Predicted Value: 8 Image:
Given Value: 0 Predicted Value: 0 Image:
Given Value: 6 Predicted Value: 6 Image:
Given Value: 2 Predicted Value: 2 Image:
Given Value: 6 Predicted Value: 6 Image:
Given Value: 0 Predicted Value: 0 Image:
Given Value: 0 Predicted Value: 0 Image:
Given Value: 3 Predicted Value: 3 Image:
Given Value: 3 Predicted Value: 3 Image:
Given Value: 3 Predicted Value: 3 Image:
Given Value: 9 Predicted Value: 9 Image:
Given Value: 1 Predicted Value: 1 Image:
Given Value: 6 Predicted Value: 6 Image:
Given Value: 2 Predicted Value: 2 Image:
Given Value: 9 Predicted Value: 9 Image:
Given Value: 6 Predicted Value: 6 Image:
Given Value: 5 Predicted Value: 5 Image:
Given Value: 0 Predicted Value: 0 Image:
Given Value: 5 Predicted Value: 5 Image:
Given Value: 3 Predicted Value: 3 Image:
Given Value: 9 Predicted Value: 7 Image:
Given Value: 8 Predicted Value: 8 Image:
Given Value: 3 Predicted Value: 3 Image:
Given Value: 2 Predicted Value: 2 Image:
Given Value: 0 Predicted Value: 0 Image:
Given Value: 7 Predicted Value: 7 Image:
Given Value: 2 Predicted Value: 2 Image:
Given Value: 8 Predicted Value: 8 Image:
Given Value: 9 Predicted Value: 3 Image:
Given Value: 7 Predicted Value: 9 Image:
Given Value: 2 Predicted Value: 2 Image:
Given Value: 5 Predicted Value: 5 Image:
Given Value: 6 Predicted Value: 6 Image:
Given Value: 8 Predicted Value: 8 Image:
Given Value: 4 Predicted Value: 4 Image:
Given Value: 6 Predicted Value: 6 Image:
Given Value: 7 Predicted Value: 9 Image:
Given Value: 8 Predicted Value: 8 Image:
Given Value: 4 Predicted Value: 4 Image:
Given Value: 1 Predicted Value: 8 Image:
Given Value: 7 Predicted Value: 7 Image:
Given Value: 5 Predicted Value: 5 Image:
Given Value: 9 Predicted Value: 9 Image:
Given Value: 0 Predicted Value: 0 Image:
Given Value: 0 Predicted Value: 0 Image:
Given Value: 9 Predicted Value: 9 Image:
Given Value: 9 Predicted Value: 9 Image:
Given Value: 8 Predicted Value: 8 Image:
Given Value: 6 Predicted Value: 6 Image:
Given Value: 7 Predicted Value: 7 Image:
Given Value: 9 Predicted Value: 1 Image:
Given Value: 2 Predicted Value: 2 Image:
Given Value: 1 Predicted Value: 1 Image:
Given Value: 6 Predicted Value: 6 Image:
Given Value: 4 Predicted Value: 4 Image:
Given Value: 0 Predicted Value: 3 Image:
Given Value: 9 Predicted Value: 9 Image:
Given Value: 6 Predicted Value: 6 Image:
Given Value: 5 Predicted Value: 5 Image:
Given Value: 2 Predicted Value: 2 Image:
Given Value: 9 Predicted Value: 9 Image:
Given Value: 9 Predicted Value: 9 Image:
Given Value: 2 Predicted Value: 2 Image:
Given Value: 9 Predicted Value: 9 Image:
Given Value: 0 Predicted Value: 8 Image:
Given Value: 9 Predicted Value: 4 Image:
Given Value: 8 Predicted Value: 8 Image:
Given Value: 9 Predicted Value: 9 Image:
Given Value: 7 Predicted Value: 7 Image:
Given Value: 2 Predicted Value: 2 Image:
Given Value: 1 Predicted Value: 1 Image:
Given Value: 0 Predicted Value: 0 Image:
Given Value: 1 Predicted Value: 1 Image:
Given Value: 4 Predicted Value: 4 Image:
Given Value: 4 Predicted Value: 9 Image:
Given Value: 4 Predicted Value: 4 Image:
Given Value: 7 Predicted Value: 7 Image:
Given Value: 6 Predicted Value: 6 Image:
Given Value: 0 Predicted Value: 0 Image:
Given Value: 7 Predicted Value: 7 Image:
Given Value: 7 Predicted Value: 2 Image:
Given Value: 5 Predicted Value: 4 Image:
Given Value: 0 Predicted Value: 0 Image:
Given Value: 7 Predicted Value: 7 Image:
Given Value: 4 Predicted Value: 4 Image:
Given Value: 5 Predicted Value: 5 Image:
Given Value: 4 Predicted Value: 4 Image:
Given Value: 4 Predicted Value: 4 Image:
Given Value: 8 Predicted Value: 8 Image:
Given Value: 8 Predicted Value: 8 Image:
Given Value: 9 Predicted Value: 4 Image:
Given Value: 7 Predicted Value: 9 Image:
Given Value: 9 Predicted Value: 9 Image:
Given Value: 4 Predicted Value: 4 Image:
Given Value: 8 Predicted Value: 8 Image:
Given Value: 1 Predicted Value: 1 Image:
Given Value: 2 Predicted Value: 2 Image:
Given Value: 9 Predicted Value: 9 Image:
Given Value: 7 Predicted Value: 3 Image:
Given Value: 3 Predicted Value: 3 Image:
Given Value: 6 Predicted Value: 6 Image:
Given Value: 2 Predicted Value: 1 Image:
Given Value: 2 Predicted Value: 2 Image:
Given Value: 7 Predicted Value: 7 Image:
Given Value: 0 Predicted Value: 0 Image:
Given Value: 1 Predicted Value: 1 Image:
Given Value: 3 Predicted Value: 8 Image:
Given Value: 5 Predicted Value: 5 Image:
Given Value: 0 Predicted Value: 0 Image:
Given Value: 5 Predicted Value: 8 Image:
Given Value: 9 Predicted Value: 9 Image:
Given Value: 3 Predicted Value: 3 Image:
Given Value: 9 Predicted Value: 7 Image:
Given Value: 5 Predicted Value: 5
Github tutorials