import sys
import warnings
if not sys.warnoptions:
    warnings.simplefilter("ignore")
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets(".", one_hot=True, reshape=False)
#tf.set_random_seed(2)
# parameters
learning_rate = 0.00001
epochs = 1000
batch_size = 128
# number of samples used to calculate validation and test accuracy
# decrease this if you're running out of memory
test_valid_size = 256

# network parameters
n_classes = 10  # MNIST total classes (0-9 digits)
dropout = 0.5   # dropout keep probability (fraction of units kept during training)
Extracting ./train-images-idx3-ubyte.gz
Extracting ./train-labels-idx1-ubyte.gz
Extracting ./t10k-images-idx3-ubyte.gz
Extracting ./t10k-labels-idx1-ubyte.gz
# store weights & biases
weights = {
    'wc1': tf.Variable(tf.truncated_normal([5, 5, 1, 32], stddev=0.1)),
    'wc2': tf.Variable(tf.truncated_normal([5, 5, 32, 64], stddev=0.1)),
    'wd1': tf.Variable(tf.truncated_normal([7*7*64, 1024], stddev=0.1)),
    'out': tf.Variable(tf.truncated_normal([1024, n_classes], stddev=0.1))}
biases = {
    'bc1': tf.Variable(tf.constant(0.1, shape=[32])),
    'bc2': tf.Variable(tf.constant(0.1, shape=[64])),
    'bd1': tf.Variable(tf.constant(0.1, shape=[1024])),
    'out': tf.Variable(tf.constant(0.1, shape=[n_classes]))}
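# A common heuristic for ReLU networks: truncated-normal weights with a small
# stddev plus small positive biases keep units active early in training,
# which helps gradients flow from the first updates.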
def conv2d(x, W, b, strides=1):
    x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')
    x = tf.nn.bias_add(x, b)
    return tf.nn.relu(x)
def maxpool2d(x, k=2):
    return tf.nn.max_pool(
        x,
        ksize=[1, k, k, 1],
        strides=[1, k, k, 1],
        padding='SAME')
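# With padding='SAME' the output spatial size is ceil(input_size / stride), so
# the stride-1 convolutions above preserve height and width, while each k=2
# max-pool halves them (rounding up).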
def conv_net(x, weights, biases, dropout):
    # Layer 1 - 28*28*1 to 14*14*32
    conv1 = conv2d(x, weights['wc1'], biases['bc1'])
    conv1 = maxpool2d(conv1, k=2)
    # Layer 2 - 14*14*32 to 7*7*64
    conv2 = conv2d(conv1, weights['wc2'], biases['bc2'])
    conv2 = maxpool2d(conv2, k=2)
    # Fully connected layer - 7*7*64 to 1024
    fc1 = tf.reshape(conv2, [-1, weights['wd1'].get_shape().as_list()[0]])
    fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1'])
    fc1 = tf.nn.relu(fc1)
    fc1 = tf.nn.dropout(fc1, dropout)
    # Output Layer - class prediction - 1024 to 10
    out = tf.add(tf.matmul(fc1, weights['out']), biases['out'])
    return out
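# Shape walkthrough for a 28x28x1 input: conv1 keeps 28x28 and its pool gives
# 14x14x32; conv2 keeps 14x14 and its pool gives 7x7x64; the reshape therefore
# flattens 7*7*64 = 3136 features per image, matching the rows of weights['wd1'].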
# tf Graph input
x = tf.placeholder(tf.float32, [None, 28, 28, 1])
y = tf.placeholder(tf.float32, [None, n_classes])
keep_prob = tf.placeholder(tf.float32)
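# keep_prob is fed at run time: the dropout keep probability during training
# and 1.0 during evaluation, so the same graph serves both phases.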
# Model
logits = conv_net(x, weights, biases, keep_prob)
# Define loss and optimizer
cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=y))
optimizer = tf.train.GradientDescentOptimizer(
    learning_rate=learning_rate).minimize(cost)
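# softmax_cross_entropy_with_logits_v2 applies the softmax itself, which is why
# conv_net returns raw logits; feeding already-softmaxed outputs here would
# apply the softmax twice and distort the loss.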
# Accuracy
correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Initializing the variables
init = tf.global_variables_initializer()
# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(epochs):
        train_accuracies = []
        val_accuracies = []
        for batch in range(mnist.train.num_examples//batch_size):
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            feed_dict = {
                x: batch_x,
                y: batch_y,
                keep_prob: dropout}
            _, acc = sess.run([optimizer, accuracy], feed_dict=feed_dict)
            train_accuracies.append(acc)
        # Loss on the last training batch and accuracy on a validation
        # subset, both with dropout disabled (keep_prob = 1.)
        loss = sess.run(cost, feed_dict={
            x: batch_x,
            y: batch_y,
            keep_prob: 1.})
        valid_acc = sess.run(accuracy, feed_dict={
            x: mnist.validation.images[:test_valid_size],
            y: mnist.validation.labels[:test_valid_size],
            keep_prob: 1.})
        val_accuracies.append(valid_acc)
        train_acc_mean = np.mean(train_accuracies)
        val_acc_mean = np.mean(val_accuracies)
        if (epoch + 1) % 50 == 0:
            print('Epoch {:>2}, '
                  'Loss: {:>10.4f} Training Accuracy:{:.6f}, '
                  'Validation Accuracy: {:.6f}'.format(
                      epoch + 1, loss, train_acc_mean, val_acc_mean))
    # Calculate Test Accuracy
    test_acc = sess.run(accuracy, feed_dict={
        x: mnist.test.images[:test_valid_size],
        y: mnist.test.labels[:test_valid_size],
        keep_prob: 1.})
    print('Testing Accuracy: {}'.format(test_acc))
Epoch 50, Loss: 1.2460 Training Accuracy:0.339798, Validation Accuracy: 0.636364
Epoch 100, Loss: 1.0122 Training Accuracy:0.513822, Validation Accuracy: 0.753906
Epoch 150, Loss: 0.7675 Training Accuracy:0.617315, Validation Accuracy: 0.820312
Epoch 200, Loss: 0.6523 Training Accuracy:0.679815, Validation Accuracy: 0.848312
Epoch 250, Loss: 0.4243 Training Accuracy:0.727309, Validation Accuracy: 0.886719
Epoch 300, Loss: 0.4262 Training Accuracy:0.761546, Validation Accuracy: 0.898438
Epoch 350, Loss: 0.3318 Training Accuracy:0.784328, Validation Accuracy: 0.902344
Epoch 400, Loss: 0.3039 Training Accuracy:0.806181, Validation Accuracy: 0.902344
Epoch 450, Loss: 0.3382 Training Accuracy:0.822024, Validation Accuracy: 0.914062
Epoch 500, Loss: 0.3123 Training Accuracy:0.833515, Validation Accuracy: 0.917969
Epoch 550, Loss: 0.2461 Training Accuracy:0.845061, Validation Accuracy: 0.917969
Epoch 600, Loss: 0.3075 Training Accuracy:0.849523, Validation Accuracy: 0.917969
Epoch 650, Loss: 0.3382 Training Accuracy:0.859958, Validation Accuracy: 0.921875
Epoch 700, Loss: 0.2209 Training Accuracy:0.867461, Validation Accuracy: 0.925781
Epoch 750, Loss: 0.2217 Training Accuracy:0.870994, Validation Accuracy: 0.925781
Epoch 800, Loss: 0.2504 Training Accuracy:0.876475, Validation Accuracy: 0.925781
Epoch 850, Loss: 0.3488 Training Accuracy:0.882230, Validation Accuracy: 0.925781
Epoch 900, Loss: 0.1096 Training Accuracy:0.887201, Validation Accuracy: 0.931782
Epoch 950, Loss: 0.1928 Training Accuracy:0.890024, Validation Accuracy: 0.933594
Epoch 1000, Loss: 0.1055 Training Accuracy:0.894595, Validation Accuracy: 0.933594
Testing Accuracy: 0.96875
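# The run above reports test accuracy on only the first test_valid_size (256)
# test images. A minimal sketch of a full test-set evaluation, assuming it is
# placed inside the same `with tf.Session()` block after the training loop:

    total, correct = 0, 0
    for start in range(0, mnist.test.num_examples, batch_size):
        end = start + batch_size
        batch_images = mnist.test.images[start:end]
        batch_labels = mnist.test.labels[start:end]
        batch_acc = sess.run(accuracy, feed_dict={
            x: batch_images,
            y: batch_labels,
            keep_prob: 1.})
        # weight each batch's accuracy by its actual size (the last batch may be smaller)
        correct += batch_acc * len(batch_images)
        total += len(batch_images)
    print('Full test accuracy: {:.4f}'.format(correct / total))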