# These are all the modules we'll be using later. Make sure you can import them
# before proceeding further.
from __future__ import print_function
import numpy as np
import tensorflow as tf
from six.moves import cPickle as pickle
from six.moves import range
# The folder where the big 3D arrays from the previous exercise were dumped
# into a single pickle file.
data_root = 'D:\\1_Workspaces\\UNDER_VCS\\github\\1_ML_NN\\python_with_math\\data'
pickle_file = 'notMNIST.pickle'
with open(data_root + '\\' + pickle_file, 'rb') as f:
    save = pickle.load(f)
    train_dataset = save['train_dataset']
    train_labels = save['train_labels']
    valid_dataset = save['valid_dataset']
    valid_labels = save['valid_labels']
    test_dataset = save['test_dataset']
    test_labels = save['test_labels']
    del save  # hint to help gc free up memory
    print('Training set', train_dataset.shape, train_labels.shape)
    print('Validation set', valid_dataset.shape, valid_labels.shape)
    print('Test set', test_dataset.shape, test_labels.shape)
Training set (200000, 28, 28) (200000,)
Validation set (10000, 28, 28) (10000,)
Test set (10000, 28, 28) (10000,)
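# Note (assumption, not from the original code): the pickle path above could
# equivalently and more portably be built with os.path.join(data_root, pickle_file)
# after "import os"; the manual '\\' concatenation only works on Windows.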
image_size = 28
num_labels = 10
def reformat(dataset, labels):
    dataset = dataset.reshape((-1, image_size * image_size)).astype(np.float32)
    # Map 0 to [1.0, 0.0, 0.0 ...], 1 to [0.0, 1.0, 0.0 ...]
    labels = (np.arange(num_labels) == labels[:, None]).astype(np.float32)
    return dataset, labels
train_dataset, train_labels = reformat(train_dataset, train_labels)
valid_dataset, valid_labels = reformat(valid_dataset, valid_labels)
test_dataset, test_labels = reformat(test_dataset, test_labels)
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
Training set (200000, 784) (200000, 10)
Validation set (10000, 784) (10000, 10)
Test set (10000, 784) (10000, 10)
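# For illustration only: the one-hot mapping inside reformat() is plain NumPy
# broadcasting. With 3 classes and the made-up labels [0, 2]:
example_one_hot = (np.arange(3) == np.array([0, 2])[:, None]).astype(np.float32)
# example_one_hot is [[1., 0., 0.],
#                     [0., 0., 1.]]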
# This loads all the data into TensorFlow and builds the computation graph corresponding to our training:
# With gradient descent training, even this much data is prohibitive.
# Subset the training data for faster turnaround.
train_subset = 10000
graph = tf.Graph()
with graph.as_default():
    # Input data.
    # Load the training, validation and test data into constants that are
    # attached to the graph.
    tf_train_dataset = tf.constant(train_dataset[:train_subset, :])
    tf_train_labels = tf.constant(train_labels[:train_subset])
    tf_valid_dataset = tf.constant(valid_dataset)
    tf_test_dataset = tf.constant(test_dataset)
    # Variables.
    # These are the parameters that we are going to be training. The weight
    # matrix will be initialized using random values following a (truncated)
    # normal distribution. The biases get initialized to zero.
    weights = tf.Variable(
        tf.truncated_normal([image_size * image_size, num_labels]))
    biases = tf.Variable(tf.zeros([num_labels]))
    # Training computation.
    # We multiply the inputs with the weight matrix, and add biases. We compute
    # the softmax and cross-entropy (it's one operation in TensorFlow, because
    # it's very common, and it can be optimized). We take the average of this
    # cross-entropy across all training examples: that's our loss.
    logits = tf.matmul(tf_train_dataset, weights) + biases
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=tf_train_labels, logits=logits))
    # Optimizer.
    # We are going to find the minimum of this loss using gradient descent.
    optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
    # Predictions for the training, validation, and test data.
    # These are not part of training, but merely here so that we can report
    # accuracy figures as we train.
    train_prediction = tf.nn.softmax(logits)
    valid_prediction = tf.nn.softmax(
        tf.matmul(tf_valid_dataset, weights) + biases)
    test_prediction = tf.nn.softmax(tf.matmul(tf_test_dataset, weights) + biases)
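# For intuition only: the fused softmax cross-entropy op in the loss above is
# equivalent to this NumPy computation for a single example (example_logits and
# example_label are made-up values, not taken from the dataset).
example_logits = np.array([2.0, 1.0, 0.1])
example_label = np.array([1.0, 0.0, 0.0])  # one-hot target
example_softmax = np.exp(example_logits) / np.sum(np.exp(example_logits))
example_cross_entropy = -np.sum(example_label * np.log(example_softmax))  # ~0.42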
num_steps = 10000  # the original notebook used 801 steps; increased here to train longer
def accuracy(predictions, labels):
    return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))
            / predictions.shape[0])
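# Quick sanity check of accuracy() on made-up data: the first prediction matches
# its one-hot label, the second does not, so this prints 50.0.
print(accuracy(np.array([[0.9, 0.1], [0.2, 0.8]]),
               np.array([[1.0, 0.0], [1.0, 0.0]])))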
# Run the training.
with tf.Session(graph=graph) as session:
    # This is a one-time operation which ensures the parameters get initialized as
    # we described in the graph: random weights for the matrix, zeros for the
    # biases.
    tf.global_variables_initializer().run()
    print('Tensorflow graph initialized')
    for step in range(num_steps):
        # Run the computations. We tell .run() that we want to run the optimizer,
        # and get the loss value and the training predictions returned as numpy
        # arrays.
        _, l, predictions = session.run([optimizer, loss, train_prediction])
        if (step % 100 == 0):
            print('Loss at step %d: %f' % (step, l))
            print('Training accuracy: %.1f%%' % accuracy(
                predictions, train_labels[:train_subset, :]))
            # Calling .eval() on valid_prediction is basically like calling run(), but
            # just to get that one numpy array. Note that it recomputes all its graph
            # dependencies.
            print('Validation accuracy: %.1f%%' % accuracy(
                valid_prediction.eval(), valid_labels))
    print('Test accuracy: %.1f%%' % accuracy(test_prediction.eval(), test_labels))
#TODO plot graph from accuracy data
Tensorflow graph initialized
Loss at step 0: 19.005764  Training accuracy: 8.6%  Validation accuracy: 9.5%
Loss at step 100: 2.306310  Training accuracy: 71.9%  Validation accuracy: 70.7%
Loss at step 200: 1.862624  Training accuracy: 75.1%  Validation accuracy: 72.8%
Loss at step 300: 1.619365  Training accuracy: 76.3%  Validation accuracy: 73.7%
Loss at step 400: 1.452627  Training accuracy: 77.3%  Validation accuracy: 74.1%
Loss at step 500: 1.328036  Training accuracy: 77.8%  Validation accuracy: 74.4%
Loss at step 600: 1.230201  Training accuracy: 78.4%  Validation accuracy: 74.8%
Loss at step 700: 1.151069  Training accuracy: 79.0%  Validation accuracy: 74.9%
Loss at step 800: 1.085528  Training accuracy: 79.5%  Validation accuracy: 75.0%
Loss at step 900: 1.030110  Training accuracy: 79.9%  Validation accuracy: 75.1%
Loss at step 1000: 0.982457  Training accuracy: 80.3%  Validation accuracy: 75.3%
Loss at step 1100: 0.940917  Training accuracy: 80.7%  Validation accuracy: 75.3%
Loss at step 1200: 0.904299  Training accuracy: 81.0%  Validation accuracy: 75.5%
Loss at step 1300: 0.871726  Training accuracy: 81.6%  Validation accuracy: 75.5%
Loss at step 1400: 0.842531  Training accuracy: 81.8%  Validation accuracy: 75.6%
Loss at step 1500: 0.816180  Training accuracy: 82.2%  Validation accuracy: 75.7%
Loss at step 1600: 0.792249  Training accuracy: 82.4%  Validation accuracy: 75.9%
Loss at step 1700: 0.770396  Training accuracy: 82.6%  Validation accuracy: 76.0%
Loss at step 1800: 0.750347  Training accuracy: 82.8%  Validation accuracy: 76.2%
Loss at step 1900: 0.731872  Training accuracy: 83.0%  Validation accuracy: 76.2%
Loss at step 2000: 0.714789  Training accuracy: 83.3%  Validation accuracy: 76.2%
Loss at step 2100: 0.698939  Training accuracy: 83.5%  Validation accuracy: 76.3%
Loss at step 2200: 0.684192  Training accuracy: 83.7%  Validation accuracy: 76.4%
Loss at step 2300: 0.670434  Training accuracy: 84.0%  Validation accuracy: 76.5%
Loss at step 2400: 0.657569  Training accuracy: 84.1%  Validation accuracy: 76.4%
Loss at step 2500: 0.645511  Training accuracy: 84.3%  Validation accuracy: 76.4%
Loss at step 2600: 0.634187  Training accuracy: 84.5%  Validation accuracy: 76.5%
Loss at step 2700: 0.623530  Training accuracy: 84.7%  Validation accuracy: 76.4%
Loss at step 2800: 0.613483  Training accuracy: 84.9%  Validation accuracy: 76.4%
Loss at step 2900: 0.603993  Training accuracy: 85.0%  Validation accuracy: 76.5%
Loss at step 3000: 0.595014  Training accuracy: 85.2%  Validation accuracy: 76.5%
Loss at step 3100: 0.586504  Training accuracy: 85.4%  Validation accuracy: 76.5%
Loss at step 3200: 0.578426  Training accuracy: 85.6%  Validation accuracy: 76.6%
Loss at step 3300: 0.570746  Training accuracy: 85.8%  Validation accuracy: 76.6%
Loss at step 3400: 0.563433  Training accuracy: 85.8%  Validation accuracy: 76.6%
Loss at step 3500: 0.556461  Training accuracy: 86.0%  Validation accuracy: 76.7%
Loss at step 3600: 0.549805  Training accuracy: 86.1%  Validation accuracy: 76.7%
Loss at step 3700: 0.543443  Training accuracy: 86.2%  Validation accuracy: 76.6%
Loss at step 3800: 0.537355  Training accuracy: 86.4%  Validation accuracy: 76.7%
Loss at step 3900: 0.531522  Training accuracy: 86.5%  Validation accuracy: 76.7%
Loss at step 4000: 0.525927  Training accuracy: 86.5%  Validation accuracy: 76.7%
Loss at step 4100: 0.520556  Training accuracy: 86.7%  Validation accuracy: 76.8%
Loss at step 4200: 0.515394  Training accuracy: 86.8%  Validation accuracy: 76.8%
Loss at step 4300: 0.510427  Training accuracy: 86.9%  Validation accuracy: 76.8%
Loss at step 4400: 0.505645  Training accuracy: 87.0%  Validation accuracy: 76.9%
Loss at step 4500: 0.501034  Training accuracy: 87.1%  Validation accuracy: 76.9%
Loss at step 4600: 0.496588  Training accuracy: 87.3%  Validation accuracy: 77.0%
Loss at step 4700: 0.492295  Training accuracy: 87.5%  Validation accuracy: 77.0%
Loss at step 4800: 0.488147  Training accuracy: 87.6%  Validation accuracy: 77.0%
Loss at step 4900: 0.484136  Training accuracy: 87.7%  Validation accuracy: 77.0%
Loss at step 5000: 0.480255  Training accuracy: 87.8%  Validation accuracy: 77.0%
Loss at step 5100: 0.476498  Training accuracy: 87.9%  Validation accuracy: 76.9%
Loss at step 5200: 0.472857  Training accuracy: 88.0%  Validation accuracy: 77.0%
Loss at step 5300: 0.469328  Training accuracy: 88.0%  Validation accuracy: 76.9%
Loss at step 5400: 0.465905  Training accuracy: 88.1%  Validation accuracy: 77.0%
Loss at step 5500: 0.462581  Training accuracy: 88.2%  Validation accuracy: 77.0%
Loss at step 5600: 0.459354  Training accuracy: 88.2%  Validation accuracy: 77.0%
Loss at step 5700: 0.456219  Training accuracy: 88.2%  Validation accuracy: 77.0%
Loss at step 5800: 0.453170  Training accuracy: 88.3%  Validation accuracy: 77.0%
Loss at step 5900: 0.450206  Training accuracy: 88.3%  Validation accuracy: 77.1%
Loss at step 6000: 0.447320  Training accuracy: 88.4%  Validation accuracy: 77.1%
Loss at step 6100: 0.444511  Training accuracy: 88.4%  Validation accuracy: 77.2%
Loss at step 6200: 0.441775  Training accuracy: 88.5%  Validation accuracy: 77.2%
Loss at step 6300: 0.439109  Training accuracy: 88.5%  Validation accuracy: 77.2%
Loss at step 6400: 0.436509  Training accuracy: 88.6%  Validation accuracy: 77.2%
Loss at step 6500: 0.433974  Training accuracy: 88.6%  Validation accuracy: 77.2%
Loss at step 6600: 0.431500  Training accuracy: 88.7%  Validation accuracy: 77.2%
Loss at step 6700: 0.429086  Training accuracy: 88.8%  Validation accuracy: 77.2%
Loss at step 6800: 0.426728  Training accuracy: 88.8%  Validation accuracy: 77.2%
Loss at step 6900: 0.424425  Training accuracy: 88.9%  Validation accuracy: 77.2%
Loss at step 7000: 0.422174  Training accuracy: 89.0%  Validation accuracy: 77.2%
Loss at step 7100: 0.419974  Training accuracy: 89.1%  Validation accuracy: 77.1%
Loss at step 7200: 0.417823  Training accuracy: 89.1%  Validation accuracy: 77.2%
Loss at step 7300: 0.415718  Training accuracy: 89.2%  Validation accuracy: 77.2%
Loss at step 7400: 0.413659  Training accuracy: 89.3%  Validation accuracy: 77.2%
Loss at step 7500: 0.411643  Training accuracy: 89.3%  Validation accuracy: 77.2%
Loss at step 7600: 0.409670  Training accuracy: 89.4%  Validation accuracy: 77.2%
Loss at step 7700: 0.407737  Training accuracy: 89.5%  Validation accuracy: 77.2%
Loss at step 7800: 0.405843  Training accuracy: 89.5%  Validation accuracy: 77.2%
Loss at step 7900: 0.403987  Training accuracy: 89.6%  Validation accuracy: 77.2%
Loss at step 8000: 0.402168  Training accuracy: 89.7%  Validation accuracy: 77.2%
Loss at step 8100: 0.400384  Training accuracy: 89.7%  Validation accuracy: 77.2%
Loss at step 8200: 0.398635  Training accuracy: 89.8%  Validation accuracy: 77.2%
Loss at step 8300: 0.396918  Training accuracy: 89.8%  Validation accuracy: 77.1%
Loss at step 8400: 0.395234  Training accuracy: 89.8%  Validation accuracy: 77.1%
Loss at step 8500: 0.393582  Training accuracy: 89.8%  Validation accuracy: 77.2%
Loss at step 8600: 0.391959  Training accuracy: 89.9%  Validation accuracy: 77.2%
Loss at step 8700: 0.390365  Training accuracy: 89.9%  Validation accuracy: 77.2%
Loss at step 8800: 0.388800  Training accuracy: 90.0%  Validation accuracy: 77.2%
Loss at step 8900: 0.387263  Training accuracy: 90.0%  Validation accuracy: 77.2%
Loss at step 9000: 0.385752  Training accuracy: 90.1%  Validation accuracy: 77.3%
Loss at step 9100: 0.384267  Training accuracy: 90.1%  Validation accuracy: 77.2%
Loss at step 9200: 0.382808  Training accuracy: 90.2%  Validation accuracy: 77.2%
Loss at step 9300: 0.381373  Training accuracy: 90.2%  Validation accuracy: 77.2%
Loss at step 9400: 0.379962  Training accuracy: 90.3%  Validation accuracy: 77.2%
Loss at step 9500: 0.378574  Training accuracy: 90.3%  Validation accuracy: 77.2%
Loss at step 9600: 0.377209  Training accuracy: 90.3%  Validation accuracy: 77.2%
Loss at step 9700: 0.375866  Training accuracy: 90.3%  Validation accuracy: 77.2%
Loss at step 9800: 0.374544  Training accuracy: 90.4%  Validation accuracy: 77.2%
Loss at step 9900: 0.373243  Training accuracy: 90.4%  Validation accuracy: 77.2%
Test accuracy: 84.2%
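# Sketch for the "plot graph from accuracy data" TODO above: if the training loop
# is changed to append each reported step, training accuracy and validation accuracy
# to lists (the names below are hypothetical, not defined by the code above), they
# can be plotted with matplotlib:
import matplotlib.pyplot as plt
def plot_accuracy(steps, train_acc, valid_acc):
    plt.plot(steps, train_acc, label='training accuracy')
    plt.plot(steps, valid_acc, label='validation accuracy')
    plt.xlabel('step')
    plt.ylabel('accuracy (%)')
    plt.legend()
    plt.show()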
# Let's now switch to stochastic gradient descent training instead, which is much faster.
batch_size = 128
graph = tf.Graph()
with graph.as_default():
    # Input data. For the training data, we use a placeholder that will be fed
    # at run time with a training minibatch.
    tf_train_dataset = tf.placeholder(tf.float32,
                                      shape=(batch_size, image_size * image_size))
    tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
    tf_valid_dataset = tf.constant(valid_dataset)
    tf_test_dataset = tf.constant(test_dataset)
    # Variables.
    weights = tf.Variable(
        tf.truncated_normal([image_size * image_size, num_labels]))
    biases = tf.Variable(tf.zeros([num_labels]))
    # Training computation.
    logits = tf.matmul(tf_train_dataset, weights) + biases
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=tf_train_labels, logits=logits))
    # Optimizer.
    optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
    # Predictions for the training, validation, and test data.
    train_prediction = tf.nn.softmax(logits)
    valid_prediction = tf.nn.softmax(
        tf.matmul(tf_valid_dataset, weights) + biases)
    test_prediction = tf.nn.softmax(tf.matmul(tf_test_dataset, weights) + biases)
num_steps = 10000
with tf.Session(graph=graph) as session:
    tf.global_variables_initializer().run()
    print("Initialized")
    for step in range(num_steps):
        # Pick an offset within the training data, which has been randomized.
        # Note: we could use better randomization across epochs.
        offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
        # Generate a minibatch.
        batch_data = train_dataset[offset:(offset + batch_size), :]
        batch_labels = train_labels[offset:(offset + batch_size), :]
        # Prepare a dictionary telling the session where to feed the minibatch.
        # The key of the dictionary is the placeholder node of the graph to be fed,
        # and the value is the numpy array to feed to it.
        feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}
        _, l, predictions = session.run(
            [optimizer, loss, train_prediction], feed_dict=feed_dict)
        if (step % 500 == 0):
            print("Minibatch loss at step %d: %f" % (step, l))
            print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
            print("Validation accuracy: %.1f%%" % accuracy(
                valid_prediction.eval(), valid_labels))
    print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels))
#TODO measure time
Initialized
Minibatch loss at step 0: 17.899033  Minibatch accuracy: 7.8%  Validation accuracy: 10.0%
Minibatch loss at step 500: 1.193777  Minibatch accuracy: 79.7%  Validation accuracy: 75.0%
Minibatch loss at step 1000: 1.287950  Minibatch accuracy: 82.0%  Validation accuracy: 76.5%
Minibatch loss at step 1500: 0.656950  Minibatch accuracy: 81.2%  Validation accuracy: 77.4%
Minibatch loss at step 2000: 0.924349  Minibatch accuracy: 78.9%  Validation accuracy: 77.3%
Minibatch loss at step 2500: 1.161976  Minibatch accuracy: 74.2%  Validation accuracy: 78.2%
Minibatch loss at step 3000: 0.851813  Minibatch accuracy: 80.5%  Validation accuracy: 78.8%
Minibatch loss at step 3500: 0.921817  Minibatch accuracy: 81.2%  Validation accuracy: 78.6%
Minibatch loss at step 4000: 0.835667  Minibatch accuracy: 80.5%  Validation accuracy: 79.0%
Minibatch loss at step 4500: 0.808454  Minibatch accuracy: 79.7%  Validation accuracy: 79.4%
Minibatch loss at step 5000: 0.658319  Minibatch accuracy: 81.2%  Validation accuracy: 79.5%
Minibatch loss at step 5500: 0.857714  Minibatch accuracy: 74.2%  Validation accuracy: 79.5%
Minibatch loss at step 6000: 0.938224  Minibatch accuracy: 76.6%  Validation accuracy: 79.7%
Minibatch loss at step 6500: 0.562490  Minibatch accuracy: 81.2%  Validation accuracy: 80.0%
Minibatch loss at step 7000: 0.773726  Minibatch accuracy: 76.6%  Validation accuracy: 80.2%
Minibatch loss at step 7500: 0.964890  Minibatch accuracy: 78.9%  Validation accuracy: 80.0%
Minibatch loss at step 8000: 1.113154  Minibatch accuracy: 72.7%  Validation accuracy: 80.1%
Minibatch loss at step 8500: 0.644020  Minibatch accuracy: 83.6%  Validation accuracy: 80.6%
Minibatch loss at step 9000: 0.761561  Minibatch accuracy: 82.0%  Validation accuracy: 80.0%
Minibatch loss at step 9500: 0.601618  Minibatch accuracy: 85.2%  Validation accuracy: 80.9%
Test accuracy: 86.6%
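# Sketch for the "measure time" TODO: wall-clock timing of the SGD run with the
# standard time module, assuming the training session above is placed between
# the two time.time() calls.
import time
start_time = time.time()
# ... run the tf.Session training block here ...
print('SGD training took %.1f seconds' % (time.time() - start_time))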
# Turn the logistic regression example with SGD into a 1-hidden-layer neural network
# with rectified linear units (tf.nn.relu()) and 1024 hidden nodes.
# This model should improve your validation / test accuracy.
#TODO implement the network described above (a minimal sketch follows) and the remaining TODOs
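# A minimal sketch (not a tuned solution) of the requested 1-hidden-layer network
# with 1024 ReLU units, reusing batch_size, image_size, num_labels and the
# reformatted datasets defined above. The SGD training loop above can then be
# reused unchanged with this graph.
num_hidden = 1024
graph = tf.Graph()
with graph.as_default():
    tf_train_dataset = tf.placeholder(tf.float32,
                                      shape=(batch_size, image_size * image_size))
    tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
    tf_valid_dataset = tf.constant(valid_dataset)
    tf_test_dataset = tf.constant(test_dataset)
    # Hidden layer: 784 -> 1024 with ReLU; output layer: 1024 -> 10.
    weights1 = tf.Variable(tf.truncated_normal([image_size * image_size, num_hidden]))
    biases1 = tf.Variable(tf.zeros([num_hidden]))
    weights2 = tf.Variable(tf.truncated_normal([num_hidden, num_labels]))
    biases2 = tf.Variable(tf.zeros([num_labels]))
    hidden = tf.nn.relu(tf.matmul(tf_train_dataset, weights1) + biases1)
    logits = tf.matmul(hidden, weights2) + biases2
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=tf_train_labels, logits=logits))
    optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
    # Predictions reuse the same weights on the validation and test constants.
    train_prediction = tf.nn.softmax(logits)
    valid_prediction = tf.nn.softmax(
        tf.matmul(tf.nn.relu(tf.matmul(tf_valid_dataset, weights1) + biases1),
                  weights2) + biases2)
    test_prediction = tf.nn.softmax(
        tf.matmul(tf.nn.relu(tf.matmul(tf_test_dataset, weights1) + biases1),
                  weights2) + biases2)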