import tensorflow as tf
# Download the MNIST dataset:
# 60,000 handwritten digit images of 28 * 28 pixels each,
# represented as a [60000, 784] tensor: [image index, pixel index]
from tensorflow.examples.tutorials.mnist import input_data
# `one-hot vectors`: exactly one element is 1 and every other dimension is 0,
# so the labels form a [60000, 10] tensor: [image index, digit the image shows]
mnist = input_data.read_data_sets("MNIST_data/", one_hot = True)
Extracting MNIST_data/train-images-idx3-ubyte.gz
Extracting MNIST_data/train-labels-idx1-ubyte.gz
Extracting MNIST_data/t10k-images-idx3-ubyte.gz
Extracting MNIST_data/t10k-labels-idx1-ubyte.gz
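# A quick optional shape check: read_data_sets actually splits the 60,000
# training images into 55,000 train + 5,000 validation examples
print(mnist.train.images.shape)   # (55000, 784)
print(mnist.train.labels.shape)   # (55000, 10)
print(mnist.test.images.shape)    # (10000, 784)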
# Placeholder for the flattened 28 * 28 = 784 pixel input
# None means the first (batch) dimension can be of any size
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])
z = tf.placeholder(tf.float32) # dropout keep_prob (e.g. 0.8 means 80% of the neurons stay active)
lr = tf.Variable(0.001, dtype = tf.float32) # learning rate, decayed over time so gradient descent settles closer to the minimum
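# A quick sanity check of the decay schedule used in the training loop below
# (lr = 0.001 * 0.95 ** epoch); plain Python, nothing graph-related:
for epoch in (0, 10, 50, 100):
    print(epoch, 0.001 * 0.95 ** epoch)   # 0.001, ~6.0e-4, ~7.7e-5, ~5.9e-6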
# Weights (truncated random normal distribution) and biases (initialized to 0.1)
W1 = tf.Variable(tf.truncated_normal([784, 600], stddev = 0.1))
b1 = tf.Variable(tf.zeros([600]) + 0.1)
L1 = tf.nn.tanh(tf.matmul(x, W1) + b1)
L1_drop = tf.nn.dropout(L1, z)
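# A minimal standalone sketch of tf.nn.dropout semantics (not part of the
# model): with keep_prob = 0.8 each element survives with probability 0.8,
# and survivors are scaled by 1 / 0.8 so the expected sum is unchanged
demo = tf.nn.dropout(tf.ones([1, 8]), 0.8)
with tf.Session() as s:
    print(s.run(demo))   # e.g. [[1.25 1.25 0. 1.25 1.25 1.25 0. 1.25]]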
# Hidden layer
W2 = tf.Variable(tf.truncated_normal([600, 400], stddev = 0.1))
b2 = tf.Variable(tf.zeros([400]) + 0.1)
L2 = tf.nn.tanh(tf.matmul(L1_drop, W2) + b2)
L2_drop = tf.nn.dropout(L2, z)
W3 = tf.Variable(tf.truncated_normal([400, 10], stddev = 0.1))
b3 = tf.Variable(tf.zeros([10]) + 0.1)
# Softmax regression model; keep the raw logits separate so the loss
# below can apply its own numerically stable softmax
logits = tf.matmul(L2_drop, W3) + b3
prediction = tf.nn.softmax(logits)
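# Softmax turns the 10 logits per image into a probability distribution;
# a tiny numpy check with made-up numbers:
import numpy as np
v = np.array([2.0, 1.0, 0.1])
print(np.exp(v) / np.exp(v).sum())   # ~[0.659 0.242 0.099], sums to 1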
# Quadratic loss function
# loss = tf.reduce_mean(tf.square(y - prediction))
# Cross-entropy loss function
# loss = tf.reduce_mean(-tf.reduce_sum(y * tf.log(prediction), reduction_indices = [1]))
# softmax_cross_entropy_with_logits expects raw logits; feeding the softmax
# output `prediction` here would apply softmax twice
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels = y, logits = logits))
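# A minimal numpy sketch of what softmax_cross_entropy_with_logits computes,
# and why it wants raw logits: it applies a numerically stable log-softmax
# internally before the cross-entropy sum
import numpy as np
def softmax_xent(logits, labels):
    shifted = logits - logits.max(axis = 1, keepdims = True)   # stability shift
    log_softmax = shifted - np.log(np.exp(shifted).sum(axis = 1, keepdims = True))
    return -(labels * log_softmax).sum(axis = 1)               # per-example loss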
# Gradient descent
# train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)
train_step = tf.train.AdamOptimizer(lr).minimize(loss)
# Evaluate the model:
# check whether the index of the largest value in y matches that of prediction
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
# Accuracy:
# cast the boolean list correct_prediction to float32
# [True, False, False, ...] => [1.0, 0.0, 0.0, ...]
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
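# A tiny numpy illustration of the argmax / equal / mean pipeline above,
# with made-up predictions for three samples:
import numpy as np
labels = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1]])
probs  = np.array([[0.2, 0.7, 0.1], [0.3, 0.5, 0.2], [0.1, 0.2, 0.7]])
print((labels.argmax(1) == probs.argmax(1)).astype(np.float32).mean())   # 0.6666667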
# Note: tf.device only affects ops created inside its scope; the graph above
# is already built, so this wrapper is effectively a no-op here (TensorFlow
# will place ops on the GPU automatically if one is available)
with tf.device('/gpu:0'):
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        batch_size = 100
        # the default split holds 55,000 training images (5,000 go to validation)
        n_batches = mnist.train.num_examples // batch_size
        for epoch in range(101):
            sess.run(tf.assign(lr, 0.001 * (0.95 ** epoch))) # decay the learning rate each epoch
            for batch_step in range(n_batches):
                batch_xs, batch_ys = mnist.train.next_batch(batch_size)
                sess.run(train_step, feed_dict = {x: batch_xs, y: batch_ys, z: 0.9973})
            if (epoch % 10) == 0:
                test_accuracy = sess.run(accuracy, feed_dict = {x: mnist.test.images, y: mnist.test.labels, z: 1.0})
                train_accuracy = sess.run(accuracy, feed_dict = {x: mnist.train.images, y: mnist.train.labels, z: 1.0})
                print("Epoch: ", epoch, "Accuracy: [", test_accuracy, ",", train_accuracy, "]")
Epoch: 0 Accuracy: [ 0.9573 , 0.962128 ]
Epoch: 10 Accuracy: [ 0.9803 , 0.994455 ]
Epoch: 20 Accuracy: [ 0.9795 , 0.997073 ]
Epoch: 30 Accuracy: [ 0.9816 , 0.997709 ]
Epoch: 40 Accuracy: [ 0.9828 , 0.997946 ]
Epoch: 50 Accuracy: [ 0.9823 , 0.998128 ]
Epoch: 60 Accuracy: [ 0.9828 , 0.998237 ]
Epoch: 70 Accuracy: [ 0.9828 , 0.998309 ]
Epoch: 80 Accuracy: [ 0.9831 , 0.998346 ]
Epoch: 90 Accuracy: [ 0.9828 , 0.9984 ]
Epoch: 100 Accuracy: [ 0.9831 , 0.9984 ]
# Quadratic loss function
Epoch: 0 Accuracy: 0.8394
Epoch: 10 Accuracy: 0.9067
Epoch: 20 Accuracy: 0.9142
Epoch: 30 Accuracy: 0.9187
Epoch: 40 Accuracy: 0.9199
Epoch: 50 Accuracy: 0.9219
# Cross-entropy loss function
Epoch: 0 Accuracy: 0.8262
Epoch: 10 Accuracy: 0.9183
Epoch: 20 Accuracy: 0.9224
Epoch: 30 Accuracy: 0.9232
Epoch: 40 Accuracy: 0.9273
Epoch: 50 Accuracy: 0.9274
# Hidden layers + dropout
Epoch: 0 Accuracy: [ 0.9176 , 0.915527 ]
Epoch: 10 Accuracy: [ 0.9565 , 0.963182 ]
Epoch: 20 Accuracy: [ 0.9669 , 0.975236 ]
Epoch: 30 Accuracy: [ 0.9718 , 0.982 ]
Epoch: 40 Accuracy: [ 0.9737 , 0.984836 ]
Epoch: 50 Accuracy: [ 0.9768 , 0.987036 ]
# AdamOptimizer
Epoch: 0 Accuracy: [ 0.9573 , 0.962128 ]
Epoch: 10 Accuracy: [ 0.9803 , 0.994455 ]
Epoch: 20 Accuracy: [ 0.9795 , 0.997073 ]
Epoch: 30 Accuracy: [ 0.9816 , 0.997709 ]
Epoch: 40 Accuracy: [ 0.9828 , 0.997946 ]
Epoch: 50 Accuracy: [ 0.9823 , 0.998128 ]
Epoch: 60 Accuracy: [ 0.9828 , 0.998237 ]
Epoch: 70 Accuracy: [ 0.9828 , 0.998309 ]
Epoch: 80 Accuracy: [ 0.9831 , 0.998346 ]
Epoch: 90 Accuracy: [ 0.9828 , 0.9984 ]
Epoch: 100 Accuracy: [ 0.9831 , 0.9984 ]
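# Note: feeding all 55,000 training images in a single sess.run (as above) can
# exhaust GPU memory on smaller cards. A minimal sketch of batched evaluation,
# reusing the accuracy / x / y / z tensors and an open session `sess`:
def batched_accuracy(sess, images, labels, batch_size = 1000):
    total = 0.0
    for i in range(0, len(images), batch_size):
        xs, ys = images[i:i + batch_size], labels[i:i + batch_size]
        total += sess.run(accuracy, feed_dict = {x: xs, y: ys, z: 1.0}) * len(xs)
    return total / len(images)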