import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
Extracting MNIST_data\train-images-idx3-ubyte.gz
Extracting MNIST_data\train-labels-idx1-ubyte.gz
Extracting MNIST_data\t10k-images-idx3-ubyte.gz
Extracting MNIST_data\t10k-labels-idx1-ubyte.gz
# Weight initializer: truncated normal keeps initial weights within two
# standard deviations of the mean, avoiding extreme starting values
def weight_variable(shape):
    initial = tf.truncated_normal(shape=shape, stddev=0.1)
    return tf.Variable(initial_value=initial)
# Bias initializer: a small positive constant so ReLU units start active
def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial_value=initial)
# Convolution layer
def conv2d(x, W):
    # x: [batch, in_height, in_width, in_channels]
    #    [batch size, input image height and width, channels (grayscale: 1; color: 3)]
    # W: [filter_height, filter_width, in_channels, out_channels]
    #    [filter height, filter width, input channels, output channels]
    # strides: [1, stride, stride, 1]
    #    [fixed at 1, step size in the x/y directions, fixed at 1]
    # padding: whether to zero-pad the border ('SAME' keeps the spatial size)
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
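# A minimal sketch of the output-size arithmetic (the helper below is
# hypothetical, not part of the model): with padding='SAME' the output
# spatial size is ceil(in_size / stride); with 'VALID' it is
# ceil((in_size - filter_size + 1) / stride).
import math
def conv_out_size(in_size, filter_size, stride, padding):
    if padding == 'SAME':
        return math.ceil(in_size / stride)
    return math.ceil((in_size - filter_size + 1) / stride)
assert conv_out_size(28, 5, 1, 'SAME') == 28   # SAME keeps 28x28 at stride 1
assert conv_out_size(28, 5, 1, 'VALID') == 24  # VALID shrinks to 24x24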
# Max-pooling layer
def max_pool_2x2(x):
    # x: [batch, in_height, in_width, in_channels]
    #    [batch size, input image height and width, channels (grayscale: 1; color: 3)]
    # ksize: [fixed at 1, window height, window width, fixed at 1]
    # strides: [1, stride, stride, 1]
    #    [fixed at 1, step size in the x/y directions, fixed at 1]
    # padding: whether to zero-pad the border
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
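# Sketch of what 2x2/stride-2 max pooling does (NumPy, hypothetical values):
# each non-overlapping 2x2 window keeps its maximum, so a 4x4 map becomes 2x2.
import numpy as np
a = np.arange(16, dtype=np.float32).reshape(4, 4)
pooled = a.reshape(2, 2, 2, 2).max(axis=(1, 3))  # shape (2, 2)
# pooled == [[ 5.,  7.], [13., 15.]]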
# Placeholders: flattened 28x28 images and one-hot labels for 10 classes
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])
# Learning rate (reassigned with a decaying schedule in the training loop)
lr = tf.Variable(0.001, dtype=tf.float32)
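# Alternative sketch (not wired into the training below): TF 1.x also ships
# tf.train.exponential_decay, which computes the same lr * 0.95**epoch
# schedule from a step counter; decay_steps=550 is an assumption matching
# one epoch of 55000 / 100 batches.
global_step = tf.Variable(0, trainable=False)
decayed_lr = tf.train.exponential_decay(0.0001, global_step,
                                        decay_steps=550, decay_rate=0.95,
                                        staircase=True)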
# Reshape x into a 4D tensor (-1 infers the batch dimension):
# [batch, in_height, in_width, in_channels]
x_image = tf.reshape(x, [-1, 28, 28, 1])
# Initialize the weights and biases of the first convolutional layer
# 5x5 sampling window; 32 kernels (output channels) extract features from
# 1 input plane (input channels), producing 32 feature maps
W_conv1 = weight_variable([5, 5, 1, 32])
# 32 kernels, one bias per kernel
b_conv1 = bias_variable([32])
# Apply the convolution and add the bias
conv2d_1 = conv2d(x_image, W_conv1) + b_conv1
# ReLU activation gives the first convolutional layer's output
h_conv1 = tf.nn.relu(conv2d_1)
# Apply the max-pooling operation
h_pool1 = max_pool_2x2(h_conv1)
# Second convolutional layer + activation + pooling
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
conv2d_2 = conv2d(h_pool1, W_conv2) + b_conv2
h_conv2 = tf.nn.relu(conv2d_2)
h_pool2 = max_pool_2x2(h_conv2)
# After the first convolution (SAME padding), the 28x28 image stays 28x28
# After the first pooling, the 2x2 window halves it to 14x14
# After the second convolution, the feature maps stay 14x14
# After the second pooling, the 2x2 window halves them to 7x7
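# Sanity check (a quick sketch): the static shapes should match the size
# bookkeeping above; printed while building the graph, so the batch
# dimension shows as unknown.
print(h_conv1.shape)  # (?, 28, 28, 32)
print(h_pool1.shape)  # (?, 14, 14, 32)
print(h_conv2.shape)  # (?, 14, 14, 64)
print(h_pool2.shape)  # (?, 7, 7, 64)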
# The first fully connected layer has 1000 neurons, connected to the
# 7 * 7 * 64 = 3136 neurons of the previous layer
W_fc1 = weight_variable([7 * 7 * 64, 1000])
b_fc1 = bias_variable([1000])
# Flatten the pooled feature maps into one vector per example (-1 infers the batch size)
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
# Matrix multiply and add the bias
wx_plus_b1 = tf.matmul(h_pool2_flat, W_fc1) + b_fc1
# ReLU activation
h_fc1 = tf.nn.relu(wx_plus_b1)
# Dropout regularization: keep_prob is the probability of keeping each unit
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
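# Sketch of tf.nn.dropout's behavior (NumPy, hypothetical values): each unit
# is kept with probability keep_prob and scaled by 1/keep_prob, so the
# expected activation is unchanged between training and test time.
import numpy as np
rng = np.random.RandomState(0)
acts = np.ones(10000, dtype=np.float32)
keep = 0.68
mask = rng.uniform(size=acts.shape) < keep
dropped = np.where(mask, acts / keep, 0.0)
# dropped.mean() is close to 1.0, the un-dropped expectation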
# Second fully connected layer
W_fc2 = weight_variable([1000, 10])
b_fc2 = bias_variable([10])
# Compute the output
wx_plus_b2 = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
prediction = tf.nn.softmax(wx_plus_b2)
# Cross-entropy loss function
# Note: softmax_cross_entropy_with_logits applies softmax internally, so it
# must receive the raw logits wx_plus_b2, not the already-softmaxed prediction
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=wx_plus_b2))
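# Why raw logits (a small NumPy sketch, hypothetical values): feeding the
# softmaxed `prediction` would apply softmax twice, flattening the
# probabilities and weakening the gradient signal.
import numpy as np
logits_np = np.array([2.0, 0.0, -2.0])
p = np.exp(logits_np) / np.exp(logits_np).sum()  # softmax once: ~[0.87, 0.12, 0.02]
pp = np.exp(p) / np.exp(p).sum()                 # softmax twice: ~[0.53, 0.25, 0.22]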
# Adam optimizer, paired with a decaying learning rate
train_step = tf.train.AdamOptimizer(lr).minimize(cross_entropy)
# argmax returns the index of the largest value along the given axis
# Compute the accuracy
correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
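# Tiny sketch of the accuracy computation (NumPy, hypothetical values):
# argmax picks the predicted class per row; equal/cast/mean then give the
# fraction of correct predictions.
import numpy as np
preds = np.array([[0.1, 0.9], [0.8, 0.2]])
labels_1h = np.array([[0, 1], [0, 1]])
acc = (preds.argmax(1) == labels_1h.argmax(1)).mean()  # 0.5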
with tf.device('/gpu:0'):
    # Note: the graph above was built outside this device scope, so the
    # scope only affects ops created inside it; with a GPU available, TF
    # places the existing ops there by default anyway.
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        batch_size = 100
        # The train split holds 55000 images (5000 go to validation)
        n_batches = mnist.train.num_examples // batch_size
        for epoch in range(101):
            # Decaying learning rate: 0.0001 * 0.95^epoch
            sess.run(tf.assign(lr, 0.0001 * (0.95 ** epoch)))
            for batch_step in range(n_batches):
                batch_xs, batch_ys = mnist.train.next_batch(batch_size)
                # Train with dropout: keep 68% of the fully connected units
                sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 0.68})
            if epoch % 20 == 0:
                # Evaluate on the test set with dropout disabled (keep_prob = 1.0)
                test_acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels, keep_prob: 1.0})
                print("Epoch:", epoch, "Accuracy:", test_acc)
Epoch: 0 Accuracy: 0.954
Epoch: 20 Accuracy: 0.9912
Epoch: 40 Accuracy: 0.9916
Epoch: 60 Accuracy: 0.9924
Epoch: 80 Accuracy: 0.9924
Epoch: 100 Accuracy: 0.9926