In [1]:
import tensorflow as tf
# Download MNIST datasource
# 6w个 28 * 28个像素的手写数字图片集
# 用 [60000, 784]的张量表示 [图片索引, 图片像素点索引]
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.contrib.tensorboard.plugins import projector

# `one-hot vectors`:向量中只有一个数据为 1,其余维度只能为 0
# 转化为 [60000, 10]的张量表示 [图片索引, 图片表示的数值]
mnist = input_data.read_data_sets("MNIST_data/", one_hot = True)

image_num = 10000
embedding = tf.Variable(tf.stack(mnist.test.images[:image_num]), trainable = False, name = 'embedding')
Extracting MNIST_data/train-images-idx3-ubyte.gz
Extracting MNIST_data/train-labels-idx1-ubyte.gz
Extracting MNIST_data/t10k-images-idx3-ubyte.gz
Extracting MNIST_data/t10k-labels-idx1-ubyte.gz
In [2]:
# 定义求统计指标的方法
def summaries(var):
    # 申明一个命名空间
    with tf.name_scope('summaries'):
        tf.summary.scalar('max', tf.reduce_max(var))        #最大值
        tf.summary.scalar('min', tf.reduce_min(var))         #最小值
        mean = tf.reduce_mean(var)
        tf.summary.scalar('mean', mean)                          #平均值
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar('stddev', stddev)                      #标准差
        tf.summary.histogram('histogram', var)               #直方图
In [3]:
with tf.name_scope('input'):
    # 28 * 28 = 784的占位符
    # None表示可能是任何数值
    x = tf.placeholder(tf.float32, [None, 784], name = 'x_input')
    y = tf.placeholder(tf.float32, [None, 10], name = 'y_input')
    # 用于 drop_out操作时的依据 (0.8: 80%的神经元在工作)
    z = tf.placeholder(tf.float32, name = 'drop_output_input')
    lr = tf.Variable(0.001, dtype = tf.float32)           # 用于不断递减的学习率,使得梯度下降到最低点时,能更好地命中

with tf.name_scope('layer'):
    with tf.name_scope('layer_1'):
        # 权重值(截断的随机正太分布) 和 偏置量 (0.1)
        W1 = tf.Variable(tf.truncated_normal([784, 600], stddev = 0.1), name = 'W1')
        b1 = tf.Variable(tf.zeros([600]) + 0.1, name = 'b1')
        # 调用函数求权重、偏置值的统计指标
        summaries(W1)
        summaries(b1)
        L1 = tf.nn.tanh(tf.matmul(x, W1) + b1)
        L1_drop = tf.nn.dropout(L1, z)

    with tf.name_scope('layer_2'):
        # 隐藏层
        W2 = tf.Variable(tf.truncated_normal([600, 400], stddev = 0.1), name = 'W2')
        b2 = tf.Variable(tf.zeros([400]) + 0.1, name = 'b2')
        summaries(W2)
        summaries(b2)
        L2 = tf.nn.tanh(tf.matmul(L1_drop, W2) + b2)
        L2_drop = tf.nn.dropout(L2, z)

    with tf.name_scope('layer_output'):
        W3 = tf.Variable(tf.truncated_normal([400, 10], stddev = 0.1), name = 'W3')
        b3 = tf.Variable(tf.zeros([10]) + 0.1, name = 'b3')
        summaries(W3)
        summaries(b3)

    with tf.name_scope('softmax'):
        # softmax回归模型
        prediction = tf.nn.softmax(tf.matmul(L2_drop, W3) + b3)

with tf.name_scope('loss'):
    # 二次 Loss Func
    # loss = tf.reduce_mean(tf.square(y - prediction))
    # 交叉熵 Loss Func
    # loss = tf.reduce_mean(-tf.reduce_sum(y * tf.log(prediction), reduction_indices=[1]))
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels = y, logits = prediction))
    tf.summary.scalar('loss', loss)

with tf.name_scope('train'):
    # 梯度下降
    # train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)
    train_step = tf.train.AdamOptimizer(lr).minimize(loss)

with tf.name_scope('accuracy'):
    with tf.name_scope('correct_prediction'):
        # 评估模型
        # 判断 一维张量 y、prediction中最大值的位置是否相等
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction,1))
    with tf.name_scope('accuracy'):
        # 准确率
        # 将 布尔型列表 corrent_prediction转化为 float32类型
        # [true, false, false, ...]  => [1.0, 0., 0., ...]
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        tf.summary.scalar('accuracy', accuracy)

# 获得所有定义的 Summary
summary_all = tf.summary.merge_all()

# 配置运行资源
session_config = tf.ConfigProto(device_count={"CPU": 8}, inter_op_parallelism_threads = 32, intra_op_parallelism_threads = 48)

with tf.Session(config = session_config) as sess:
    tf.global_variables_initializer().run()

    # 产生 MetaData文件
    base_path = 'E:/Jupyter/_drafts/ipython/TensorFlow/tensorboard/'
    metadata_path = base_path + 'metadata.tsv'
    if tf.gfile.Exists(metadata_path):
        tf.gfile.DeleteRecursively(metadata_path)
    with open(metadata_path, 'w') as f:
        labels = sess.run(tf.argmax(mnist.test.labels[:], 1))
        for i in range(image_num):
            f.write(str(labels[i]) + '\n')   

    writer = tf.summary.FileWriter(base_path, sess.graph)

    saver = tf.train.Saver()
    config = projector.ProjectorConfig()
    embed = config.embeddings.add()
    embed.tensor_name = embedding.name
    embed.metadata_path = metadata_path
    embed.sprite.image_path = base_path + 'data/mnist_10k_sprite.png'
    embed.sprite.single_image_dim.extend([28, 28])
    projector.visualize_embeddings(writer, config)

    batch_size = 100
    batch = (int) (60000 / batch_size)
    # batch = mnist.train.num_examples

    # 这里主要是为了测试 TensorBoard,所以只训练 5次
    summary_count = 0
    for _ in range(5):
        sess.run(tf.assign(lr, 0.001 * (0.95 ** _)))
        for batch_step in range(batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # 真正开始生成 metadata
            run_options = tf.RunOptions(trace_level = tf.RunOptions.FULL_TRACE)
            run_metadata = tf.RunMetadata()
            summary_, result = sess.run([summary_all, train_step], feed_dict = {x: batch_xs, y: batch_ys, z: 0.997}, options = run_options, run_metadata = run_metadata)
            summary_count = summary_count + 1
            writer.add_run_metadata(run_metadata, 'step%03d' % summary_count)
            writer.add_summary(summary_, summary_count)

        test_accuracy = sess.run(accuracy, feed_dict = {x: mnist.test.images, y: mnist.test.labels, z: 1.0})
        train_accuracy = sess.run(accuracy, feed_dict = {x: mnist.train.images, y: mnist.train.labels, z: 1.0})
        print("Batch: ", _, "Accuracy: [", test_accuracy, ",", train_accuracy, "]")

    saver.save(sess, base_path + 'minst_model.ckpt', global_step = summary_count)
Batch:  0 Accuracy: [ 0.9549 , 0.958182 ]
Batch:  1 Accuracy: [ 0.9655 , 0.971745 ]
Batch:  2 Accuracy: [ 0.9703 , 0.980455 ]
Batch:  3 Accuracy: [ 0.9708 , 0.983745 ]
Batch:  4 Accuracy: [ 0.9746 , 0.986818 ]