#!/usr/bin/env python
# coding: utf-8

# Install
# =====
# Installing a TensorFlow environment from the command line:
# 1. conda create -n tensorflow python=3.5
# 2. activate tensorflow
# 3. conda install jupyter
#
# Or use Anaconda Navigator to manage environments instead.
#
# Tutorial
# =====
# When one learns how to program, there's a tradition that the first thing you do is print "Hello World." Just as programming has Hello World, machine learning has MNIST.
#
# MNIST is a simple computer vision dataset. It consists of images of handwritten digits, together with a label for each image telling us which digit it is.
#
# In this tutorial, we're going to train a model to look at images and predict what digits they are. Our goal isn't to train a really elaborate model that achieves state-of-the-art performance, but rather to dip a toe into using TensorFlow. As such, we start with a very simple model, called a softmax regression.
#
# The actual code for this tutorial is very short, and all the interesting stuff happens in just three lines. However, it is very important to understand the ideas behind it: both how TensorFlow works and the core machine learning concepts.

# ## Read input and test data

# In[1]:

from tensorflow.examples.tutorials.mnist import input_data

# Download (if necessary) and load the MNIST training, validation, and test
# sets; one_hot=True encodes each label as a 10-dimensional indicator vector.
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

# ## Implementing the model
#
# $$ y = \text{softmax}(x W + b) $$

# In[5]:

import tensorflow as tf

# Each 28x28 image is flattened into a 784-dimensional vector; None lets us
# feed batches of any size.
x = tf.placeholder(tf.float32, [None, 784])

# Weights and biases for the 10 digit classes, initialized to zero.
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))

# Predicted class probabilities.
y = tf.nn.softmax(tf.matmul(x, W) + b)

# ## Training

# In[12]:

# Placeholder for the one-hot ground-truth labels.
y_ = tf.placeholder(tf.float32, [None, 10])

# Cross-entropy loss, summed over classes and averaged over the batch.
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)

# Run 1000 steps of stochastic gradient descent, each on a mini-batch of
# 200 training examples.
for i in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(200)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})

# ## Evaluating the Model

# In[13]:

# A prediction is correct when the most probable class matches the true label;
# accuracy is the fraction of correct predictions over the test set.
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
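
# ## Inspecting a single prediction
#
# As a quick sanity check (an illustrative addition, not part of the original
# tutorial), we can ask the trained model for its prediction on one test image
# and compare it with the true label:

# In[ ]:

first_image = mnist.test.images[:1]            # shape (1, 784)
predicted = sess.run(tf.argmax(y, 1), feed_dict={x: first_image})
actual = mnist.test.labels[:1].argmax(axis=1)  # decode the one-hot label
print("predicted: %d, actual: %d" % (predicted[0], actual[0]))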
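
# ## What softmax computes
#
# To make the model formula above concrete, here is a minimal NumPy sketch of
# the softmax function for a single score vector (tf.nn.softmax applies the
# same mapping to each row of a batch):

# In[ ]:

import numpy as np

def softmax(scores):
    """Exponentiate and normalize so the outputs sum to 1."""
    exps = np.exp(scores - np.max(scores))  # subtract the max for numerical stability
    return exps / np.sum(exps)

# Higher scores get exponentially more probability mass:
print(softmax(np.array([2.0, 1.0, 0.1])))  # approximately [0.659, 0.242, 0.099]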
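
# ## A note on numerical stability
#
# The hand-rolled cross-entropy above can misbehave: if the softmax output for
# the true class underflows to exactly 0, tf.log(y) yields -inf and the loss
# becomes NaN. A common, more robust alternative (a sketch, not what this
# tutorial trains with) is to let TensorFlow compute softmax and cross-entropy
# together from the raw logits:

# In[ ]:

logits = tf.matmul(x, W) + b  # unnormalized scores, before the softmax
stable_cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=logits))
stable_train_step = tf.train.GradientDescentOptimizer(0.5).minimize(stable_cross_entropy)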