#!/usr/bin/env python
# coding: utf-8

# ### Using GradientDescentOptimizer for a linear regression model
# - Code that trains on one value at a time

# In[88]:


import tensorflow as tf
import numpy as np

# Our real model of y = 2 * x + 6
def y_real(x):
    return tf.multiply(x, 2) + 6

# Our model of y = w * x + b
def y_model(x, w, b):
    return tf.multiply(x, w) + b

# x is a placeholder for our training input
x = tf.placeholder("float")

# w and b are the variables storing our values. They are initialised with starting "guesses"
w = tf.Variable(1.0, name="w")
b = tf.Variable(1.0, name="b")

# Our error is defined as the square of the difference
error = tf.square(y_real(x) - y_model(x, w, b))

# The Gradient Descent Optimizer does the heavy lifting
train_op = tf.train.GradientDescentOptimizer(0.01).minimize(error)

# Normal TensorFlow - initialize values, create a session and run the model
model = tf.global_variables_initializer()

errors = []
with tf.Session() as session:
    session.run(model)
    for i in range(2000):
        x_value = np.random.rand()
        _, error_value = session.run([train_op, error], feed_dict={x: x_value})
        errors.append(error_value)
        if i % 100 == 0:
            print("Iteration: {0}, w: {1}, b: {2}".format(i, w.eval(), b.eval()))

    print("Predicted model: {0}x + {1}".format(w.eval(), b.eval()))


# In[89]:


import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')

error_list = list(errors)

# Print the first iteration at which the error drops below the threshold
num = 0
for error in error_list:
    if error < 0.00001:
        print(num)
        break
    num += 1

plt.plot(error_list)
plt.savefig("errors.png")  # save before show(), otherwise the saved figure is blank
plt.show()


# - Code that trains on a batch of n values at a time

# In[90]:


# Our real model of y = 2 * x + 6
def y_real(x):
    return tf.multiply(x, 2) + 6

# Our model of y = w * x + b
def y_model(x, w, b):
    return tf.multiply(x, w) + b

# Batch size
batch_size = 2

# x is a placeholder for a batch of training inputs
x = tf.placeholder("float", [None, 1])

# w and b are the variables storing our values. They are initialised with starting "guesses"
w = tf.Variable([1.0], name="w")
b = tf.Variable([1.0], name="b")

# Our error is the sum of squared differences over the batch
error = tf.reduce_sum(tf.square(y_real(x) - y_model(x, w, b)))

# The Gradient Descent Optimizer does the heavy lifting
train_op = tf.train.GradientDescentOptimizer(0.01).minimize(error)

# Normal TensorFlow - initialize values, create a session and run the model
model = tf.global_variables_initializer()

errors = []
with tf.Session() as session:
    session.run(model)
    for i in range(2000):
        x_value = np.random.randn(batch_size, 1)
        _, error_value = session.run([train_op, error], feed_dict={x: x_value})
        errors.append(error_value)
        if i % 100 == 0:
            print("Iteration: {0}, w: {1}, b: {2}".format(i, w.eval(), b.eval()))

    print("Predicted model: {0}x + {1}".format(w.eval(), b.eval()))


# In[91]:


error_list = list(errors)

# Print the first iteration at which the error drops below the threshold
num = 0
for error in error_list:
    if error < 0.0001:
        print(num)
        break
    num += 1

plt.plot(error_list)
plt.savefig("errors.png")  # overwrites the plot saved above; save before show()
plt.show()
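# - For reference, a minimal NumPy-only sketch of the per-sample update that
#   GradientDescentOptimizer performs on the squared error above
#   (w <- w - lr * de/dw, b <- b - lr * de/db). The starting guesses, learning
#   rate, and iteration count mirror the single-sample cell; this is an
#   illustrative sketch, not part of the TensorFlow pipeline above.

# In[ ]:


# Squared error for one sample: e = (y_true - (w*x + b))^2
# Gradients: de/dw = -2 * x * (y_true - (w*x + b)), de/db = -2 * (y_true - (w*x + b))
w_np, b_np = 1.0, 1.0   # same starting "guesses" as the TensorFlow cells
lr = 0.01               # same learning rate as GradientDescentOptimizer(0.01)

for i in range(2000):
    x_value = np.random.rand()
    y_true = 2 * x_value + 6                  # the "real" model y = 2x + 6
    residual = y_true - (w_np * x_value + b_np)
    w_np += lr * 2 * x_value * residual       # gradient step on w
    b_np += lr * 2 * residual                 # gradient step on b

print("NumPy sketch of predicted model: {0}x + {1}".format(w_np, b_np))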