# Inspect TF 1.x global state: a default Graph always exists, but there is
# no default Session until one is created/entered.
import tensorflow as tf
print(tf.__version__)
print("Default Graph :", tf.get_default_graph())
print("Default Session:", tf.get_default_session())
1.12.0 Default Graph : <tensorflow.python.framework.ops.Graph object at 0x10a292390> Default Session: None
# The freshly created default graph contains no operations yet (prints []).
tf.get_default_graph().get_operations()
[]
from sklearn import datasets, metrics, preprocessing
# NOTE(review): load_boston was removed in scikit-learn 1.2; this cell
# requires an older scikit-learn version to run.
boston = datasets.load_boston()
# Standardize the 13 features to zero mean / unit variance.
x_data = preprocessing.StandardScaler().fit_transform(boston.data)
y_data = boston.target
print('x_data.shape :', x_data.shape)
print('y_data.shape :', y_data.shape)
x_data.shape : (506, 13) y_data.shape : (506,)
# Linear regression on the Boston housing data with low-level TF 1.x ops.
print("Default Graph :", tf.get_default_graph())
print("Default Session:", tf.get_default_session())
print()

x = tf.placeholder(tf.float64, shape=(None, 13))
# shape=(None) is literally None (unconstrained rank); y_data is fed as (506,).
y_true = tf.placeholder(tf.float64, shape=(None))
# Fix: name= belongs on the Variable, not on the tf.zeros initializer tensor.
w = tf.Variable(initial_value=tf.zeros([1, 13], dtype=tf.float64), name='weights')
b = tf.Variable(initial_value=0, dtype=tf.float64, name='bias')
# y_pred has shape (1, 506); it broadcasts against y_true in the loss.
y_pred = tf.matmul(w, tf.transpose(x)) + b
loss = tf.reduce_mean(tf.square(y_true - y_pred))  # MSE

# Fix: learning_rate was declared as 0.1 but the optimizer hard-coded 0.001;
# use one consistent value (the one actually in effect).
learning_rate = 0.001
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
train = optimizer.minimize(loss)

init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    for step in range(300):
        MSE, _ = sess.run([loss, train], feed_dict={x: x_data, y_true: y_data})
        if (step + 1) % 40 == 0:
            print('Step: {:2d}\t MSE: {:.5f}'.format(step + 1, MSE))
    # Fix: do not shadow the `loss` tensor with the fetched float value.
    final_mse = sess.run(loss, feed_dict={x: x_data, y_true: y_data})
    print('\nMSE: {0:.5f}'.format(final_mse))
    # Inside the with block the session is the default session.
    print("Default Graph :", tf.get_default_graph())
    print("Default Session:", tf.get_default_session())
    print()

# After the with block the default session is cleared again.
print("Default Graph :", tf.get_default_graph())
print("Default Session:", tf.get_default_session())
print()
Default Graph : <tensorflow.python.framework.ops.Graph object at 0x10a292390> Default Session: None Step: 40 MSE: 494.40574 Step: 80 MSE: 418.47794 Step: 120 MSE: 357.52841 Step: 160 MSE: 307.04117 Step: 200 MSE: 264.59991 Step: 240 MSE: 228.68188 Step: 280 MSE: 198.19118 MSE: 184.06053 Default Graph : <tensorflow.python.framework.ops.Graph object at 0x10a292390> Default Session: <tensorflow.python.client.session.Session object at 0x1a35c07be0> Default Graph : <tensorflow.python.framework.ops.Graph object at 0x10a292390> Default Session: None
# Same regression via the high-level Estimator API.
feature_column = [tf.feature_column.numeric_column(key='x', shape=13)]

# Full-batch input (batch_size == number of samples), 300 passes over the data.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
    {'x': x_data}, y_data, shuffle=True, batch_size=506, num_epochs=300
)
# Fix: evaluation should see the data in a fixed order — shuffling adds
# nothing for a single full-batch pass and was shuffle=True by mistake.
eval_input_fn = tf.estimator.inputs.numpy_input_fn(
    {'x': x_data}, y_data, shuffle=False, batch_size=506, num_epochs=1
)

reg = tf.estimator.LinearRegressor(
    feature_columns=feature_column,
    optimizer=tf.train.GradientDescentOptimizer(learning_rate=0.001),
    loss_reduction=tf.losses.Reduction.MEAN
)
reg.train(input_fn=train_input_fn)
MSE = reg.evaluate(input_fn=eval_input_fn)
print(MSE)
# The Estimator manages its own graph/session internally; the defaults
# visible here are untouched.
print("Default Graph :", tf.get_default_graph())
print("Default Session:", tf.get_default_session())
INFO:tensorflow:Using default config. WARNING:tensorflow:Using temporary folder as model directory: /var/folders/3t/087xhfzj6knbg91h3f4np1pr0000gn/T/tmphicguchu INFO:tensorflow:Using config: {'_model_dir': '/var/folders/3t/087xhfzj6knbg91h3f4np1pr0000gn/T/tmphicguchu', '_tf_random_seed': None, '_save_summary_steps': 100, '_save_checkpoints_steps': None, '_save_checkpoints_secs': 600, '_session_config': allow_soft_placement: true graph_options { rewrite_options { meta_optimizer_iterations: ONE } } , '_keep_checkpoint_max': 5, '_keep_checkpoint_every_n_hours': 10000, '_log_step_count_steps': 100, '_train_distribute': None, '_device_fn': None, '_protocol': None, '_eval_distribute': None, '_experimental_distribute': None, '_service': None, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x1a35bdefd0>, '_task_type': 'worker', '_task_id': 0, '_global_id_in_cluster': 0, '_master': '', '_evaluation_master': '', '_is_chief': True, '_num_ps_replicas': 0, '_num_worker_replicas': 1} WARNING:tensorflow:From /Users/yhhan/anaconda3/lib/python3.6/site-packages/tensorflow/python/estimator/inputs/queues/feeding_queue_runner.py:62: QueueRunner.__init__ (from tensorflow.python.training.queue_runner_impl) is deprecated and will be removed in a future version. Instructions for updating: To construct input pipelines, use the `tf.data` module. WARNING:tensorflow:From /Users/yhhan/anaconda3/lib/python3.6/site-packages/tensorflow/python/estimator/inputs/queues/feeding_functions.py:500: add_queue_runner (from tensorflow.python.training.queue_runner_impl) is deprecated and will be removed in a future version. Instructions for updating: To construct input pipelines, use the `tf.data` module. INFO:tensorflow:Calling model_fn. INFO:tensorflow:Done calling model_fn. INFO:tensorflow:Create CheckpointSaverHook. INFO:tensorflow:Graph was finalized. INFO:tensorflow:Running local_init_op. INFO:tensorflow:Done running local_init_op. 
WARNING:tensorflow:From /Users/yhhan/anaconda3/lib/python3.6/site-packages/tensorflow/python/training/monitored_session.py:804: start_queue_runners (from tensorflow.python.training.queue_runner_impl) is deprecated and will be removed in a future version. Instructions for updating: To construct input pipelines, use the `tf.data` module. INFO:tensorflow:Saving checkpoints for 0 into /var/folders/3t/087xhfzj6knbg91h3f4np1pr0000gn/T/tmphicguchu/model.ckpt. INFO:tensorflow:loss = 583.40393, step = 1 INFO:tensorflow:global_step/sec: 134.283 INFO:tensorflow:loss = 392.1066, step = 101 (0.746 sec) INFO:tensorflow:global_step/sec: 164.817 INFO:tensorflow:loss = 259.70663, step = 201 (0.607 sec) INFO:tensorflow:Saving checkpoints for 300 into /var/folders/3t/087xhfzj6knbg91h3f4np1pr0000gn/T/tmphicguchu/model.ckpt. INFO:tensorflow:Loss for final step: 177.97446. INFO:tensorflow:Calling model_fn. INFO:tensorflow:Done calling model_fn. INFO:tensorflow:Starting evaluation at 2019-03-11-14:54:59 INFO:tensorflow:Graph was finalized. INFO:tensorflow:Restoring parameters from /var/folders/3t/087xhfzj6knbg91h3f4np1pr0000gn/T/tmphicguchu/model.ckpt-300 INFO:tensorflow:Running local_init_op. INFO:tensorflow:Done running local_init_op. INFO:tensorflow:Finished evaluation at 2019-03-11-14:54:59 INFO:tensorflow:Saving dict for global step 300: average_loss = 184.05464, global_step = 300, label/mean = 22.53281, loss = 184.05464, prediction/mean = 10.174241 INFO:tensorflow:Saving 'checkpoint_path' summary for global step 300: /var/folders/3t/087xhfzj6knbg91h3f4np1pr0000gn/T/tmphicguchu/model.ckpt-300 {'average_loss': 184.05464, 'label/mean': 22.53281, 'loss': 184.05464, 'prediction/mean': 10.174241, 'global_step': 300} Default Graph : <tensorflow.python.framework.ops.Graph object at 0x10a292390> Default Session: None
# Shell steps: activate the project's virtualenv, upgrade TensorFlow,
# and verify the installed version.
cd $ML_PATH
source env/bin/activate
pip3 install --upgrade tensorflow
python3 -c 'import tensorflow; print(tensorflow.__version__)'
# Re-check global state: still the same default graph, no default session.
print("Default Graph :", tf.get_default_graph())
print("Default Session:", tf.get_default_session())
Default Graph : <tensorflow.python.framework.ops.Graph object at 0x10a292390> Default Session: None
# Discard the current default graph and build a small expression graph.
tf.reset_default_graph()
print("Default Graph :", tf.get_default_graph())  # a brand-new Graph object
print("Default Session:", tf.get_default_session())
x = tf.Variable(3, name="x")
y = tf.Variable(4, name="y")
f = x*x*y + y + 2
print("Default Graph :", tf.get_default_graph())
print("Default Session:", tf.get_default_session())
# f is a symbolic Tensor; nothing has been evaluated yet.
print(f)
Default Graph : <tensorflow.python.framework.ops.Graph object at 0x1a35bde898> Default Session: None Default Graph : <tensorflow.python.framework.ops.Graph object at 0x1a35bde898> Default Session: None Tensor("add_1:0", shape=(), dtype=int32)
# The rebuilt graph now holds the Variable/assign/read and arithmetic ops of f.
tf.get_default_graph().get_operations()
[<tf.Operation 'x/initial_value' type=Const>, <tf.Operation 'x' type=VariableV2>, <tf.Operation 'x/Assign' type=Assign>, <tf.Operation 'x/read' type=Identity>, <tf.Operation 'y/initial_value' type=Const>, <tf.Operation 'y' type=VariableV2>, <tf.Operation 'y/Assign' type=Assign>, <tf.Operation 'y/read' type=Identity>, <tf.Operation 'mul' type=Mul>, <tf.Operation 'mul_1' type=Mul>, <tf.Operation 'add' type=Add>, <tf.Operation 'add_1/y' type=Const>, <tf.Operation 'add_1' type=Add>]
# Evaluate f with an explicitly managed Session (no `with` block). Because
# the session is never *entered*, it does not become the default session.
sess = tf.Session()
for variable in (x, y):
    sess.run(variable.initializer)
result = sess.run(f)
print(result)
print("Default Graph :", tf.get_default_graph())
print("Default Session:", tf.get_default_session())
sess.close()
42 Default Graph : <tensorflow.python.framework.ops.Graph object at 0x1a35bdeb00> Default Session: None
Using a `with` block to designate the default session.
While a default session is active, run()/eval() can be called on it implicitly.
print("Default Graph :", tf.get_default_graph())
print("Default Session:", tf.get_default_session())  # None outside the block
print()
# Entering the Session makes it the default; initializer.run() and
# f.eval() then use it implicitly.
with tf.Session() as sess:
x.initializer.run()
y.initializer.run()
result = f.eval()
print("Default Graph :", tf.get_default_graph())
print("Default Session:", tf.get_default_session())
print(sess is tf.get_default_session())  # True inside the block
print()
print(result)
# Leaving the with block closes the session and clears the default.
print("Default Graph :", tf.get_default_graph())
print("Default Session:", tf.get_default_session())
Default Graph : <tensorflow.python.framework.ops.Graph object at 0x1a35bdeb00> Default Session: None Default Graph : <tensorflow.python.framework.ops.Graph object at 0x1a35bdeb00> Default Session: <tensorflow.python.client.session.Session object at 0x1a37aafb70> True 42 Default Graph : <tensorflow.python.framework.ops.Graph object at 0x1a35bdeb00> Default Session: None
print("Default Graph :", tf.get_default_graph())
print("Default Session:", tf.get_default_session())
print()
# Same as the previous cell, but calling tf.get_default_session().run(...)
# explicitly instead of the .run()/.eval() shorthands.
with tf.Session() as sess:
tf.get_default_session().run(x.initializer)
tf.get_default_session().run(y.initializer)
result = tf.get_default_session().run(f)
print("Default Graph :", tf.get_default_graph())
print("Default Session:", tf.get_default_session())
print(sess is tf.get_default_session())  # the default IS this session
print()
print(result)
print("Default Graph :", tf.get_default_graph())
print("Default Session:", tf.get_default_session())
Default Graph : <tensorflow.python.framework.ops.Graph object at 0x1a35bdeb00> Default Session: None Default Graph : <tensorflow.python.framework.ops.Graph object at 0x1a35bdeb00> Default Session: <tensorflow.python.client.session.Session object at 0x1a37ac6048> True 42 Default Graph : <tensorflow.python.framework.ops.Graph object at 0x1a35bdeb00> Default Session: None
# Initialize every variable with a single op instead of per-variable
# initializer calls.
init = tf.global_variables_initializer()
with tf.Session():
init.run() # actually initializes all variables
result = f.eval()
print(result)
print("Default Graph :", tf.get_default_graph())
print("Default Session:", tf.get_default_session())
42 Default Graph : <tensorflow.python.framework.ops.Graph object at 0x1a35bdeb00> Default Session: None
print("Default Graph :", tf.get_default_graph())
print("Default Session:", tf.get_default_session())
print()
init = tf.global_variables_initializer()
# InteractiveSession installs itself as the default session on creation,
# so run()/eval() work without a with block; it must be closed manually.
sess = tf.InteractiveSession()
print("Default Graph :", tf.get_default_graph())
print("Default Session:", tf.get_default_session())
print()
init.run()
result = f.eval()
sess.close()  # closing also clears the default session
print(result)
print("Default Graph :", tf.get_default_graph())
print("Default Session:", tf.get_default_session())
Default Graph : <tensorflow.python.framework.ops.Graph object at 0x1a35bdeb00> Default Session: None Default Graph : <tensorflow.python.framework.ops.Graph object at 0x1a35bdeb00> Default Session: <tensorflow.python.client.session.InteractiveSession object at 0xb34660400> 42 Default Graph : <tensorflow.python.framework.ops.Graph object at 0x1a35bdeb00> Default Session: None
print("Default Graph :", tf.get_default_graph())
print("Default Session:", tf.get_default_session())
print()
# Replace the default graph with a fresh one; new ops land in it.
tf.reset_default_graph()
print("Default Graph :", tf.get_default_graph())
print("Default Session:", tf.get_default_session())
print()
x1 = tf.Variable(1)
print(x1.graph is tf.get_default_graph())  # True: x1 lives in the new default graph
Default Graph : <tensorflow.python.framework.ops.Graph object at 0x1a35bdeb00> Default Session: None Default Graph : <tensorflow.python.framework.ops.Graph object at 0x1a37b27518> Default Session: None True
print("Default Graph :", tf.get_default_graph())
print("Default Session:", tf.get_default_session())
print()
# An explicitly constructed Graph is NOT the default until entered.
graph = tf.Graph()
print(graph is tf.get_default_graph())  # False
# Inside as_default(), `graph` temporarily becomes the default graph.
with graph.as_default():
x2 = tf.Variable(2)
print(graph is tf.get_default_graph())
print(x2.graph is tf.get_default_graph())
print(x2.graph is tf.get_default_graph()) # graph (== x2.graph) is a separate graph, detached from the default graph
print(x2.graph is graph)
Default Graph : <tensorflow.python.framework.ops.Graph object at 0x1a37b27518> Default Session: None False True True False True
# A small dependency chain: y and z both depend on x, which depends on w.
w = tf.constant(3)
x = w + 2
y = x + 5
z = x * 3
# Printing tensors shows symbolic nodes, not values.
print(w)
print(x)
print(y)
print(z)
print()
# NOTE(review): each eval() is a separate graph run — node values are not
# reused across calls (see the next cell, which fetches both in one run()).
with tf.Session() as sess:
print(y.eval()) # 10
print(z.eval()) # 15
Tensor("Const:0", shape=(), dtype=int32) Tensor("add:0", shape=(), dtype=int32) Tensor("add_1:0", shape=(), dtype=int32) Tensor("mul:0", shape=(), dtype=int32) 10 15
# Fetch y and z in a single run() call so shared upstream nodes are
# evaluated only once.
with tf.Session() as sess:
    fetched = sess.run([y, z])
y_val, z_val = fetched
print(y_val)  # 10
print(z_val)  # 15
10 15
import numpy as np
from sklearn.datasets import fetch_california_housing
import tensorflow as tf
# California housing: 20640 samples x 8 features.
housing = fetch_california_housing()
print(housing.data.shape)
m = housing.data.shape[0]
# Add a bias feature (x_0 = 1) to every training sample.
housing_data_plus_bias = np.c_[np.ones(shape=(m, 1)), housing.data]
print(housing_data_plus_bias.shape)
print()
print(housing.target.shape)
# Reshape the target from (m,) into a column vector (m, 1).
housing_target = housing.target.reshape(-1, 1)
print(housing_target.shape)
(20640, 8) (20640, 9) (20640,) (20640, 1)
# Closed-form solution via the normal equation: theta = (X^T X)^(-1) X^T y.
tf.reset_default_graph()
X = tf.constant(housing_data_plus_bias, dtype=tf.float64, name="X")
y = tf.constant(housing_target, dtype=tf.float64, name="y")
XT = tf.transpose(X)
theta = tf.matmul(tf.matmul(tf.matrix_inverse(tf.matmul(XT, X)), XT), y)
with tf.Session() as sess:
result = theta.eval()
print(result)
[[-3.69419202e+01] [ 4.36693293e-01] [ 9.43577803e-03] [-1.07322041e-01] [ 6.45065694e-01] [-3.97638942e-06] [-3.78654265e-03] [-4.21314378e-01] [-4.34513755e-01]]
from sklearn.preprocessing import StandardScaler

# Standardize the raw features, then prepend the all-ones bias column.
scaler = StandardScaler()
scaled_housing_data = scaler.fit_transform(housing.data)
bias_column = np.ones((m, 1))
scaled_housing_data_plus_bias = np.hstack((bias_column, scaled_housing_data))
# Column means: the bias stays 1; the scaled features are ~0 (up to float error).
print(scaled_housing_data_plus_bias.mean(axis=0))
print(scaled_housing_data_plus_bias.shape)
[ 1.00000000e+00 6.60969987e-17 5.50808322e-18 6.60969987e-17 -1.06030602e-16 -1.10161664e-17 3.44255201e-18 -1.07958431e-15 -8.52651283e-15] (20640, 9)
$X^T$: (9, 20640)
$X$: (20640, 9) <-- scaled_housing_data_plus_bias
$\theta$: (9, 1)
$X \cdot \theta$: (20640, 1)
$y$: (20640, 1) <-- housing_target
Gradient descent for getting $\theta$
# Batch gradient descent with a hand-derived gradient:
# grad = (2/m) * X^T (X theta - y).
tf.reset_default_graph()
learning_rate = 0.01
m, n = scaled_housing_data_plus_bias.shape
print("Number of samples: {0}, Number of features: {1}".format(m, n))
print()
X = tf.constant(scaled_housing_data_plus_bias, dtype=tf.float32, name="X")
y = tf.constant(housing_target, dtype=tf.float32, name="y")
# theta starts uniform random in [-1, 1); seed fixed for reproducibility.
theta = tf.Variable(
tf.random_uniform(shape=[n, 1], minval=-1.0, maxval=1.0, seed=42),
name="theta"
)
y_pred = tf.matmul(X, theta, name="predictions")
error = y_pred - y
mse = tf.reduce_mean(tf.square(error), name="mse")
gradients = 2/m * tf.matmul(tf.transpose(X), error)
# One GD step: theta <- theta - lr * grad (tf.assign makes it a graph op).
training_op = tf.assign(theta, theta - learning_rate * gradients)
init = tf.global_variables_initializer()
n_epochs = 1000
with tf.Session() as sess:
sess.run(init)
for epoch in range(n_epochs):
if epoch % 100 == 0:
print("Epoch", epoch, "MSE =", mse.eval())
sess.run(training_op)
best_theta = theta.eval()
print("Best theta: \n{0}".format(best_theta))
Number of samples: 20640, Number of features: 9 Epoch 0 MSE = 2.7544272 Epoch 100 MSE = 0.6322218 Epoch 200 MSE = 0.57277966 Epoch 300 MSE = 0.5585006 Epoch 400 MSE = 0.5490694 Epoch 500 MSE = 0.5422878 Epoch 600 MSE = 0.5373788 Epoch 700 MSE = 0.5338218 Epoch 800 MSE = 0.5312427 Epoch 900 MSE = 0.5293706 Best theta: [[ 2.0685523e+00] [ 7.7407807e-01] [ 1.3119239e-01] [-1.1784514e-01] [ 1.6477820e-01] [ 7.4408232e-04] [-3.9194513e-02] [-8.6135650e-01] [-8.2347965e-01]]
# tf.gradients demo: d/dx (x^2 + x - 1) at x = 2 is 2*2 + 1 = 5.
x = tf.constant(2.0)
y = x**2 + x - 1
grad = tf.gradients(y, x)
init = tf.global_variables_initializer()
with tf.Session() as sess:
    init.run()
    grad_value = sess.run(grad)
print(grad_value)
[5.0]
# Same batch gradient descent, but the gradient comes from reverse-mode
# autodiff (tf.gradients) instead of the hand-derived formula.
tf.reset_default_graph()
learning_rate = 0.01
m, n = scaled_housing_data_plus_bias.shape
print("Number of samples: {0}, Number of features: {1}".format(m, n))
print()
X = tf.constant(scaled_housing_data_plus_bias, dtype=tf.float32, name="X")
y = tf.constant(housing_target, dtype=tf.float32, name="y")
theta = tf.Variable(
tf.random_uniform(shape=[n, 1], minval=-1.0, maxval=1.0, seed=42),
name="theta"
)
y_pred = tf.matmul(X, theta, name="predictions")
error = y_pred - y
mse = tf.reduce_mean(tf.square(error), name="mse")
###
# tf.gradients returns a list of gradient tensors; take the first element.
gradients = tf.gradients(mse, theta)[0]
###
training_op = tf.assign(theta, theta - learning_rate * gradients)
init = tf.global_variables_initializer()
n_epochs = 1000
with tf.Session() as sess:
sess.run(init)
for epoch in range(n_epochs):
if epoch % 100 == 0:
print("Epoch", epoch, "MSE =", mse.eval())
sess.run(training_op)
best_theta = theta.eval()
print("Best theta: \n{0}".format(best_theta))
Number of samples: 20640, Number of features: 9 Epoch 0 MSE = 2.7544272 Epoch 100 MSE = 0.6322219 Epoch 200 MSE = 0.57277966 Epoch 300 MSE = 0.5585006 Epoch 400 MSE = 0.54906934 Epoch 500 MSE = 0.54228777 Epoch 600 MSE = 0.5373788 Epoch 700 MSE = 0.5338219 Epoch 800 MSE = 0.5312427 Epoch 900 MSE = 0.5293704 Best theta: [[ 2.0685525e+00] [ 7.7407807e-01] [ 1.3119237e-01] [-1.1784511e-01] [ 1.6477817e-01] [ 7.4407971e-04] [-3.9194521e-02] [-8.6135668e-01] [-8.2347977e-01]]
# Same training, now delegating the update rule entirely to an Optimizer:
# minimize() builds both the gradient and the assign ops.
tf.reset_default_graph()
learning_rate = 0.01
m, n = scaled_housing_data_plus_bias.shape
print("Number of samples: {0}, Number of features: {1}".format(m, n))
print()
X = tf.constant(scaled_housing_data_plus_bias, dtype=tf.float32, name="X")
y = tf.constant(housing_target, dtype=tf.float32, name="y")
theta = tf.Variable(
tf.random_uniform(shape=[n, 1], minval=-1.0, maxval=1.0, seed=42),
name="theta"
)
y_pred = tf.matmul(X, theta, name="predictions")
error = y_pred - y
mse = tf.reduce_mean(tf.square(error), name="mse")
###
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(mse)
###
init = tf.global_variables_initializer()
n_epochs = 1000
with tf.Session() as sess:
sess.run(init)
for epoch in range(n_epochs):
if epoch % 100 == 0:
print("Epoch", epoch, "MSE =", mse.eval())
sess.run(training_op)
best_theta = theta.eval()
print("Best theta: \n{0}".format(best_theta))
Number of samples: 20640, Number of features: 9 Epoch 0 MSE = 2.7544272 Epoch 100 MSE = 0.6322219 Epoch 200 MSE = 0.57277966 Epoch 300 MSE = 0.5585006 Epoch 400 MSE = 0.54906934 Epoch 500 MSE = 0.54228777 Epoch 600 MSE = 0.5373788 Epoch 700 MSE = 0.5338219 Epoch 800 MSE = 0.5312427 Epoch 900 MSE = 0.5293704 Best theta: [[ 2.0685525e+00] [ 7.7407807e-01] [ 1.3119237e-01] [-1.1784511e-01] [ 1.6477817e-01] [ 7.4407971e-04] [-3.9194521e-02] [-8.6135668e-01] [-8.2347977e-01]]
# Same setup with MomentumOptimizer (momentum=0.9); per the printed output
# it converges much faster (MSE ~0.5273 already at epoch 100).
tf.reset_default_graph()
learning_rate = 0.01
m, n = scaled_housing_data_plus_bias.shape
print("Number of samples: {0}, Number of features: {1}".format(m, n))
print()
X = tf.constant(scaled_housing_data_plus_bias, dtype=tf.float32, name="X")
y = tf.constant(housing_target, dtype=tf.float32, name="y")
theta = tf.Variable(
tf.random_uniform(shape=[n, 1], minval=-1.0, maxval=1.0, seed=42),
name="theta"
)
y_pred = tf.matmul(X, theta, name="predictions")
error = y_pred - y
mse = tf.reduce_mean(tf.square(error), name="mse")
###
optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=0.9)
training_op = optimizer.minimize(mse)
###
init = tf.global_variables_initializer()
n_epochs = 1000
with tf.Session() as sess:
sess.run(init)
for epoch in range(n_epochs):
if epoch % 100 == 0:
print("Epoch", epoch, "MSE =", mse.eval())
sess.run(training_op)
best_theta = theta.eval()
print("Best theta: \n{0}".format(best_theta))
Number of samples: 20640, Number of features: 9 Epoch 0 MSE = 2.7544272 Epoch 100 MSE = 0.5273161 Epoch 200 MSE = 0.52441406 Epoch 300 MSE = 0.5243281 Epoch 400 MSE = 0.52432173 Epoch 500 MSE = 0.524321 Epoch 600 MSE = 0.52432096 Epoch 700 MSE = 0.5243204 Epoch 800 MSE = 0.52432066 Epoch 900 MSE = 0.5243207 Best theta: [[ 2.068558 ] [ 0.82961667] [ 0.11875105] [-0.26552197] [ 0.30569217] [-0.00450318] [-0.03932618] [-0.8998918 ] [-0.87054694]]
# Placeholders accept feeds with any row count because the first dim is None.
tf.reset_default_graph()
A = tf.placeholder(tf.float32, shape=(None, 3))
B = A + 5
batches = (
    [[1, 2, 3]],
    [[4, 5, 6], [7, 8, 9]],
    [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]],
)
with tf.Session() as sess:
    B_val_1, B_val_2, B_val_3 = (sess.run(B, feed_dict={A: batch}) for batch in batches)
print(B_val_1, end="\n\n")
print(B_val_2, end="\n\n")
print(B_val_3)
[[6. 7. 8.]] [[ 9. 10. 11.] [12. 13. 14.]] [[ 6. 7. 8.] [ 9. 10. 11.] [12. 13. 14.] [15. 16. 17.]]
import numpy.random as rnd
# Mini-batch gradient descent: X and y become placeholders that get fed
# one batch at a time instead of constants holding the full dataset.
tf.reset_default_graph()
learning_rate = 0.01
m, n = scaled_housing_data_plus_bias.shape
print("Number of samples: {0}, Number of features: {1}".format(m, n))
print()
X = tf.placeholder(tf.float32, shape=(None, n), name="X")
y = tf.placeholder(tf.float32, shape=(None, 1), name="y")
theta = tf.Variable(
tf.random_uniform(shape=[n, 1], minval=-1.0, maxval=1.0, seed=42),
name="theta"
)
y_pred = tf.matmul(X, theta, name="predictions")
error = y_pred - y
mse = tf.reduce_mean(tf.square(error), name="mse")
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(mse)
def fetch_batch(epoch, batch_index, batch_size):
    """Return a random mini-batch (X_batch, y_batch) of `batch_size` rows.

    Fix: the original ignored `epoch` and `batch_index`, so batch selection
    was not reproducible even though theta's init is seeded. Deriving a
    distinct, deterministic seed per (epoch, batch) pair keeps the same
    signature while making runs repeatable.
    """
    rnd.seed(epoch * n_batches + batch_index)
    indices = rnd.randint(m, size=batch_size)  # sample rows with replacement
    X_batch = scaled_housing_data_plus_bias[indices]
    y_batch = housing_target[indices]
    return X_batch, y_batch
def fetch_batch2(epoch, batch_index, batch_size):
    """Return the `batch_index`-th sequential slice of the training data.

    The final slice is clipped to the dataset size `m`, so the last batch
    may be smaller than `batch_size`.
    """
    lo = batch_index * batch_size
    hi = min(m, lo + batch_size)
    return scaled_housing_data_plus_bias[lo:hi], housing_target[lo:hi]
n_epochs = 10
batch_size = 100
# Ceil so the last, possibly smaller batch is still counted.
n_batches = int(np.ceil(m / batch_size))
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
for epoch in range(n_epochs):
# Full-dataset MSE before this epoch's updates.
print("Epoch", epoch, "MSE =", mse.eval(
feed_dict={X: scaled_housing_data_plus_bias, y: housing_target})
)
for batch_index in range(n_batches):
X_batch, y_batch = fetch_batch(epoch, batch_index, batch_size)
sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
best_theta = theta.eval()
print("Best theta: \n{0}".format(best_theta))
Number of samples: 20640, Number of features: 9 Epoch 0 MSE = 2.7544272 Epoch 1 MSE = 7.1942964 Epoch 2 MSE = 0.6038717 Epoch 3 MSE = 0.5658047 Epoch 4 MSE = 0.5324455 Epoch 5 MSE = 0.5322575 Epoch 6 MSE = 0.5278092 Epoch 7 MSE = 0.5279042 Epoch 8 MSE = 0.64588606 Epoch 9 MSE = 0.6614312 Best theta: [[ 2.0692444 ] [ 0.8011928 ] [ 0.11612897] [-0.25423694] [ 0.15042946] [ 0.00301097] [-0.09266656] [-0.898888 ] [-0.86405325]]