%run -m ipy_startup
%matplotlib inline
# Boilerplate initialization that's easier to keep in files on the PYTHONPATH, like the "ipy_startup"
# file above (which loads pandas, numpy, and matplotlib); the Plotly setup is included inline here for clarity:
# Plotly Initialization
import plotly as plty
import plotly.graph_objs as go
import cufflinks as cf
cf.set_config_file(offline=True, theme='white', offline_link_text=None, offline_show_link=False)
def gen_data():
# Seed the random number generator so we always get the same data
np.random.seed(1)
# Set number of random data points to create
n = 200
# Generate x and y values
x = np.random.randn(n) + 4
y = np.where(x < 3.5, 3.5 + (x-3.5)**2, x) + .2 * np.random.randn(n)
return x, y
x, y = gen_data()
# Plot what we've got
plt.figure()
plt.scatter(x, y)
plt.title('Random Data for Modeling')
plt.xlabel('x')
plt.ylabel('y')
def plot_predictions(est, x_start=-1, x_stop=10, x_num=100):
""" Trains a given estimator and plots what it learned vs training data"""
# Create X data to predict
xp = np.linspace(x_start, x_stop, x_num)
# Train estimator and get predictions for desired grid
yp = est.fit(np.expand_dims(x, 1), y).predict(np.expand_dims(xp, 1))
# Plot original data as well as predictions
plt.figure()
plt.scatter(x, y, alpha=.5)
plt.plot(xp, yp, c='r', linewidth=2, alpha=.8)
plt.title('Decision Surface ({})'.format(est.__class__.__name__))
from sklearn.tree import DecisionTreeRegressor
plot_predictions(DecisionTreeRegressor())
from sklearn.ensemble import GradientBoostingRegressor
plot_predictions(GradientBoostingRegressor(subsample=.8))
from sklearn.svm import SVR
plot_predictions(SVR(C=10.))
from sklearn.neighbors import KNeighborsRegressor
plot_predictions(KNeighborsRegressor())
from sklearn.neural_network import MLPRegressor
plot_predictions(MLPRegressor(random_state=1))
Perhaps some of the MLPRegressor arguments will help make this model better?
??MLPRegressor
est = MLPRegressor(random_state=1, hidden_layer_sizes=(2,))
#est = MLPRegressor(random_state=1, hidden_layer_sizes=(10,), solver='sgd')
#est = MLPRegressor(random_state=1, hidden_layer_sizes=(10,), solver='sgd', learning_rate_init=.00001)
plot_predictions(est)
/Users/eczech/anaconda/envs/research3.5/lib/python3.5/site-packages/sklearn/neural_network/multilayer_perceptron.py:563: ConvergenceWarning: Stochastic Optimizer: Maximum iterations reached and the optimization hasn't converged yet.
est = MLPRegressor(random_state=1, hidden_layer_sizes=(10,), solver='sgd', batch_size=1)
plot_predictions(est)
# Functions used to create the loss surface for this problem -- this is a little off
# topic, so no need to focus on it
def get_x_grid(start=-10, stop=10, num=100):
v = np.linspace(start, stop, num)
g = np.hstack([np.expand_dims(x.ravel(), 1) for x in np.meshgrid(v, v)])
g = g.astype(np.float64)
return g, v
def get_mse(w, b):
z = x * w + b
y_ = np.where(z >= 0, z, 0)
#y_ = 1 / (1 + np.exp(-z))
return np.mean((y_ - y)**2)
def get_mse_grid(g):
mse = []
for i in range(len(g)):
b, w = g[i][0], g[i][1]
mse.append(get_mse(w, b))
mse = np.clip(np.array(mse), -30, 30)
return mse
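For reference, get_mse is just the training MSE of a single ReLU unit viewed as a function of its two parameters, MSE(w, b) = mean((max(0, w*x + b) - y)^2). This is why the surface below has a large flat plateau: wherever the unit outputs zero for every training point, the loss is the constant mean(y^2).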
g, v = get_x_grid()
mse = get_mse_grid(g)
# Plot computed Loss Surface
trace = go.Surface(x=v, y=v, z=mse.reshape((len(v), -1), order='C'), colorscale='Jet')
layout = go.Layout(
scene=dict(xaxis=dict(title='b'), yaxis=dict(title='w'), zaxis=dict(title='MSE')),
title='Mean Squared Error Loss<br>*Assuming we had only two parameters '
)
fig = go.Figure(data=[trace], layout=layout)
plty.offline.iplot(fig)
get_mse(0, 0)
19.76528863484338
get_mse(-10, 10)
19.76528863484338
Using a batch size of 1 forces the optimizer to move more randomly, which means there's a better chance it will fall in the "canyon", but that's not guaranteed.
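As a rough, illustrative check of that claim, we can train the same network across a few random seeds with and without single-sample batches and compare the final training losses (loss_ is the final loss scikit-learn stores on a fitted MLPRegressor; exact numbers will vary by version and will likely trigger the same convergence warnings seen above):

from sklearn.neural_network import MLPRegressor
X = np.expand_dims(x, 1)
for batch_size in [1, 200]:
    # Final training loss across several random initializations
    final_losses = [
        MLPRegressor(random_state=seed, hidden_layer_sizes=(10,),
                     solver='sgd', batch_size=batch_size).fit(X, y).loss_
        for seed in range(5)
    ]
    print('batch_size={:>3}: {}'.format(batch_size, np.round(final_losses, 2)))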
Tensorflow is a library that represents computations as data flow graphs, which we'll build up to from the basics:
import tensorflow as tf
Do something simple:
# Create a constant
tf.constant(1)
<tf.Tensor 'Const:0' shape=() dtype=int32>
tf.constant(2)
<tf.Tensor 'Const_1:0' shape=() dtype=int32>
Do something that seems like it would be more useful:
a = tf.constant(1)
b = tf.constant(2)
c = a + b
print(a)
print(b)
print(c)
Tensor("Const_2:0", shape=(), dtype=int32) Tensor("Const_3:0", shape=(), dtype=int32) Tensor("add:0", shape=(), dtype=int32)
Now do the same thing within a "session":
with tf.Session() as sess:
print(sess.run([a, b, c]))
[1, 2, 3]
# Equivalent to the above
with tf.Session(graph=tf.get_default_graph()) as sess:
print(sess.run([a, b, c]))
[1, 2, 3]
# Graphs can be tricky to work with, so it helps to be able to see what's associated with them
graph = tf.get_default_graph()
graph.get_operations()
[<tf.Operation 'Const' type=Const>, <tf.Operation 'Const_1' type=Const>, <tf.Operation 'Const_2' type=Const>, <tf.Operation 'Const_3' type=Const>, <tf.Operation 'add' type=Add>]
# We can also do things by name, so they're easier to track
a = tf.constant(1, name='a')
b = tf.constant(2, name='b')
c = tf.add(a, b, name='c')
graph.get_operations()
[<tf.Operation 'Const' type=Const>, <tf.Operation 'Const_1' type=Const>, <tf.Operation 'Const_2' type=Const>, <tf.Operation 'Const_3' type=Const>, <tf.Operation 'add' type=Add>, <tf.Operation 'a' type=Const>, <tf.Operation 'b' type=Const>, <tf.Operation 'c' type=Add>]
Notice how everything just keeps getting tacked on to the default graph. This becomes a problem in a hurry (especially in a notebook) so it's almost always better to scope things to a single graph using a "with" statement:
g = tf.Graph()
with g.as_default():
a = tf.constant(1, name='a')
b = tf.constant(2, name='b')
c = tf.add(a, b, name='c')
print(g.get_operations())
[<tf.Operation 'a' type=Const>, <tf.Operation 'b' type=Const>, <tf.Operation 'c' type=Add>]
Or if you really need to, you can also clear the default graph:
tf.reset_default_graph()
tf.get_default_graph().get_operations()
[]
Adding operations to a graph can be done in a "with" block:
tf.logging.set_verbosity(tf.logging.DEBUG)
g = tf.Graph()
with g.as_default():
# Create a counter (circular dependency)
ct = tf.Variable(0, name='ct_init')
ct = tf.assign(ct, ct + 1, name='ct')
# Multiply some variable by the counter value
x = tf.Variable([0, 1, 2], name='x')
y = tf.identity(x * ct, name='y')
init = tf.global_variables_initializer()
with tf.Session(graph=g) as sess:
sess.run(init)
print(sess.run([ct, y]))
print(sess.run([ct, y]))
print(sess.run([ct, y]))
[1, array([0, 1, 2], dtype=int32)]
[2, array([0, 2, 4], dtype=int32)]
[3, array([0, 3, 6], dtype=int32)]
The "finalize" method will ensure that any more operations added to a graph will cause an error
g.finalize()
with g.as_default():
z = x * 2
---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
<ipython-input-39-7bafd586bad8> in <module>()
      1 with g.as_default():
----> 2     z = x * 2
...
RuntimeError: Graph is finalized and cannot be modified.
A tensorflow graph can be exported for inspection in Tensorboard as well:
def write_graph(directory, graph):
writer = tf.summary.FileWriter(directory)
writer.add_graph(graph)
writer.flush()
print('Graph written to dir "{}"'.format(directory))
write_graph('/tmp/tf/graph1', g)
Graph written to dir "/tmp/tf/graph1"
# Create a graph to scope everything to
g = tf.Graph()
# Re-assign data to model
x, y = gen_data()
def get_regression_model_components(g, activation=None, initial_value=0.):
""" Generate TF operations and tensors for linear regression"""
with g.as_default():
# Create "placeholders" which will represent things that will be fed to the graph
# * This is annoying, but makes things like online learning possible
x = tf.placeholder(tf.float32, shape=[None], name='x')
y = tf.placeholder(tf.float32, shape=[None], name='y')
# Create variables to represent the slope and intercept (both initialized to 0 here)
b = tf.Variable(initial_value, name='b')
w = tf.Variable(initial_value, name='w')
# Get "y" estimates by multiplying to-be-specified-later x values by slope, and then add intercept
p = x * w + b
# Apply the "activation" function if one was given
if activation is not None:
p = activation(p)
p = tf.identity(p, name='prediction')
# Determine MSE for the current predictions
mse = tf.losses.mean_squared_error(y, p)
# Specify the kind of optimization that we'd like to use and what we'd like to apply it to
op = tf.train.GradientDescentOptimizer(.01).minimize(mse, name='optimize')
# Return just about everything (which is also annoying)
return x, y, p, op, mse
def train_model(g, x_arg, y_arg, prediction, optimize, mse):
# Create a variable initializer operation and also add it to the graph
# * This is often done separately from the model declaration for good reason
with g.as_default():
init = tf.global_variables_initializer()
# Keep track of MSE at each step with this list
losses = []
# Training always happens within a session
with tf.Session(graph=g) as sess:
# Initialize variables (slope and intercept in this case)
sess.run(init)
# For 100 steps, compute the new mse -- behind the scenes, TF will be calculating gradients
# for the variables and using that gradient to make a new guess in the right direction
# NOTE: must pass in "optimize" step or nothing will ever change
for i in range(100):
op, loss = sess.run([optimize, mse], feed_dict={x_arg: x, y_arg: y})
if i % 10 == 0:
print('Loss at step {}: {}'.format(i, loss))
losses.append(loss)
# Finally, see what this model has learned after 100 iterations by making predictions from it
x_pred = np.linspace(-1, 10, 100)
y_pred = sess.run(prediction, feed_dict={x_arg: x_pred, y_arg: x_pred})
return losses, x_pred, y_pred
x_arg, y_arg, prediction, optimize, mse = get_regression_model_components(g)
losses, x_pred, y_pred = train_model(g, x_arg, y_arg, prediction, optimize, mse)
Loss at step 0: 19.765287399291992
Loss at step 10: 0.6865646839141846
Loss at step 20: 0.6798950433731079
Loss at step 30: 0.674996554851532
Loss at step 40: 0.670184314250946
Loss at step 50: 0.6654569506645203
Loss at step 60: 0.6608127355575562
Loss at step 70: 0.6562504768371582
Loss at step 80: 0.6517684459686279
Loss at step 90: 0.6473655104637146
# See how the loss has decreased with each step
plt.plot(losses)
# Plot the predictions from the model
plt.scatter(x, y)
plt.plot(x_pred, y_pred)
# Create a different graph
g = tf.Graph()
# Re-assign data (just to be safe)
x, y = gen_data()
import logging
import pdb
# Logger used by tf_print below
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def tf_print(t, transform=None):
""" Tensor print function"""
def log_value(x):
# pdb.set_trace()
logger.info('{} - {}'.format(t.name, x if transform is None else transform(x)))
return x
# Because gradient-less custom operations do not operate as expected in a TF graph,
# we need to do something special in order for a print operation to really not have
# any effect on the overall data flow. In this case, we create an operation that
# does the logging and force a dependency using the control_dependencies function
log_op = tf.py_func(log_value, [t], [t.dtype], name=t.name.split(':')[0])[0]
with tf.control_dependencies([log_op]):
r = tf.identity(t)
return r
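As a quick sanity check, here's a minimal, illustrative use of tf_print on a throwaway graph (the tensor names here are arbitrary):

g_demo = tf.Graph()
with g_demo.as_default():
    a = tf.constant([1., 2., 3.], name='demo')
    # tf_print leaves the value unchanged but logs it every time the graph runs
    b = tf_print(a * 2.)
with tf.Session(graph=g_demo) as sess:
    print(sess.run(b))  # also emits a log line like 'mul:0 - [2. 4. 6.]'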
def get_regression_model_components(g, activation=None, initial_value=0.):
""" Generate TF operations and tensors for linear regression"""
with g.as_default():
# Create "placeholders" which will represent things that will be fed to the graph
# * This is annoying, but makes things like online learning possible
x = tf.placeholder(tf.float32, shape=[None], name='x')
y = tf.placeholder(tf.float32, shape=[None], name='y')
# Create variables to represent the slope and intercept (both initialized to 0 here)
b = tf.Variable(initial_value, name='b')
w = tf.Variable(initial_value, name='w')
#print(w)
# def print_tensor(x):
# print(x)
# return x
# What happens if we use the output of a custom function directly in a graph?
# w = tf.py_func(print_tensor, [w], [w.dtype])
# b = tf.py_func(print_tensor, [b], [b.dtype])
#w, b = tf_print(w), tf_print(b)
# Get "y" estimates by multiplying to-be-specified-later x values by slope, and then add intercept
p = x * w + b
tf.summary.histogram('predictions', p)
# Apply the "activation" function if one was given
if activation is not None:
p = activation(p)
p = tf.identity(p, name='prediction')
# Determine MSE for the current predictions
mse = tf.losses.mean_squared_error(y, p)
# Specify the kind of optimization that we'd like to use and what we'd like to apply it to
op = tf.train.GradientDescentOptimizer(.01).minimize(mse, name='optimize')
# Add variable summaries
tf.summary.scalar('loss', mse)
tf.summary.scalar('slope', w)
tf.summary.scalar('intercept', b)
# Return just about everything (which is also annoying)
return x, y, p, w, b, op, mse
def train_model(g, x_arg, y_arg, prediction, w, b, optimize, mse, model_dir):
writer = tf.summary.FileWriter(model_dir, graph=g)
# Create a variable initializer operation and also add it to the graph
# * This is often done separately from the model declaration for good reason
# Also generate a "merged" summary operation that can be called at every training
# step or only infrequently
with g.as_default():
init = tf.global_variables_initializer()
summaries = tf.summary.merge_all()
# Keep track of MSE at each step with this list
losses = []
# Training always happens within a session
with tf.Session(graph=g) as sess:
# Initialize variables (slope and intercept in this case)
sess.run(init)
# For 100 steps, compute the new mse -- behind the scenes, TF will be calculating gradients
# for the variables and using that gradient to make a new guess in the right direction
for i in range(100):
op, loss, summary = sess.run([optimize, mse, summaries], feed_dict={x_arg: x, y_arg: y})
losses.append(loss)
writer.add_summary(summary, i)
# Finally, see what this model has learned after 100 iterations by making predictions from it
x_pred = np.linspace(-1, 10, 100)
y_pred = sess.run(prediction, feed_dict={x_arg: x_pred, y_arg: x_pred})
w_val, b_val = sess.run([w, b], feed_dict={x_arg: x_pred, y_arg: x_pred})
writer.flush()
return losses, x_pred, y_pred, w_val, b_val
# Show difference between initial value settings
x_arg, y_arg, prediction, w, b, optimize, mse = get_regression_model_components(
g, activation=tf.nn.relu, initial_value=0.1
)
model_dir = '/tmp/tf/model1'
!rm $model_dir/*
losses, x_pred, y_pred, w, b = train_model(g, x_arg, y_arg, prediction, w, b, optimize, mse, model_dir)
plt.plot(losses)
# Plot the predictions from the model
plt.scatter(x, y)
plt.plot(x_pred, y_pred)
w, b
(0.92047602, 0.50675946)
To view tensorboard for the above model:
# Change to the directory containing the model "events" file
cd $model_dir
# Activate the correct python environment
source activate <env>
# Start tensorboard and open the URL it says to in a browser
tensorboard --logdir=.
There are now a good number of libraries that work as abstractions of Tensorflow and try to simplify things for you:
x, y = gen_data()
The TFLearn version of the single neuron model above looks like this:
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.estimators.estimator import SKCompat
def model_fn(X, y, mode, params):
tf.set_random_seed(1)
b0 = tf.Variable(tf.random_normal([1], dtype=tf.float32), name='b0')
w0 = tf.Variable(tf.random_normal([1], dtype=tf.float32), name='w0')
y_ = tf.nn.relu(b0 + w0 * X)
loss = tf.identity(tf.losses.mean_squared_error(y, tf.squeeze(y_, axis=1)), name='loss')
train_op = tf.contrib.layers.optimize_loss(
loss=loss,
global_step=tf.contrib.framework.get_global_step(),
learning_rate=params['learning_rate'],
optimizer="Adam"
)
return tf.contrib.learn.ModelFnOps(mode, loss=loss, predictions={'values': y_}, train_op=train_op)
est = SKCompat(tf.contrib.learn.Estimator(model_fn=model_fn, params={'learning_rate':.01, 'alpha': .00001}))
est = est.fit(x.astype(np.float32), y.astype(np.float32), max_steps=500, batch_size=100)
WARNING:tensorflow:Using temporary folder as model directory: /var/folders/6g/kdqcxdms5dg0dr83wxn3ydjcsy9pxl/T/tmpynvv1o82
INFO:tensorflow:Using default config.
INFO:tensorflow:Using config: {'_is_chief': True, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x1423215f8>, '_task_type': None, '_tf_config': gpu_options { per_process_gpu_memory_fraction: 1 } , '_evaluation_master': '', '_save_summary_steps': 100, '_save_checkpoints_steps': None, '_tf_random_seed': None, '_keep_checkpoint_every_n_hours': 10000, '_master': '', '_environment': 'local', '_task_id': 0, '_keep_checkpoint_max': 5, '_save_checkpoints_secs': 600, '_num_ps_replicas': 0}
DEBUG:tensorflow:Setting feature info to TensorSignature(dtype=tf.float32, shape=TensorShape([Dimension(None), Dimension(1)]), is_sparse=False).
DEBUG:tensorflow:Setting labels info to TensorSignature(dtype=tf.float32, shape=TensorShape([Dimension(None)]), is_sparse=False)
INFO:tensorflow:Create CheckpointSaverHook.
INFO:tensorflow:Saving checkpoints for 1 into /var/folders/6g/kdqcxdms5dg0dr83wxn3ydjcsy9pxl/T/tmpynvv1o82/model.ckpt.
INFO:tensorflow:step = 1, loss = 54.251
INFO:tensorflow:global_step/sec: 507.477
INFO:tensorflow:step = 101, loss = 10.7431
INFO:tensorflow:global_step/sec: 545.408
INFO:tensorflow:step = 201, loss = 2.25133
INFO:tensorflow:global_step/sec: 394.117
INFO:tensorflow:step = 301, loss = 1.009
INFO:tensorflow:global_step/sec: 343.753
INFO:tensorflow:step = 401, loss = 1.59828
INFO:tensorflow:Saving checkpoints for 500 into /var/folders/6g/kdqcxdms5dg0dr83wxn3ydjcsy9pxl/T/tmpynvv1o82/model.ckpt.
INFO:tensorflow:Loss for final step: 1.6989.
# You can get at any of the variables used in the model like this:
est._estimator.get_variable_names()
['OptimizeLoss/b0/Adam', 'OptimizeLoss/b0/Adam_1', 'OptimizeLoss/beta1_power', 'OptimizeLoss/beta2_power', 'OptimizeLoss/learning_rate', 'OptimizeLoss/w0/Adam', 'OptimizeLoss/w0/Adam_1', 'b0', 'global_step', 'w0']
est._estimator.get_variable_value('b0'), est._estimator.get_variable_value('w0')
(array([-1.60145605], dtype=float32), array([ 1.41827154], dtype=float32))
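In other words, the learned model is y ≈ relu(1.42·x − 1.60); its predictions are plotted against the training data below.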
# Can also just call ".predict" to get predictions
xp = np.linspace(-2, 10, num=100)
yp = est.predict(xp.astype(np.float32), outputs='values')['values'][:, 0]
plt.plot(xp, yp)
plt.scatter(x, y)