from __future__ import absolute_import, division, print_function
import os, sys
import numpy as np
import tensorflow as tf
print(tf.__version__)
1.12.0
TensorFlow doesn't know which nodes should be grouped together unless you tell it to:
with tf.name_scope(name_of_that_scope):
    # declare op_1
    # declare op_2
    # ...
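For example, a minimal sketch (TF 1.x assumed; the scope and tensor names here are illustrative): every op declared inside the scope gets the scope name as a prefix, which is what lets TensorBoard collapse the group into a single node.
tf.reset_default_graph()
with tf.name_scope('affine'):
    x = tf.placeholder(tf.float32, shape = [None, 3], name = 'x')
    w = tf.constant([[1.], [2.], [3.]], name = 'w')
    y = tf.matmul(x, w, name = 'y')
print(y.name)  # affine/y:0 -- the op inherits the scope prefix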
Don't use the tf.Variable function directly; use tf.get_variable instead (see below). Note that tf.get_variable and tf.variable_scope are deprecated in TensorFlow 2.0.
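If you want to run these 1.x examples on a TensorFlow 2.x install, one possible shim (an assumption about your environment, not something this notebook requires) is the v1 compatibility module:
# hypothetical shim for running this notebook under TF 2.x
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()  # restores graph mode, Session, variable_scope, etc.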
tf.reset_default_graph()
with tf.name_scope('y'):
    a1 = tf.Variable(initial_value = 1., name = 'a')
    a2 = tf.Variable(initial_value = 2., name = 'b')
with tf.name_scope('y'):
    a3 = tf.Variable(initial_value = 3., name = 'c')
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print('name : {}, value : {}'.format(a1.name, a1.eval()))
    print('name : {}, value : {}'.format(a2.name, a2.eval()))
    print('name : {}, value : {}'.format(a3.name, a3.eval()))
name : y/a:0, value : 1.0
name : y/b:0, value : 2.0
name : y_1/c:0, value : 3.0
tf.reset_default_graph()
with tf.variable_scope('y'):
    a1 = tf.Variable(initial_value = 1., name = 'a')
    a2 = tf.Variable(initial_value = 2., name = 'b')
with tf.variable_scope('y'):
    a3 = tf.Variable(initial_value = 3., name = 'c')
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print('name : {}, value : {}'.format(a1.name, a1.eval()))
    print('name : {}, value : {}'.format(a2.name, a2.eval()))
    print('name : {}, value : {}'.format(a3.name, a3.eval()))
name : y/a:0, value : 1.0
name : y/b:0, value : 2.0
name : y_1/c:0, value : 3.0
Using tf.get_variable instead of tf.Variable (the recommended practice before TensorFlow 2.0):
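The key behavioral difference, summarized as a small sketch (TF 1.x assumed): tf.Variable always constructs a new variable and silently uniquifies a duplicate name, while tf.get_variable looks the name up in the current variable scope and only creates it if it doesn't exist yet.
# sketch: tf.Variable is a constructor, tf.get_variable is lookup-or-create
tf.reset_default_graph()
v1 = tf.Variable(initial_value = tf.zeros([3]), name = 'v')
v2 = tf.Variable(initial_value = tf.zeros([3]), name = 'v')  # renamed to v_1
w = tf.get_variable(name = 'w', shape = [3], initializer = tf.zeros_initializer())
print(v1.name, v2.name, w.name)  # v:0 v_1:0 w:0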
tf.reset_default_graph()
with tf.name_scope('y'):
    a1 = tf.get_variable(name = 'a', initializer = tf.constant(1))
    a2 = tf.get_variable(name = 'b', initializer = tf.constant(2))
with tf.name_scope('y'):
    a3 = tf.get_variable(name = 'c', initializer = tf.constant(3))
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print('name : {}, value : {}'.format(a1.name, a1.eval()))
    print('name : {}, value : {}'.format(a2.name, a2.eval()))
    print('name : {}, value : {}'.format(a3.name, a3.eval()))
name : a:0, value : 1
name : b:0, value : 2
name : c:0, value : 3
tf.reset_default_graph()
with tf.variable_scope('y'):
    a1 = tf.get_variable(name = 'a', initializer = tf.constant(1))
    a2 = tf.get_variable(name = 'b', initializer = tf.constant(2))
with tf.variable_scope('y'):
    a3 = tf.get_variable(name = 'c', initializer = tf.constant(3))
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print('name : {}, value : {}'.format(a1.name, a1.eval()))
    print('name : {}, value : {}'.format(a2.name, a2.eval()))
    print('name : {}, value : {}'.format(a3.name, a3.eval()))
name : y/a:0, value : 1
name : y/b:0, value : 2
name : y/c:0, value : 3
tf.reset_default_graph()
with tf.variable_scope('y'):
    a1 = tf.get_variable(name = 'a', initializer = tf.constant(1))
    a2 = tf.get_variable(name = 'b', initializer = tf.constant(2))
try:
    with tf.variable_scope('y'):
        a3 = tf.get_variable(name = 'a', initializer = tf.constant(3))
except ValueError:
    print('Variable already exists')
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print('name : {}, value : {}'.format(a1.name, a1.eval()))
    print('name : {}, value : {}'.format(a2.name, a2.eval()))
    print('name : {}, value : {}'.format(a3.name, a3.eval()))
Variable already exists
name : y/a:0, value : 1
name : y/b:0, value : 2
ValueError: Cannot use the default session to evaluate tensor: the tensor's graph is different from the session's graph. Pass an explicit session to `eval(session=sess)`.
(The second get_variable call raised, so a3 was never reassigned: it still points at the variable from the previous cell's graph, and evaluating it in this session fails with the ValueError above.)
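If you actually want the second scope to hand back the existing variable instead of raising, open it with reuse = True; a small sketch (not part of the cell above):
tf.reset_default_graph()
with tf.variable_scope('y'):
    a1 = tf.get_variable(name = 'a', initializer = tf.constant(1.))
with tf.variable_scope('y', reuse = True):
    a3 = tf.get_variable(name = 'a')  # looks up y/a:0, creates nothing
print(a1 is a3)  # True: both names refer to the same Variable object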
tf.reset_default_graph()
with tf.variable_scope('y') as scope:
    a1 = tf.Variable(1., name = 'a')
    scope.reuse_variables()
    a2 = tf.Variable(2., name = 'a')
with tf.variable_scope('y', reuse = tf.AUTO_REUSE):
    a3 = tf.Variable(3., name = 'a')
    a4 = tf.Variable(4., name = 'a')
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print('name : {}, value : {}'.format(a1.name, a1.eval()))
    print('name : {}, value : {}'.format(a2.name, a2.eval()))
    print('name : {}, value : {}'.format(a3.name, a3.eval()))
    print('name : {}, value : {}'.format(a4.name, a4.eval()))
name : y/a:0, value : 1.0
name : y/a_1:0, value : 2.0
name : y_1/a:0, value : 3.0
name : y_1/a_1:0, value : 4.0
tf.reset_default_graph()
with tf.variable_scope('y') as scope:
    a1 = tf.get_variable(initializer = tf.constant(1.), name = 'a')
    scope.reuse_variables()
    a2 = tf.get_variable(initializer = tf.constant(2.), name = 'a')
with tf.variable_scope('y', reuse = tf.AUTO_REUSE):
    a3 = tf.get_variable(initializer = tf.constant(3.), name = 'a')
    a4 = tf.get_variable(initializer = tf.constant(4.), name = 'a')
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print('name : {}, value : {}'.format(a1.name, a1.eval()))
    print('name : {}, value : {}'.format(a2.name, a2.eval()))
    print('name : {}, value : {}'.format(a3.name, a3.eval()))
    print('name : {}, value : {}'.format(a4.name, a4.eval()))
name : y/a:0, value : 1.0
name : y/a:0, value : 1.0
name : y/a:0, value : 1.0
name : y/a:0, value : 1.0
So what's the difference between name_scope and variable_scope?
--> Both create namespaces, but the main thing variable_scope adds is variable sharing.
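A compact way to see it (a sketch assuming TF 1.x; scope and variable names are illustrative): name_scope prefixes op names but is ignored by tf.get_variable, while variable_scope prefixes both.
tf.reset_default_graph()
with tf.name_scope('ns'):
    v_ns = tf.get_variable('v1', shape = [])  # name_scope ignored: v1:0
    op_ns = tf.add(1., 2.)                    # ops are prefixed: ns/Add:0
with tf.variable_scope('vs'):
    v_vs = tf.get_variable('v2', shape = [])  # vs/v2:0
    op_vs = tf.add(1., 2.)                    # vs/Add:0
print(v_ns.name, op_ns.name, v_vs.name, op_vs.name)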
# when using name_scope, tf.get_variable ignores the scope, so both calls
# below hit the same top-level variable 'a' and sharing kicks in
tf.reset_default_graph()
with tf.name_scope('y'):
    a1 = tf.get_variable(name = 'a', initializer = tf.constant(1.))
    tf.get_variable_scope().reuse_variables()
with tf.name_scope('y'):
    a2 = tf.get_variable(name = 'a', initializer = tf.constant(2.))
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print('name : {}, value : {}'.format(a1.name, a1.eval()))
    print('name : {}, value : {}'.format(a2.name, a2.eval()))
name : a:0, value : 1.0
name : a:0, value : 1.0
tf.reset_default_graph()
with tf.variable_scope('y'):
    a1 = tf.get_variable(name = 'a', initializer = tf.constant(1.))
with tf.variable_scope('y') as scope:
    scope.reuse_variables()
    a2 = tf.get_variable(name = 'a', initializer = tf.constant(2.))
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print('name : {}, value : {}'.format(a1.name, a1.eval()))
    print('name : {}, value : {}'.format(a2.name, a2.eval()))
name : y/a:0, value : 1.0
name : y/a:0, value : 1.0
tf.reset_default_graph()
with tf.variable_scope('y'):
    a1 = tf.get_variable(name = 'a', initializer = tf.constant(1.))
with tf.variable_scope('y', reuse = tf.AUTO_REUSE):
    a2 = tf.get_variable(name = 'a', initializer = tf.constant(2.))
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print('name : {}, value : {}'.format(a1.name, a1.eval()))
    print('name : {}, value : {}'.format(a2.name, a2.eval()))
name : y/a:0, value : 1.0
name : y/a:0, value : 1.0
def two_hidden_layers(x):
    assert x.shape.as_list() == [200, 100]
    w1 = tf.Variable(name = 'h1_weights', initial_value = tf.random_normal([100, 50]))
    b1 = tf.Variable(name = 'h1_biases', initial_value = tf.zeros(50))
    h1 = tf.matmul(x, w1) + b1
    assert h1.shape.as_list() == [200, 50]
    w2 = tf.Variable(name = 'h2_weights', initial_value = tf.random_normal([50, 10]))
    b2 = tf.Variable(name = 'h2_biases', initial_value = tf.zeros(10))
    logits = tf.matmul(h1, w2) + b2
    return logits
# result : two sets of variables are created
tf.reset_default_graph()
x1 = tf.truncated_normal(shape = [200, 100], name = 'x1')
x2 = tf.truncated_normal(shape = [200, 100], name = 'x2')
logits1 = two_hidden_layers(x1)
logits2 = two_hidden_layers(x2)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    l1 = sess.run(fetches = logits1, feed_dict = {x1 : np.ones(shape = [200, 100])})
    l2 = sess.run(fetches = logits2, feed_dict = {x2 : np.ones(shape = [200, 100])})
    print(np.all(l1 == l2))
False
writer = tf.summary.FileWriter(logdir = '../graphs/lecture05/var_sharing_wo',
                               graph = tf.get_default_graph())
writer.close()
When using variable sharing, replace tf.Variable with tf.get_variable:
with tf.variable_scope(name) as scope:
    ...
    scope.reuse_variables()
    ...
def two_hidden_layers(x):
    assert x.shape.as_list() == [200, 100]
    w1 = tf.get_variable(name = 'h1_weights',
                         shape = [100, 50],
                         initializer = tf.random_normal_initializer())
    b1 = tf.get_variable(name = 'h1_biases',
                         shape = 50,
                         initializer = tf.zeros_initializer())
    h1 = tf.matmul(x, w1) + b1
    assert h1.shape.as_list() == [200, 50]
    w2 = tf.get_variable(name = 'h2_weights',
                         shape = [50, 10],
                         initializer = tf.random_normal_initializer())
    b2 = tf.get_variable(name = 'h2_biases',
                         shape = 10,
                         initializer = tf.random_normal_initializer())
    logits = tf.matmul(h1, w2) + b2
    return logits
tf.reset_default_graph()
x1 = tf.truncated_normal(shape = [200, 100], name = 'x1')
x2 = tf.truncated_normal(shape = [200, 100], name = 'x2')
with tf.variable_scope('two_layers') as scope:
    logits1 = two_hidden_layers(x1)
    scope.reuse_variables()
    logits2 = two_hidden_layers(x2)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    l1 = sess.run(fetches = logits1, feed_dict = {x1 : np.ones(shape = [200, 100])})
    l2 = sess.run(fetches = logits2, feed_dict = {x2 : np.ones(shape = [200, 100])})
    print('name : {}'.format(logits1.name))
    print('name : {}'.format(logits2.name))
    print(np.all(l1 == l2))
name : two_layers/add_1:0
name : two_layers/add_3:0
True
writer = tf.summary.FileWriter(logdir = '../graphs/lecture05/var_sharing_w1',
                               graph = tf.get_default_graph())
writer.close()
The resulting graph is the same as the first example's. The same sharing can also be done with reuse = tf.AUTO_REUSE:
with tf.variable_scope(name, reuse = tf.AUTO_REUSE):
    ...
tf.reset_default_graph()
x1 = tf.truncated_normal(shape = [200, 100], name = 'x1')
x2 = tf.truncated_normal(shape = [200, 100], name = 'x2')
with tf.variable_scope('two_layers', reuse = tf.AUTO_REUSE):
    logits1 = two_hidden_layers(x1)
    logits2 = two_hidden_layers(x2)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    l1 = sess.run(fetches = logits1, feed_dict = {x1 : np.ones(shape = [200, 100])})
    l2 = sess.run(fetches = logits2, feed_dict = {x2 : np.ones(shape = [200, 100])})
    print('name : {}'.format(logits1.name))
    print('name : {}'.format(logits2.name))
    print(np.all(l1 == l2))
name : two_layers/add_1:0
name : two_layers/add_3:0
True
writer = tf.summary.FileWriter(logdir = '../graphs/lecture05/var_sharing_w2',
                               graph = tf.get_default_graph())
writer.close()
def fully_connected(x, output_dim, scope):
    with tf.variable_scope(scope, reuse = tf.AUTO_REUSE) as scope:
        w = tf.get_variable('weights', [x.shape[1], output_dim],
                            initializer = tf.random_normal_initializer())
        b = tf.get_variable('biases', [output_dim],
                            initializer = tf.constant_initializer(0.0))
        return tf.matmul(x, w) + b

def two_hidden_layers(x):
    h = fully_connected(x, 50, 'h1')
    h = fully_connected(h, 10, 'h2')
    return h
tf.reset_default_graph()
x1 = tf.truncated_normal(shape = [200, 100], name = 'x1')
x2 = tf.truncated_normal(shape = [200, 100], name = 'x2')
logits1 = two_hidden_layers(x1)
logits2 = two_hidden_layers(x2)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    l1 = sess.run(fetches = logits1, feed_dict = {x1 : np.ones(shape = [200, 100])})
    l2 = sess.run(fetches = logits2, feed_dict = {x2 : np.ones(shape = [200, 100])})
    print('name : {}'.format(logits1.name))
    print('name : {}'.format(logits2.name))
    print(np.all(l1 == l2))
name : h2/add:0
name : h2_1/add:0
True
writer = tf.summary.FileWriter(logdir = '../graphs/lecture05/var_sharing_w3',
                               graph = tf.get_default_graph())
writer.close()
def fully_connected(x, output_dim, scope):
    with tf.variable_scope(scope) as scope:
        w = tf.get_variable('weights', [x.shape[1], output_dim],
                            initializer = tf.random_normal_initializer())
        b = tf.get_variable('biases', [output_dim],
                            initializer = tf.constant_initializer(0.0))
        return tf.matmul(x, w) + b

def two_hidden_layers(x):
    with tf.variable_scope('two_hidden_layers', reuse = tf.AUTO_REUSE):
        h = fully_connected(x, 50, 'h1')
        h = fully_connected(h, 10, 'h2')
        return h
tf.reset_default_graph()
x1 = tf.truncated_normal(shape = [200, 100], name = 'x1')
x2 = tf.truncated_normal(shape = [200, 100], name = 'x2')
logits1 = two_hidden_layers(x1)
logits2 = two_hidden_layers(x2)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    l1 = sess.run(fetches = logits1, feed_dict = {x1 : np.ones(shape = [200, 100])})
    l2 = sess.run(fetches = logits2, feed_dict = {x2 : np.ones(shape = [200, 100])})
    print('name : {}'.format(logits1.name))
    print('name : {}'.format(logits2.name))
    print(np.all(l1 == l2))
name : two_hidden_layers/h2/add:0
name : two_hidden_layers_1/h2/add:0
True
writer = tf.summary.FileWriter(logdir = '../graphs/lecture05/var_sharing_w4',
                               graph = tf.get_default_graph())
writer.close()