#!/usr/bin/env python
# coding: utf-8

# In[1]:

import tensorflow as tf
import tensorflow.contrib.eager as tfe


# In[2]:

tfe.enable_eager_execution()  # Comment out this line to disable Eager Execution


# In[3]:

class MNISTModel(tfe.Network):
    def __init__(self):
        super(MNISTModel, self).__init__()
        self.layer1 = self.track_layer(tf.layers.Dense(units=10))
        self.layer2 = self.track_layer(tf.layers.Dense(units=10))

    def call(self, input):
        """Actually runs the model."""
        result = self.layer1(input)
        result = self.layer2(result)
        return result


# In[4]:

# Let's run the model on a blank (all-zero) input image
model = MNISTModel()
batch = tf.zeros([1, 1, 784])
print(batch.shape)  # (1, 1, 784)
result = model(batch)
print(result)  # tf.Tensor([[[0. 0. ... 0.]]], shape=(1, 1, 10), dtype=float32)


# In[5]:

def loss_function(model, x, y):
    y_ = model(x)
    # Returns the per-example cross-entropy; implicit_gradients/minimize
    # below sum over it when computing gradients.
    return tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=y_)


# In[6]:

# NOTE: `dataset` is never defined in the original notebook; the stand-in
# below (random images with one-hot labels) is an added assumption so the
# training loop is runnable. Swap in real MNIST data in practice.
images = tf.random_normal([32, 784])
labels = tf.one_hot(tf.random_uniform([32], maxval=10, dtype=tf.int32), depth=10)
dataset = tf.data.Dataset.from_tensor_slices((images, labels)).batch(8)

# If we are not sure whether a GPU is available, the CPU is always safe
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
for (x, y) in tfe.Iterator(dataset):
    grads = tfe.implicit_gradients(loss_function)(model, x, y)
    optimizer.apply_gradients(grads)


# In[ ]:

# If a GPU is available, the same loop can run on it
with tf.device("/gpu:0"):
    for (x, y) in tfe.Iterator(dataset):
        optimizer.minimize(lambda: loss_function(model, x, y))
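

# In[ ]:

# Not part of the original notebook: a minimal sketch of inspecting the
# trained model's predictions, assuming the `model` and the synthetic
# `dataset` stand-in defined above. tf.argmax over the logits picks the
# most likely digit for each example.
for (x, y) in tfe.Iterator(dataset):
    logits = model(x)
    predictions = tf.argmax(logits, axis=1)  # predicted digit per example
    targets = tf.argmax(y, axis=1)           # true digit (labels are one-hot)
    correct = tf.cast(tf.equal(predictions, targets), tf.float32)
    print("batch accuracy:", tf.reduce_mean(correct).numpy())
    break  # look at the first batch only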
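

# In[ ]:

# Not part of the original notebook: a minimal sketch of
# tfe.gradients_function, which (unlike tfe.implicit_gradients above)
# differentiates a function with respect to its *inputs* rather than the
# model's variables. Here d/dx (x*x) at x = 3 gives 6.
def square(x):
    return tf.multiply(x, x)

grad = tfe.gradients_function(square)
print(square(3.0))  # tf.Tensor(9.0, shape=(), dtype=float32)
print(grad(3.0))    # [tf.Tensor(6.0, shape=(), dtype=float32)]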