In [1]:
import tensorflow as tf
import numpy as np
In [11]:
# Generate 100 sample points lying on the line y = 0.1x + 0.2
x_data = np.random.rand(100)
y_data = x_data * 0.1 + 0.2

# Slope and bias of the linear model
k = tf.Variable(0.)
b = tf.Variable(0.)
y = k * x_data + b

# Quadratic cost function (mean squared error)
loss = tf.reduce_mean(tf.square(y_data - y))
# Gradient descent optimizer with learning rate 0.2
optimizer = tf.train.GradientDescentOptimizer(0.2)
# Minimize the cost function
train = optimizer.minimize(loss)

# k and b are trainable variables of the linear model; gradient descent updates
# them step by step so that the loss keeps shrinking
with tf.Session() as sess:
    tf.global_variables_initializer().run()
    for step in range(501):
        sess.run(train)
        if step % 100 == 0:
            print("Step: ", step, "[k, b]", sess.run([k, b]))
Step:  0 [k, b] [0.054201454, 0.10031855]
Step:  100 [k, b] [0.10048652, 0.19973609]
Step:  200 [k, b] [0.10003704, 0.1999799]
Step:  300 [k, b] [0.10000283, 0.19999847]
Step:  400 [k, b] [0.10000023, 0.19999987]
Step:  500 [k, b] [0.10000023, 0.19999987]
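To make the update rule behind `optimizer.minimize(loss)` concrete, here is a minimal pure-NumPy sketch of the same training loop. The gradient formulas are derived by hand from `loss = mean((y_data - y)^2)`; the fixed seed is an assumption added for reproducibility, so the printed numbers will differ slightly from the run above.
In [ ]:
import numpy as np

np.random.seed(0)  # assumed seed, not in the original notebook
x_data = np.random.rand(100)
y_data = x_data * 0.1 + 0.2

k, b, lr = 0.0, 0.0, 0.2  # same initial values and learning rate as above
for step in range(501):
    y = k * x_data + b
    err = y - y_data                    # residuals
    grad_k = np.mean(2 * err * x_data)  # d(loss)/dk
    grad_b = np.mean(2 * err)           # d(loss)/db
    k -= lr * grad_k                    # one gradient-descent update
    b -= lr * grad_b
    if step % 100 == 0:
        print("Step: ", step, "[k, b]", [k, b])
Note that the notebook code targets the TensorFlow 1.x graph/session API; under TensorFlow 2 the same loop would typically be written with `tf.GradientTape` and explicit variable updates instead of `tf.Session` and `tf.train.GradientDescentOptimizer`.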