import tensorflow as tf
import numpy as np
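# Generate 100 synthetic samples from the ground-truth line y = 0.1 * x + 0.2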
x_data = np.random.rand(100)
y_data = x_data * 0.1 + 0.2
# Slope and bias of the linear model (both start at 0)
k = tf.Variable(0.)
b = tf.Variable(0.)
y = k * x_data + b
# Quadratic cost function (mean squared error)
loss = tf.reduce_mean(tf.square(y_data - y))
# Gradient descent optimizer with learning rate 0.2
optimizer = tf.train.GradientDescentOptimizer(0.2)
# Training op: minimize the cost function
train = optimizer.minimize(loss)
# In the linear model, k and b are variables; gradient descent keeps updating them so that the loss becomes smaller and smaller
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(501):
        sess.run(train)
        if step % 100 == 0:
            print("Step:", step, "[k, b]:", sess.run([k, b]))
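    # Optional sanity check (a small addition, not part of the original snippet):
    # after training, k and b should be close to the ground-truth 0.1 and 0.2,
    # and the loss should be near zero.
    final_k, final_b, final_loss = sess.run([k, b, loss])
    print("Final [k, b]:", [final_k, final_b], "final loss:", final_loss)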