import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
#1 Generate sample points for the linear regression to fit:
#   the underlying line is y = 0.1 * x + 0.3, plus Gaussian noise.
num_points = 1000  # number of synthetic (x, y) samples
vector_set = []
for i in range(num_points):
    # x drawn from N(0, 0.55); observation noise drawn from N(0, 0.03)
    x1 = np.random.normal(0.0, 0.55)
    y1 = x1 * 0.1 + 0.3 + np.random.normal(0.0, 0.03)
    vector_set.append([x1, y1])
x_data = [v[0] for v in vector_set]  # model inputs
y_data = [v[1] for v in vector_set]  # regression targets
#2 Define the computation graph (TensorFlow 1.x style).
##2.1 Model: a one-variable linear function y = w * x + b.
# w starts at a uniform random scalar in [-1, 1); b starts at 0.
w = tf.Variable(tf.random_uniform([1], -1.0, 1.0), name="w")
b = tf.Variable(tf.zeros([1]), name="b")
y = w * x_data + b
##2.2 Loss: mean squared error between predictions and targets.
# NOTE(review): op name "lose" is a typo in the original, kept for
# backward compatibility with any graph lookups by name.
loss = tf.reduce_mean(tf.square(y - y_data), name="lose")
##2.3 Optimizer: plain gradient descent with learning rate 0.5.
optimizer = tf.train.GradientDescentOptimizer(0.5)
##2.4 Training target: minimize the loss.
train = optimizer.minimize(loss, name='train')
#3 Run the iterative training loop.
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    # Initial (random) parameters and loss before any training step.
    print("w=", sess.run(w), ", b=", sess.run(b), ", loss=", sess.run(loss))
    for _ in range(20):  # 20 gradient-descent steps are enough to converge here
        sess.run(train)
        print("w=", sess.run(w), ", b=", sess.run(b), ", loss=", sess.run(loss))
    ## Extra step: plot the sample points and the fitted line.
    plt.scatter(x_data, y_data, c="r")
    plt.plot(x_data, sess.run(w) * x_data + sess.run(b))
    plt.show()
# The code above comes from the course below. I ran it in my local
# environment while taking the course and added my own understanding
# in the comments.
# http://study.163.com/course/courseMain.htm?courseId=1003606092