Introductory Deep Learning Notes (11): Implementing One-Dimensional Linear Regression in Python
Code
import numpy as np
import torch
from torch import nn, optim
import matplotlib.pyplot as plt

# Initialize (x, y); the values are chosen arbitrarily
x = np.array([[3.3], [4.4], [5.5], [6.6], [3.1], [5.63423], [2.568],
              [3.546], [6.654], [8.2412]], dtype=np.float32)
y = np.array([[1.3], [2.4], [3.5], [4.6], [2.1], [8.63423], [6.568],
              [7.546], [4.654], [2.2412]], dtype=np.float32)

# Convert the NumPy arrays to PyTorch tensors
x = torch.from_numpy(x)
y = torch.from_numpy(y)

# Optionally inspect the raw data first:
# plt.scatter(x, y, color='red', marker='*')
# plt.show()

# Define the model (computation graph)
class Line_demo(nn.Module):
    def __init__(self):
        super(Line_demo, self).__init__()
        self.linear = nn.Linear(1, 1)  # one input feature, one output feature

    def forward(self, x):
        output = self.linear(x)
        return output

model = Line_demo()

# Loss function
criterion = nn.MSELoss()
# Optimizer (named "optimizer" so it does not shadow torch.optim)
optimizer = optim.SGD(model.parameters(), lr=1e-3)

# Train the model
num = 100
for epoch in range(num):
    # Variable is deprecated in current PyTorch; tensors can be used directly
    inputs = x
    targets = y
    # forward
    output = model(inputs)
    loss = criterion(output, targets)
    # backward
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    # Print progress every 10 epochs
    if (epoch + 1) % 10 == 0:
        print("Epoch[{}/{}],Loss:{:.6f}".format(epoch + 1, num, loss.item()))

# Predict on the training inputs
model.eval()
with torch.no_grad():
    predict = model(x).numpy()

# Plot the original points and the fitted line
plt.plot(x.numpy(), y.numpy(), 'ro', label='Original data')
plt.plot(x.numpy(), predict, label='Line')
plt.legend()
plt.show()
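After training, the fitted slope and intercept live inside the single nn.Linear layer. A minimal sketch for reading them out (assumes the model from the code above has just been trained):

# Inspect the learned parameters of the nn.Linear(1, 1) layer
w = model.linear.weight.item()  # learned slope
b = model.linear.bias.item()    # learned intercept
print("Learned line: y = {:.4f} * x + {:.4f}".format(w, b))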
Output
Epoch[10/100],Loss:8.669944
Epoch[20/100],Loss:7.855971
Epoch[30/100],Loss:7.599496
Epoch[40/100],Loss:7.514923
Epoch[50/100],Loss:7.483373
Epoch[60/100],Loss:7.468194
Epoch[70/100],Loss:7.458087
Epoch[80/100],Loss:7.449567
Epoch[90/100],Loss:7.441560
Epoch[100/100],Loss:7.433733
Process finished with exit code 0
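The loss does not go to zero because the (x, y) points were chosen arbitrarily and do not lie on a single line, so the model can only approach the best least-squares fit; with only 100 epochs at lr=1e-3 it is still creeping toward that minimum. As a sanity check, a sketch comparing against the closed-form least-squares line from NumPy (assumes the x and y tensors from the script are still in scope):

# Closed-form least-squares fit for comparison with the SGD-trained model
slope, intercept = np.polyfit(x.numpy().flatten(), y.numpy().flatten(), 1)
print("Least-squares fit: y = {:.4f} * x + {:.4f}".format(slope, intercept))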