
動手學深度學習 (Dive into Deep Learning): A Concise Implementation of Linear Regression

import torch
from torch import nn

true_w = [2, -3.4]
true_b = 6
num_input = 2
num_examples = 1000

# Generate the synthetic data: labels = features @ true_w + true_b + Gaussian noise
import numpy as np
features = torch.tensor(np.random.normal(0, 1, size=(num_examples, num_input)), dtype=torch.float)
labels = features[:, 0] * true_w[0] + features[:, 1] * true_w[1] + true_b
labels += torch.tensor(np.random.normal(0, 0.01, size=labels.size()), dtype=torch.float)
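A quick sanity check on the synthetic data can confirm the shapes before moving on; this print block is an illustrative addition that simply reuses the features and labels created above.

# Optional check: 1000 examples with 2 features each, and a 1D label vector
print(features.shape, labels.shape)   # torch.Size([1000, 2]) torch.Size([1000])
print(features[0], labels[0])         # one sample and its noisy label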

# Build a Dataset and DataLoader for mini-batch training
import torch.utils.data as Data
batch_size = 10
dataset = Data.TensorDataset(features, labels)
data_iter = Data.DataLoader(dataset, batch_size, shuffle=True)
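To verify that the DataLoader batches correctly, a single mini-batch can be pulled and inspected; this loop is an optional sketch, not part of the original training code.

# Optional: read one mini-batch to check its shape, then stop
for X, y in data_iter:
    print(X.shape, y.shape)   # torch.Size([10, 2]) torch.Size([10])
    break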

# Define the network: a single linear (fully connected) layer
class LinearNet(nn.Module):
    def __init__(self, n_features):
        super(LinearNet, self).__init__()
        self.linear = nn.Linear(n_features, 1)
    def forward(self, x):
        return self.linear(x)

net = LinearNet(num_input)
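For reference, the same one-layer model can also be built with nn.Sequential; the name net_seq below is only an illustrative alternative and is not used in the rest of the script.

# Equivalent model built with nn.Sequential (net_seq is an illustrative name, unused below)
net_seq = nn.Sequential(nn.Linear(num_input, 1))
print(net_seq)   # prints the Sequential container holding one Linear(2 -> 1) layer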

# Initialize the network parameters
from torch.nn import init

init.normal_(net.linear.weight, mean=0, std=0.01)
init.constant_(net.linear.bias, val=0)
# Define the loss function
loss = nn.MSELoss()
# Define the optimizer
import torch.optim as optim

optimizer = optim.SGD(net.parameters(), lr=0.03)
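The optimizer can be printed to verify its configuration; if a learning-rate schedule were wanted later, each parameter group's lr could be rescaled in place. The decay shown in the comment is only a sketch and is not applied here.

# Inspect the SGD configuration (lr=0.03, no momentum or weight decay)
print(optimizer)
# A simple manual decay would look like (not applied here):
#     for param_group in optimizer.param_groups:
#         param_group['lr'] *= 0.1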
# Train the model
num_epochs = 3
for epoch in range(1, num_epochs + 1):
    for X, y in data_iter:
        output = net(X)
        l = loss(output, y.view(-1, 1))
        optimizer.zero_grad()
        l.backward()
        optimizer.step()
    print('epoch %d, loss: %f' % (epoch, l.item()))

print(net.linear.weight)
print(net.linear.bias)
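Since the data was synthesized from true_w and true_b, the learned parameters can be compared directly against them; this comparison is an added sketch that continues the script above.

# Compare the learned parameters with the ground truth used to generate the data
print(true_w, net.linear.weight.data)
print(true_b, net.linear.bias.data)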
           
