(Continued) How to Implement Linear Regression in MindSpore: A Demo Example

In the previous post we implemented the algorithm with a hand-written loss function and a manual single-step gradient computation. As an extension, this post uses the loss function and optimizer provided by the framework instead. The code is as follows:

import mindspore
import numpy as np  # NumPy for numerical computation
import matplotlib.pyplot as plt  # matplotlib for plotting
np.random.seed(123)  # fix the random seed for reproducibility

import mindspore.nn as nn
import mindspore.ops as ops
from mindspore import Tensor
from mindspore import ParameterTuple, Parameter
from mindspore import dtype as mstype
from mindspore import Model
import mindspore.dataset as ds
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig
from mindspore.train.callback import LossMonitor


# Dataset generator: 500 samples of y = 5 + 3x + standard normal noise
class DatasetGenerator:
    def __init__(self):
        self.input_data = 2*np.random.rand(500, 1).astype(np.float32)
        self.output_data = 5+3*self.input_data+np.random.randn(500, 1).astype(np.float32)

    def __getitem__(self, index):
        return self.input_data[index], self.output_data[index]

    def __len__(self):
        return len(self.input_data)


def create_dataset(batch_size=500):
    dataset_generator = DatasetGenerator()
    dataset = ds.GeneratorDataset(dataset_generator, ["input", "output"], shuffle=False)

    #buffer_size = 10000
    #dataset = dataset.shuffle(buffer_size=buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    #dataset = dataset.repeat(1)
    return dataset
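
# Net below builds a three-layer perceptron from raw Parameters. Note that
# there is no activation function between the layers, so the composition of
# the three matmuls is still an affine map: exactly the capacity a linear
# regression needs, and no more.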
    

class Net(nn.Cell):
    def __init__(self, input_dims, output_dims):
        super(Net, self).__init__()
        self.matmul = ops.MatMul()

        self.weight_1 = Parameter(Tensor(np.random.randn(input_dims, 128), dtype=mstype.float32), name='weight_1')
        self.bias_1 = Parameter(Tensor(np.zeros(128), dtype=mstype.float32), name='bias_1')
        self.weight_2 = Parameter(Tensor(np.random.randn(128, 64), dtype=mstype.float32), name='weight_2')
        self.bias_2 = Parameter(Tensor(np.zeros(64), dtype=mstype.float32), name='bias_2')
        self.weight_3 = Parameter(Tensor(np.random.randn(64, output_dims), dtype=mstype.float32), name='weight_3')
        self.bias_3 = Parameter(Tensor(np.zeros(output_dims), dtype=mstype.float32), name='bias_3')

    def construct(self, x):
        x1 = self.matmul(x, self.weight_1)+self.bias_1
        x2 = self.matmul(x1, self.weight_2)+self.bias_2
        x3 = self.matmul(x2, self.weight_3)+self.bias_3
        return x3


def main():
    epochs = 10000
    dataset = create_dataset()
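    # batch_size defaults to 500, the size of the whole dataset, so each
    # epoch is a single gradient step and LossMonitor(1000) below logs
    # once every 1000 epochs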
    net = Net(1, 1)
    # loss function
    loss = nn.MSELoss()
    # optimizer
    optim = nn.SGD(params=net.trainable_params(), learning_rate=0.000001)
    model = Model(net, loss, optim, metrics={'loss': nn.Loss()})

    config_ck = CheckpointConfig(save_checkpoint_steps=10000, keep_checkpoint_max=10)
    ck_point = ModelCheckpoint(prefix="checkpoint_mlp", config=config_ck)
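    # save_checkpoint_steps=10000 means a checkpoint is written every
    # 10000 steps, i.e. once over this 10000-step run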
    model.train(epochs, dataset, callbacks=[ck_point, LossMonitor(1000)], dataset_sink_mode=False)


    np.random.seed(123)  # reset the seed so create_dataset() regenerates the same data
    dataset = create_dataset()
    data = next(dataset.create_dict_iterator())
    x = data['input']
    y = data['output']
    y_hat = model.predict(x)


    eval_loss = model.eval(dataset, dataset_sink_mode=False)
    print("{}".format(eval_loss))  # report the evaluation metric


    fig = plt.figure(figsize=(8, 6))  # set the figure size
    plt.title("Dataset")  # figure title
    plt.xlabel("Input x")  # x-axis label
    plt.ylabel("Output y")  # y-axis label
    plt.scatter(x.asnumpy(), y.asnumpy())  # ground-truth points
    plt.scatter(x.asnumpy(), y_hat.asnumpy())  # model predictions
    plt.show()  # render the plot


if __name__ == '__main__':
    """ Set the execution context """
    from mindspore import context
    # configure the MindSpore execution context
    # context.set_context(mode=context.PYNATIVE_MODE, device_target='GPU')
    context.set_context(mode=context.GRAPH_MODE, device_target='GPU')

    import time
    a = time.time()
    main()
    b = time.time()
    print(b - a)  # total wall-clock time in seconds
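
For reference, the same network can be written more compactly with MindSpore's built-in nn.Dense layer, which owns its weight and bias Parameters and handles their initialization. A minimal sketch of a drop-in replacement for the Net class above (the class name DenseNet is ours):

import mindspore.nn as nn

class DenseNet(nn.Cell):
    """The same three-layer linear stack as Net, written with nn.Dense."""
    def __init__(self, input_dims, output_dims):
        super(DenseNet, self).__init__()
        # nn.Dense(in_channels, out_channels) owns its weight and bias
        self.fc1 = nn.Dense(input_dims, 128)
        self.fc2 = nn.Dense(128, 64)
        self.fc3 = nn.Dense(64, output_dims)

    def construct(self, x):
        # still no activations, so the stack remains an affine map
        return self.fc3(self.fc2(self.fc1(x)))

Note that nn.Dense defaults to a much smaller weight initialization than the unit-variance np.random.randn used above, so this variant may tolerate a larger learning rate than 0.000001.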

The output is as follows:

epoch: 1000 step: 1, loss is 1.0764071
epoch: 2000 step: 1, loss is 1.0253074
epoch: 3000 step: 1, loss is 1.0251387
epoch: 4000 step: 1, loss is 1.0251383
epoch: 5000 step: 1, loss is 1.0251379
epoch: 6000 step: 1, loss is 1.0251367
epoch: 7000 step: 1, loss is 1.0251377
epoch: 8000 step: 1, loss is 1.0251374
epoch: 9000 step: 1, loss is 1.0251373
epoch: 10000 step: 1, loss is 1.025136

{'loss': 1.0251359939575195}

62.462594985961914
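
The loss plateaus near 1.025 rather than approaching zero, and that is expected: the targets were generated as y = 5 + 3x + noise, where the noise comes from np.random.randn, a standard normal with variance 1, so an MSE of about 1.0 is the irreducible noise floor for any model on this data. The final line, 62.46..., is the wall-clock runtime in seconds printed at the end of the script. A quick standalone sanity check of the noise floor, reusing the generator's formula:

import numpy as np

np.random.seed(123)
input_data = 2 * np.random.rand(500, 1).astype(np.float32)
output_data = 5 + 3 * input_data + np.random.randn(500, 1).astype(np.float32)

# the best possible MSE equals the variance of the injected noise
residual = output_data - (5 + 3 * input_data)
print(np.mean(residual ** 2))  # ~1.0, matching the observed loss plateau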
