
Code Practice: The Backpropagation Algorithm for an MLP

import h5py
import matplotlib.pyplot as plt
import numpy as np

def load_dataset():
    # Open the HDF5 files; each behaves like a dictionary of datasets
    train_dataset = h5py.File('./train_catvnoncat.h5','r')
    test_dataset =  h5py.File('./test_catvnoncat.h5','r')
    # List every key in the file
#     for key in train_dataset.keys():
        # Print each dataset
#         print (train_dataset[key])
    # list_classes holds the class names, train_set_x the training images, train_set_y the training labels
    # There are 209 training images, each of size 64*64*3
    #<HDF5 dataset "list_classes": shape (2,), type "|S7">
    #<HDF5 dataset "train_set_x": shape (209, 64, 64, 3), type "|u1">
    #<HDF5 dataset "train_set_y": shape (209,), type "<i8">

    # Convert the 209 images into NumPy arrays
    train_set_x = np.array(train_dataset["train_set_x"][:])
    train_set_y = np.array(train_dataset["train_set_y"][:])
    test_set_x = np.array(test_dataset["test_set_x"][:])
    test_set_y = np.array(test_dataset["test_set_y"][:])
    # Print the dimensions
#     print(train_set_x.shape)
    # (209, 64, 64, 3)

    # Set the figure size
#     plt.figure(figsize = (2,2))
    # Show sample image 11
#     plt.imshow(train_set_x[11])
#     plt.show()
    # Print its label (1 = cat, 0 = not a cat)
#     print(train_set_y[11])
    # Keep the first dimension (the number of images); -1 flattens the rest (64*64*3 = 12288); .T transposes to (12288, 209)
    train_set_x = train_set_x.reshape(train_set_x.shape[0],-1).T
#     print(train_set_x.shape)
    test_set_x = test_set_x.reshape(test_set_x.shape[0],-1).T
    # Reshape the labels into row vectors
    train_set_y = train_set_y.reshape(train_set_y.shape[0],-1).T   # (1, 209)
    test_set_y = test_set_y.reshape(test_set_y.shape[0],-1).T      # (1, 50)
    
    return train_set_x,train_set_y,test_set_x,test_set_y
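
# Added illustration (not part of the original post): a minimal shape sanity
# check for load_dataset. The test-split size of 50 is the usual catvnoncat
# split and is an assumption here.
def check_shapes():
    tx, ty, sx, sy = load_dataset()
    # Design matrices are (features, samples); labels are row vectors
    assert tx.shape == (12288, 209) and ty.shape == (1, 209)
    assert sx.shape[0] == 12288 and sy.shape[0] == 1
    print("shapes OK:", tx.shape, ty.shape, sx.shape, sy.shape)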

def init_parameters(fc_net):
    # Dictionary holding the parameter matrices W1, b1, W2, b2, W3, b3, W4, b4
    # a1 = W1*a0 + b1
    parameters = {}
    # Number of layers (the input layer plus the fully connected layers)
    layers_num = len(fc_net)
#     print(layers_num)
    for L in range(1,layers_num):
        # Gaussian (standard normal) initialization: an fc_net[L] x fc_net[L-1] matrix
        # (in practice the weights are often scaled down, e.g. by 0.01, to avoid saturating the sigmoid)
        parameters["W"+str(L)] = np.random.randn(fc_net[L],fc_net[L-1])
        # fc_net[L] rows, 1 column
        parameters["b"+str(L)] = np.zeros((fc_net[L],1))
#     for L in range(1,layers_num):
        # Print W1..W4 and b1..b4
#         print("W"+str(L) +"=",parameters["W"+str(L)])
#         print("b"+str(L) +"=",parameters["b"+str(L)])
    
    return parameters
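
# With fc_net = [12288, 4, 3, 2, 1] (used in __main__ below), init_parameters
# produces parameter shapes that follow directly from fc_net[L] x fc_net[L-1]:
#   W1: (4, 12288)   b1: (4, 1)
#   W2: (3, 4)       b2: (3, 1)
#   W3: (2, 3)       b3: (2, 1)
#   W4: (1, 2)       b4: (1, 1)
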
# Sigmoid activation function
def sigmoid(Z):
    return 1/(1+np.exp(-Z))
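
# Added sketch (not called by the code below): the sigmoid derivative,
# sigma'(Z) = sigma(Z) * (1 - sigma(Z)). backward_pass uses the equivalent
# form A * (1 - A), since A = sigmoid(Z) is already cached.
def sigmoid_derivative(Z):
    A = sigmoid(Z)
    return A * (1 - A)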
    

def forward_pass(A0,parameters):
    # Cache for the intermediate values
    cache = {}
    A = A0
    # Cache the input as A0
    cache["A0"] = A0
    # In Python, // is integer (floor) division
    Layer_num = len(parameters) // 2
    # range(1, 5) iterates over 1, 2, 3, 4
    for L in range(1, Layer_num+1):
        # np.dot performs matrix multiplication
        # b has a single column; NumPy broadcasting expands it across all 209 columns
        # (see the small broadcasting demo after this function)
        Z = np.dot(parameters["W"+str(L)],A) +parameters["b"+str(L)]
        #A1 = (4,12288)*(12288,209) +(4,1) = (4,209) +(4,1) = (4,209)
        #A2 = (3,4)*(4,209) +(3,1) = (3,209)+(3,1) = (3,209)
        #A3 = (2,3)*(3,209) +(2,1) = (2,209)
        #A4 = (1,2)*(2,209) +(1,1) = (1,209)
        A = sigmoid(Z)
        # Cache all intermediate values Z1..Z4 and A1..A4
        cache["Z"+str(L)] = Z
        cache["A"+str(L)] = A
        
    return A,cache
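
# Added sketch (not used by the training code): a tiny demonstration of how a
# one-column bias is broadcast across all sample columns.
def broadcasting_demo():
    W = np.ones((2, 3))
    A = np.ones((3, 4))
    b = np.arange(2).reshape(2, 1)   # shape (2, 1)
    Z = np.dot(W, A) + b             # (2, 4); b is broadcast across the 4 columns
    print(Z)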
    
def compute_loss(AL,Y):
    # Quadratic (MSE) cost averaged over the m samples
    m = Y.shape[1]         # Y has shape (1, 209)
    cost = (1/m)*np.sum((1/2)*(AL-Y)*(AL-Y))
    return cost
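
# Added note on how this cost ties into backward_pass: for
# J = (1/m) * sum( (1/2) * (AL - Y)^2 ) with a sigmoid output layer,
#   dJ/dAL  = (AL - Y)          (per sample, before the 1/m averaging)
#   dAL/dZL = AL * (1 - AL)     (sigmoid derivative)
# so backward_pass starts from dZL = (AL - Y) * AL * (1 - AL) and applies the
# 1/m factor when it forms dW and db.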
   
def backward_pass(AL,parameters,cache,Y):
    # Number of samples (209)
    m = Y.shape[1]
    # Dictionary storing the gradients of every layer
    gredient = {}
    # Number of weight layers
    Layer_num = len(parameters) //2
    # Output-layer error dJ/dZ; dZL.shape = (1, 209)
    dZL= (AL -Y)*(AL*(1-AL) )
    # Gradient of the last layer: dW4 = (1/m) * dZ4 . A3^T
    gredient["dW"+str(Layer_num)] = (1/m)*np.dot(dZL,cache["A"+ str(Layer_num -1)].T)
    gredient["db" +str(Layer_num)] = (1/m)*np.sum(dZL,axis=1,keepdims =True)  # axis=1 sums across samples; keepdims=True keeps the (n, 1) shape
    # Iterate over the hidden layers in reverse order: 3, 2, 1
    for L in reversed (range(1,Layer_num)):
        # Propagate the error one layer back, using this layer's cached activation for the sigmoid derivative
        A = cache["A"+str(L)]
        dZL = np.dot(parameters["W"+str(L+1)].T,dZL)*(A*(1-A))
        gredient["dW"+str(L)] = (1/m)*np.dot(dZL,cache["A"+ str(L -1)].T)
        gredient["db" +str(L)] = (1/m)*np.sum(dZL,axis=1,keepdims =True)
    return gredient 
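
# Added sketch (not called below): a numerical gradient check for a single
# weight. It perturbs W[layer][i, j] by a small epsilon and compares the
# finite-difference slope of the cost with the analytic gradient from
# backward_pass; useful for verifying the backpropagation code.
def grad_check_one_weight(parameters, X, Y, layer=1, i=0, j=0, eps=1e-5):
    AL, cache = forward_pass(X, parameters)
    analytic = backward_pass(AL, parameters, cache, Y)["dW" + str(layer)][i, j]
    parameters["W" + str(layer)][i, j] += eps
    loss_plus = compute_loss(forward_pass(X, parameters)[0], Y)
    parameters["W" + str(layer)][i, j] -= 2 * eps
    loss_minus = compute_loss(forward_pass(X, parameters)[0], Y)
    parameters["W" + str(layer)][i, j] += eps   # restore the original weight
    numeric = (loss_plus - loss_minus) / (2 * eps)
    print("analytic:", analytic, "numeric:", numeric)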
    
# Gradient descent parameter update
def update_parameters(gredient,parameters,LearnRate):
    #     w := w - LearnRate * dw
    #     b := b - LearnRate * db
    Layer_num = len(parameters)//2
    for L in range(1,Layer_num+1):
        # Iterate over layers 1, 2, 3, 4
        parameters["W" +str(L)] = parameters["W" +str(L)]  - LearnRate*gredient["dW"+str(L)]
        parameters["b" +str(L)] = parameters["b" +str(L)]  - LearnRate*gredient["db"+str(L)]
    return parameters
        
    
    
if __name__ == '__main__':
    # 1. Load the data
    train_set_x,train_set_y,test_set_x,test_set_y = load_dataset()
    # 2. Normalize the input pixel values to [0, 1]
    train_set_x = train_set_x/255.0
    test_set_x = test_set_x/255.0
    # 3. Define the number of neurons in each fully connected layer; 12288 is the number of input pixels (64*64*3)
    fc_net = [12288,4,3,2,1]
    # 4. Initialize the parameters W and b
    parameters =  init_parameters(fc_net)
    # z = w*x + b ; a = f(z)
    # AL = (1,209)

    # 5. Forward pass; iterations is the number of training (backprop) updates
    iterations = 500
    # Learning rate
    LearnRate = 0.01
    # costs stores the cost computed every 10 iterations
    costs = []
    for iteration in range(0,iterations):
        AL,cache = forward_pass(train_set_x,parameters) 

    # 6. Cost over all samples
        loss = compute_loss(AL, train_set_y)
        if iteration%10 == 0:
            # Record and print the loss every 10 iterations
            costs.append(loss)
            print("loss == ",loss)
    # 7. Backward pass: AL is the network output, parameters holds w and b, cache holds Z and A, train_set_y is the labels
        gredient = backward_pass(AL,parameters,cache,train_set_y)
    # 8. Gradient descent: update the parameters once using the gradients
        parameters   =  update_parameters(gredient,parameters,LearnRate)
        
    plt.plot(costs,'p')
    plt.xlabel("opooc-iteration")
    plt.ylabel("opooc-cost")
    plt.show()   
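
    # 9. Added evaluation sketch (not in the original post): threshold the sigmoid
    # output at 0.5 and report the accuracy on the train and test splits.
    train_pred = (forward_pass(train_set_x, parameters)[0] > 0.5).astype(int)
    test_pred = (forward_pass(test_set_x, parameters)[0] > 0.5).astype(int)
    print("train accuracy:", np.mean(train_pred == train_set_y))
    print("test accuracy:", np.mean(test_pred == test_set_y))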
           
