
尚学堂 - The Backpropagation Algorithm: Process and Formula Derivation

This article is just my personal study notes; it will be taken down on request in case of copyright infringement.

Video: 【尚学堂】AI人工智能PyTorch深度学习进阶教程_PyTorch反向传播推导_代码实现神经网络算法_PyTorch神经网络_PyTorch深度学习课程

References:

  1. "The Backpropagation Algorithm": Process and Formula Derivation (a very intuitive, easy-to-follow explanation of backpropagation)

    This article walks through the concrete steps of forward propagation and backpropagation.

  2. The Backpropagation Algorithm (Process and Formula Derivation)

    This article covers the derivation of the backpropagation algorithm.
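
For reference, the equations the code below implements are the standard backpropagation rules for a fully connected network (my own summary, not quoted from the articles above). With a^(l) the output of layer l, W^(l) and b^(l) its weights and biases, J the loss, and m the number of samples the gradient is averaged over:

    z^{(l)} = a^{(l-1)} W^{(l)} + b^{(l)}, \qquad a^{(l)} = f\big(z^{(l)}\big)

    \delta^{(L)} = a^{(L)} - y \qquad \text{(softmax output with cross-entropy loss)}

    \delta^{(l)} = \big( \delta^{(l+1)} (W^{(l+1)})^{\top} \big) \odot f'\big(z^{(l)}\big)

    \frac{\partial J}{\partial W^{(l)}} = \frac{1}{m}\Big( (a^{(l-1)})^{\top} \delta^{(l)} + \alpha W^{(l)} \Big), \qquad \frac{\partial J}{\partial b^{(l)}} = \frac{1}{m} \sum_{i=1}^{m} \delta^{(l)}_{i}

Here \alpha is the L2 regularization coefficient, matching the alpha used in the code.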


Code exercise:

Dataset: MNIST handwritten digits

Implementing a neural network in Python (NumPy) for the handwritten digit recognition task.

The hidden layers use the ReLU activation function, and the output layer is a softmax classifier.
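
Before walking through the full implementation, here is a minimal numeric check (my own sketch, not part of the original code) of the key identity used in the backward pass: for a softmax output with cross-entropy loss, the gradient with respect to the pre-activation z is simply softmax(z) - y. This is what lets the code below start backpropagation from deltas[last] = activations[-1] - batch_y.

import numpy as np

def softmax_vec(z):
    # softmax for a single vector, shifted by the max for numerical stability
    e = np.exp(z - z.max())
    return e / e.sum()

def cross_entropy(z, y_onehot):
    return -np.sum(y_onehot * np.log(softmax_vec(z)))

rng = np.random.RandomState(0)
z = rng.randn(10)
y_onehot = np.eye(10)[3]

# analytic gradient: softmax(z) - y
analytic = softmax_vec(z) - y_onehot

# numeric gradient by central finite differences
eps = 1e-6
numeric = np.zeros_like(z)
for k in range(len(z)):
    zp, zm = z.copy(), z.copy()
    zp[k] += eps
    zm[k] -= eps
    numeric[k] = (cross_entropy(zp, y_onehot) - cross_entropy(zm, y_onehot)) / (2 * eps)

print(np.max(np.abs(analytic - numeric)))  # tiny (~1e-10): the two gradients agree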

import numpy as np
from sklearn.datasets import fetch_mldata
from sklearn.utils.extmath import safe_sparse_dot
           
# Since the output layer has 10 nodes, it is best to one-hot encode the target digits 0-9
def tran_y(y_true):
    y_ohe = np.zeros(10)
    y_ohe[int(y_true)] = 1
    return y_ohe
           
# Note: fetch_mldata has been removed from newer scikit-learn releases;
# fetch_openml('mnist_784') is the usual replacement there.
mnist = fetch_mldata('MNIST original', data_home='data/for_my_own_nn_data/')

X, y = mnist["data"], mnist["target"]
print(X.shape)
print(y.shape)
y = np.array([tran_y(y[i]) for i in range(len(y))])


hidden_layer_sizes = [300, 100]  # two hidden layers: 300 and 100 neurons
max_iter = 200
alpha = 0.0001  # L2 regularization coefficient
learning_rate = 0.001
           
(70000, 784)
(70000,)
           
def log_loss(y_true, y_prob):  # cross-entropy
    """
    Compute the logistic (cross-entropy) loss for a classification task.
    """
    y_prob = np.clip(y_prob, 1e-10, 1 - 1e-10)
    if y_prob.shape[1] == 1:
        y_prob = np.append(1 - y_prob, y_prob, axis=1)
    if y_true.shape[1] == 1:
        y_true = np.append(1 - y_true, y_true, axis=1)
    return -np.sum(y_true * np.log(y_prob)) / y_prob.shape[0]
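
A hand-checkable example of log_loss (my own illustration, not in the original post): with two samples, the result is the average negative log-probability assigned to the true class.

# -(ln 0.9 + ln 0.8) / 2 ≈ 0.1643
print(log_loss(np.array([[1., 0.], [0., 1.]]),
               np.array([[0.9, 0.1], [0.2, 0.8]])))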
           
def softmax(x):  # softmax nonlinearity, applied row-wise and in place
    tmp = x - x.max(axis=1)[:, np.newaxis]  # subtract the row max for numerical stability
    np.exp(tmp, out=x)
    x /= x.sum(axis=1)[:, np.newaxis]
    return x

           
def relu(x):  # ReLU applied in place: negative values are clipped to 0
    np.clip(x, 0, np.finfo(x.dtype).max, out=x)
    return x

           
def relu_derivative(z, delta):
    """
    实现relu的导数
    """
    delta[z == 0] = 0#z==0执行,不等于0不执行
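
The signature is slightly unusual: z is the ReLU output of a layer and delta is modified in place. A small illustration (my own, not in the original post):

z = relu(np.array([-1.0, 0.0, 2.0]))   # -> array([0., 0., 2.])
delta = np.array([0.5, 0.5, 0.5])
relu_derivative(z, delta)              # zeroes delta wherever the ReLU output is 0
print(delta)                           # [0.  0.  0.5]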
           
def gen_batches(n, bs):
    """
    产生一个批次的样本数据的索引
    :param n: 样本总数
    :param bs: batch_size批大小
    :return: 一个批次样本的索引
    """
    start = 0
    for _ in range(int(n // bs)):
        end = start + bs
        yield slice(start, end)
        start = end
    if start < n:
        yield slice(start, n)
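
A quick usage sketch (my own, not in the original post): with 5 samples and a batch size of 2, gen_batches yields two full slices and one final partial slice.

print(list(gen_batches(5, 2)))
# [slice(0, 2, None), slice(2, 4, None), slice(4, 5, None)]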

           
n_samples, n_features = X.shape
n_outputs = y.shape[1]

batch_size = min(200, n_samples)
layer_units = ([n_features] + hidden_layer_sizes + [n_outputs])
# number of neurons per layer: input layer [n_features], hidden layers hidden_layer_sizes, output layer [n_outputs]
n_layers = len(layer_units)  # total number of layers
           
# Initialize W and b
coefs_ = []
intercepts_ = []
for i in range(n_layers - 1):  # initialize W and b for every layer
    fan_in = layer_units[i]
    fan_out = layer_units[i + 1]
    # Recommended initialization from the Xavier Glorot paper (Glorot uniform)
    factor = 6.
    init_bound = np.sqrt(factor / (fan_in + fan_out))
    coef_init = np.random.uniform(-init_bound, init_bound, (fan_in, fan_out))
    intercept_init = np.random.uniform(-init_bound, init_bound, fan_out)
    coefs_.append(coef_init)
    intercepts_.append(intercept_init)
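
The bound used above is the Glorot (Xavier) uniform initialization; for a layer with fan_in inputs and fan_out outputs, the weights (and here also the biases) are drawn from

    W_{ij} \sim \mathcal{U}\left( -\sqrt{\frac{6}{\mathrm{fan\_in} + \mathrm{fan\_out}}},\ +\sqrt{\frac{6}{\mathrm{fan\_in} + \mathrm{fan\_out}}} \right)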
           
# Initialize containers for the per-layer forward results, the per-layer gradients
# from backpropagation, and the intermediate deltas.
# activations stores the output of every layer; the input layer is X, and the other
# layers get pre-allocated arrays of the right shape.
activations = [X]
activations.extend(np.empty((batch_size, n_fan_out)) for n_fan_out in layer_units[1:])  # one empty matrix per layer
# needed when computing gradients: coef_grads = activations.T @ deltas
deltas = [np.empty_like(a_layer) for a_layer in activations]  # one empty matrix per layer
# Pre-allocate the gradients of the W matrices between consecutive layers
coef_grads = [np.empty((n_fan_in_, n_fan_out_)) for n_fan_in_, n_fan_out_
              in zip(layer_units[:-1], layer_units[1:])]
# Pre-allocate the gradients of the biases of the hidden and output layers
intercept_grads = [np.empty(n_fan_out_) for n_fan_out_ in
                   layer_units[1:]]

loss_ = 0.0
           
# Mini-batch gradient descent
for it in range(max_iter):  # epochs
    arr = np.arange(n_samples)
    np.random.shuffle(arr)
    X = X[arr]
    y = y[arr]
    accumulated_loss = 0.0

    for batch_slice in gen_batches(n_samples, batch_size):  # batches within one epoch
        batch_X = X[batch_slice]
        batch_y = y[batch_slice]
        # assign the input-layer data
        activations[0] = batch_X

        # Forward propagation
        for i in range(n_layers - 1):
            activations[i + 1] = safe_sparse_dot(activations[i], coefs_[i])
            activations[i + 1] += intercepts_[i]
            # hidden layers: apply ReLU
            if (i + 1) != (n_layers - 1):
                activations[i + 1] = relu(activations[i + 1])
        # output layer: apply softmax
        activations[i + 1] = softmax(activations[i + 1])

        # Compute the mean loss over the batch
        loss = log_loss(batch_y, activations[-1])
        # Add the L2 regularization term to the loss
        values = np.sum(np.array([np.dot(s.ravel(), s.ravel()) for s in coefs_]))
        loss += (0.5 * alpha) * values / len(batch_y)
        accumulated_loss += loss * len(batch_y)

        # Backpropagation

        # last is the index of the first weight matrix to handle when going backwards
        last = n_layers - 2
        # This expression for deltas[last] is valid for the combination of output
        # nonlinearity and loss function used here: softmax and categorical cross-entropy.
        # To get the gradient of the last W matrix, first compute deltas[last]
        deltas[last] = activations[-1] - batch_y

        # Gradient of the last W matrix, i.e. the gradient coming back from the output layer
        # 1. gradient from the base loss
        coef_grads[last] = safe_sparse_dot(activations[last].T, deltas[last])
        # 2. gradient from the L2 penalty
        coef_grads[last] += (alpha * coefs_[last])
        # 3. average the gradient (note: this divides by the total sample count n_samples;
        #    dividing by the batch size would be the more common choice)
        coef_grads[last] /= n_samples
        # 4. intercept (bias) gradient from the base loss
        intercept_grads[last] = np.mean(deltas[last], 0)

        # Iterate backwards to compute the gradients of the W matrices in front of each hidden layer
        for i in range(n_layers - 2, 0, -1):
            # deltas_previous = deltas @ W.T * derivative of the activation
            deltas[i - 1] = safe_sparse_dot(deltas[i], coefs_[i].T)
            # apply the derivative of the ReLU activation
            relu_derivative(activations[i], deltas[i - 1])

            # Gradient of the W matrix in front of this hidden layer
            # 1. gradient from the base loss
            coef_grads[i - 1] = safe_sparse_dot(activations[i - 1].T, deltas[i - 1])
            # 2. gradient from the L2 penalty
            coef_grads[i - 1] += (alpha * coefs_[i - 1])
            # 3. average the gradient
            coef_grads[i - 1] /= n_samples
            # 4. intercept (bias) gradient from the base loss
            intercept_grads[i - 1] = np.mean(deltas[i - 1], 0)

        # Gradient-descent parameter update

        # The + here is not NumPy array addition but native Python list concatenation
        # grads: list, length = len(coefs_) + len(intercepts_)
        grads = coef_grads + intercept_grads
        updates = [-learning_rate * grad for grad in grads]
        # Wt+1 = Wt - learning_rate * grad
        # params: list, length = len(coefs_) + len(intercepts_)
        params = coefs_ + intercepts_
        for param, update in zip(params, updates):
            param += update

    loss_ = accumulated_loss / X.shape[0]
    print("Iteration %d, loss = %.8f" % (it, loss_))

    # TODO:
    # break out of the loop if the loss has changed less than a threshold for 10 consecutive iterations

    # TODO:
    # adjust the learning rate after each iteration

    # TODO:
    # use train_test_split and evaluate accuracy on a held-out test set


           
Iteration 0, loss = 16.41843381
Iteration 1, loss = 12.05028002
Iteration 2, loss = 9.59518521
Iteration 3, loss = 8.07381126
Iteration 4, loss = 7.05051051
Iteration 5, loss = 6.34085800
Iteration 6, loss = 5.80947574
Iteration 7, loss = 5.39779761
Iteration 8, loss = 5.06627548
Iteration 9, loss = 4.78721871
Iteration 10, loss = 4.55101589
Iteration 11, loss = 4.35204685
Iteration 12, loss = 4.17534207
Iteration 13, loss = 4.02260116
Iteration 14, loss = 3.88357171
Iteration 15, loss = 3.76303320
Iteration 16, loss = 3.65209822
Iteration 17, loss = 3.55253729
Iteration 18, loss = 3.46278989
Iteration 19, loss = 3.37840236
Iteration 20, loss = 3.30269155
Iteration 21, loss = 3.23202948
Iteration 22, loss = 3.16538411
Iteration 23, loss = 3.10627120
Iteration 24, loss = 3.04794362
Iteration 25, loss = 2.99438211
Iteration 26, loss = 2.94299062
Iteration 27, loss = 2.89611537
Iteration 28, loss = 2.85032957
Iteration 29, loss = 2.80667936
Iteration 30, loss = 2.76552825
Iteration 31, loss = 2.72595690
Iteration 32, loss = 2.68790869
Iteration 33, loss = 2.65275958
Iteration 34, loss = 2.61760695
Iteration 35, loss = 2.58602208
Iteration 36, loss = 2.55289127
Iteration 37, loss = 2.52326688
Iteration 38, loss = 2.49402990
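
For the last TODO, here is a minimal evaluation sketch (my own addition, not in the original post). It assumes coefs_, intercepts_, relu, softmax and safe_sparse_dot from above are still in scope; note that because the network above was trained on all of X, a proper evaluation would split the data before training rather than afterwards as done here.

from sklearn.model_selection import train_test_split

# Hypothetical hold-out split; in the original code all of X, y is used for training.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

def predict(batch_X):
    # Forward pass with the trained parameters: ReLU in the hidden layers, softmax at the output
    a = batch_X
    for i in range(len(coefs_) - 1):
        a = relu(safe_sparse_dot(a, coefs_[i]) + intercepts_[i])
    a = softmax(safe_sparse_dot(a, coefs_[-1]) + intercepts_[-1])
    return np.argmax(a, axis=1)

y_pred = predict(X_test)
accuracy = np.mean(y_pred == np.argmax(y_test, axis=1))
print("Test accuracy: %.4f" % accuracy)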
           
