天天看点

利用梯度下降法求解“最小二乘法”各个参数问题

数据参考自一篇介绍梯度下降法求解最小二乘问题的博客（原文链接在转载时丢失）

# -*- coding:utf-8 -*-
import numpy as np
import math

# Number of training samples.
m = 20

# Design matrix: each row is [x0, x1] where x0 = 1 is the intercept term
# and x1 is the feature value.
# NOTE(review): the numeric literals were lost when this post was scraped;
# the values below are the canonical dataset used by the gradient-descent
# least-squares tutorial this post references (x1 = 1..20) — confirm
# against the original blog.
X = [[1, 1],
     [1, 2],
     [1, 3],
     [1, 4],
     [1, 5],
     [1, 6],
     [1, 7],
     [1, 8],
     [1, 9],
     [1, 10],
     [1, 11],
     [1, 12],
     [1, 13],
     [1, 14],
     [1, 15],
     [1, 16],
     [1, 17],
     [1, 18],
     [1, 19],
     [1, 20]
     ]

# Target values, one per row of X.
y = [3, 4, 5, 5, 2, 4, 7, 8, 11, 8, 12, 11,
     13, 13, 16, 17, 18, 17, 19, 21]

# Learning rate for gradient descent.
alpha = 0.01

def gradient_function(theta, X, y):
    """Return the gradient of the mean-squared-error cost w.r.t. theta.

    For the hypothesis h(x) = theta[0]*x0 + theta[1]*x1 and cost
    J = (1/2m) * sum((h(x_i) - y_i)^2), the gradient component for
    parameter j is (1/m) * sum(x_ij * (h(x_i) - y_i)).

    theta: current parameters [theta0, theta1].
    X: list of sample rows [x0, x1].
    y: list of target values, same length as X.
    Returns [dJ/dtheta0, dJ/dtheta1], each averaged over the samples.
    """
    # Derive the sample count locally instead of relying on a module-level
    # global, so the function works for any dataset size.
    m = len(y)

    # Per-sample prediction error: h(x_i) - y_i.
    diff = [theta[0] * X[i][0] + theta[1] * X[i][1] - y[i]
            for i in range(m)]

    # Average the error-weighted features over all samples.
    grad0 = sum(X[i][0] * diff[i] for i in range(m)) / m
    grad1 = sum(X[i][1] * diff[i] for i in range(m)) / m
    return [grad0, grad1]

def gradient_descent(X, y, alpha, epsilon=1e-5):
    """Minimize the least-squares cost by batch gradient descent.

    Starts from theta = [1, 1] and repeatedly steps against the gradient
    until every gradient component's magnitude is at most `epsilon`.

    X: list of sample rows [x0, x1].
    y: list of target values, same length as X.
    alpha: learning rate (step size).
    epsilon: convergence threshold on the gradient magnitude.
    Returns the fitted parameters [theta0, theta1].
    """
    theta = [1, 1]
    gradient = gradient_function(theta, X, y)
    # The scraped original looped `while True` with no exit condition and
    # never returned; stop once the gradient is (near) zero instead.
    while not all(abs(g) <= epsilon for g in gradient):
        theta = [theta[0] - alpha * gradient[0],
                 theta[1] - alpha * gradient[1]]
        gradient = gradient_function(theta, X, y)
    return theta

if __name__ == "__main__":
    # Fit the parameters on the sample data and report the result
    # (Python 3 print; the original used Python 2 `print theta`).
    print(gradient_descent(X, y, alpha))



           

继续阅读