Here are the changes I made to your code to get it working:

Instead of np.random.random, which generates numbers in the interval [0.0, 1.0), initialize the weights with np.random.uniform, which generates uniform random floats in [-1.0, 1.0).

Center the input space around the origin (i.e., remove the mean) and normalize it.
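For example, a minimal sketch of that preprocessing with NumPy (the array X here is just an illustration, not part of the original code):

X = np.array([[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]])
X = X - X.mean(axis=0)  # center around the origin (remove the mean)
X = X / X.std(axis=0)   # scale to unit standard deviation

Applied to 0/1 inputs this yields exactly the -1.0/1.0 values used in the xor list further down.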
Here is how the initialization looks:

for i in range(len(sizeOfLayers)):
    self.activation.append(sizeOfLayers[i] * [0.0] + [0.0])
self.weightsIn = np.random.uniform(-1, 1, (sizeOfLayers[1], sizeOfLayers[0] + 1))
self.weightsOut = np.random.uniform(-1, 1, (sizeOfLayers[2], sizeOfLayers[1] + 1))
Then you also have to append a 1 to the activation in the forward function, as the bias unit of the hidden layer (this is the relevant line from the full code below):

self.activation[1] = np.vstack((self.sigmoid(self.sumHidden), np.array([1])))
You may want to change the learning rate to make it work (around 0.5 worked for me). Also, your mean squared error calculation is wrong: you should multiply by 0.5, not divide.
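In other words, the error returned by backPropagate should be computed as (this is the line the full code below uses):

error = 0.5 * np.sum((Y.T - self.activation[2].T)**2)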
Here is the modified code:

import numpy as np

class neural_network():
    def __init__(self, sizeOfLayers):
        '''
        sizeOfLayers: Tuple with numbers of neurons of each layer
        (in, hidden, out)
        '''
        if len(sizeOfLayers) > 3:
            raise ValueError('Wrong number of layers')
        self.sizeOfLayers = sizeOfLayers
        # Activation values of each layer (instance attribute, so separate
        # networks do not share state)
        self.activation = []
        for i in range(len(sizeOfLayers)):
            # each layer + bias unit
            self.activation.append(sizeOfLayers[i] * [0.0] + [0.0])
        # Wi = len(Hid) x len(IN)+1 (bias)
        self.weightsIn = np.random.uniform(-1, 1, (sizeOfLayers[1], sizeOfLayers[0] + 1))
        # Wo = len(OUT) x len(Hid)+1 (bias)
        self.weightsOut = np.random.uniform(-1, 1, (sizeOfLayers[2], sizeOfLayers[1] + 1))

    def forward(self, X):
        '''
        X: input vector
        '''
        # Input + bias appended to the activation vector
        self.activation[0] = np.vstack((np.array([X]).T, np.array([1])))
        # Weighted sum of the inputs
        self.sumHidden = self.weightsIn.dot(self.activation[0])
        # Activation of the hidden layer, with the bias unit appended
        self.activation[1] = np.vstack((self.sigmoid(self.sumHidden), np.array([1])))
        # Weighted sum of the hidden activations
        self.sumOut = self.weightsOut.dot(self.activation[1])
        # Activation of the output layer
        self.activation[2] = self.sigmoid(self.sumOut)
        return self.activation[2].T

    def backPropagate(self, Y, trainRate=0.1):
        '''
        Y: output target
        trainRate: learning rate
        '''
        if len(Y) != self.sizeOfLayers[2]:
            raise ValueError('Wrong number of outputs')
        # Output delta
        error_o = Y.T - self.activation[2].T
        out_delta = self.sigmoidPrime(self.activation[2]) * error_o.T
        # Hidden delta
        error_h = out_delta.T.dot(self.weightsOut)
        hidden_delta = self.sigmoidPrime(self.activation[1]) * error_h.T
        # Update output weights (the +1 also updates the bias weight)
        change_o = self.activation[1] * out_delta.T
        for i in range(self.sizeOfLayers[2]):
            for j in range(self.sizeOfLayers[1] + 1):
                self.weightsOut[i][j] += trainRate * change_o[j][i]
        # Update input weights (the +1 also updates the bias weight)
        change_h = self.activation[0] * hidden_delta.T
        for i in range(self.sizeOfLayers[1]):
            for j in range(self.sizeOfLayers[0] + 1):
                self.weightsIn[i][j] += trainRate * change_h[j][i]
        # Squared error: multiply by 0.5, do not divide
        return np.sum((Y.T - self.activation[2].T) ** 2) * 0.5

    def sigmoid(self, z):
        return 1 / (1 + np.exp(-z))

    def sigmoidPrime(self, a):
        # a is already a sigmoid activation, so the derivative is a * (1 - a)
        return a * (1 - a)

    def train(self, target, trainRate=0.5, it=50000):
        for i in range(it):
            error = 0.0
            for t in target:
                inputs = np.array(t[0])
                targets = np.array([t[1]])
                self.forward(inputs)
                error = error + self.backPropagate(targets, trainRate)

nn = neural_network((2, 5, 1))
xor = [
    [[-1.0, -1.0], [0]],
    [[-1.0,  1.0], [1]],
    [[ 1.0, -1.0], [1]],
    [[ 1.0,  1.0], [0]]  # If I change this to 1 it converges
]
nn.train(xor)
for e in xor:
    nn.forward(e[0])
    print(nn.activation[2])
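Note that the XOR inputs above already use -1.0/1.0 rather than 0/1, so they are centered around the origin as suggested in the preprocessing advice at the top.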
Good luck!