Using a 3-layer network as an example, implemented in Python.
Main functions:
Create a neuralNetwork class; its __init__() function performs the data initialization and is called automatically whenever the neuralNetwork class is instantiated (i.e., when an object is created);
import numpy
import scipy.special

class neuralNetwork:
    # initialise the neural network
    def __init__(self, inputnodes, hiddennodes, outputnodes, learningrate):
        self.inodes = inputnodes
        self.hnodes = hiddennodes
        self.onodes = outputnodes
        # link weight matrices: wih (input -> hidden) and who (hidden -> output)
        self.wih = numpy.random.normal(0.0, pow(self.hnodes, -0.5), (self.hnodes, self.inodes))
        self.who = numpy.random.normal(0.0, pow(self.onodes, -0.5), (self.onodes, self.hnodes))
        # learning rate
        self.lr = learningrate
        # activation function: the sigmoid
        self.activation_function = lambda x: scipy.special.expit(x)
        pass
Initial weights from the input layer to the hidden layer: from X = W·I we know the weight matrix W has shape [hidden_nodes, input_nodes]; only then does the resulting X have one entry per hidden node. Once the shape is determined, the matrix can be defined accordingly (a small shape check follows the two weight definitions below);
As a rule of thumb, the weights are drawn on the scale of 1/√(number of nodes); combining this with numpy's normal-distribution function gives the initial values;
This yields the two sets of link weights:
self.wih = numpy.random.normal(0.0, pow(self.hnodes, -0.5),(self.hnodes, self.inodes))
self.who = numpy.random.normal(0.0, pow(self.onodes, -0.5),(self.onodes, self.hnodes))
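As a quick sanity check of the shape argument above, here is a minimal sketch; the node counts (784 inputs, 100 hidden nodes) match the MNIST example further down, but the random input vector is purely illustrative:

import numpy

input_nodes, hidden_nodes = 784, 100
wih = numpy.random.normal(0.0, pow(hidden_nodes, -0.5), (hidden_nodes, input_nodes))
inputs = numpy.random.rand(input_nodes, 1)   # one input column vector I
x = numpy.dot(wih, inputs)                   # X = W * I
print(wih.shape, x.shape)                    # (100, 784) (100, 1) -- one value per hidden node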
    def train(self, inputs_list, targets_list):
        # transpose: turn the row vectors into column vectors
        inputs = numpy.array(inputs_list, ndmin=2).T
        targets = numpy.array(targets_list, ndmin=2).T
        # hidden layer input signals: weighted sum
        hidden_inputs = numpy.dot(self.wih, inputs)
        # hidden layer output signals: sigmoid
        hidden_outputs = self.activation_function(hidden_inputs)
        # output layer input signals: weighted sum
        final_inputs = numpy.dot(self.who, hidden_outputs)
        # output layer output signals: sigmoid
        final_outputs = self.activation_function(final_inputs)
        # output layer error: target value - computed value
        output_errors = targets - final_outputs
        # hidden layer error, computed with the not-yet-updated self.who
        hidden_errors = numpy.dot(self.who.T, output_errors)
        # update hidden -> output weights (self.who is updated only after the error has been taken)
        self.who += self.lr * numpy.dot((output_errors * final_outputs * (1.0 - final_outputs)), numpy.transpose(hidden_outputs))
        # update input -> hidden weights
        self.wih += self.lr * numpy.dot((hidden_errors * hidden_outputs * (1.0 - hidden_outputs)), numpy.transpose(inputs))
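For reference, the two += lines at the end of train() implement gradient descent with the sigmoid derivative σ(x)(1 − σ(x)). Written out as equations (∘ is element-wise multiplication, η is the learning rate self.lr, and E/O/I are the error, output and input arrays from the code), this is only a restatement of the code above, not an extra derivation:

$$\Delta W_{ho} = \eta \,\big(E_{o} \circ O_{o} \circ (1 - O_{o})\big)\, O_{h}^{T}$$
$$\Delta W_{ih} = \eta \,\big(E_{h} \circ O_{h} \circ (1 - O_{h})\big)\, I^{T}$$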
    def query(self, inputs_list):
        inputs = numpy.array(inputs_list, ndmin=2).T
        # compute the outputs with the trained weights
        hidden_inputs = numpy.dot(self.wih, inputs)
        hidden_outputs = self.activation_function(hidden_inputs)
        final_inputs = numpy.dot(self.who, hidden_outputs)
        final_outputs = self.activation_function(final_inputs)
        return final_outputs
Take 100 training samples provided by MNIST for learning, obtain the updated weights after training, and then recognize a digit written by hand yourself;
import numpy
import matplotlib.pyplot as plt
from PIL import Image

input_nodes = 784
hidden_nodes = 100
output_nodes = 10
learning_rate = 0.3
n = neuralNetwork(input_nodes, hidden_nodes, output_nodes, learning_rate)

training_data_file = open("F:/0_TechPath/0_ControlMind/3_NeuralNetwork/mnist_dataset/mnist_train_100.csv", 'r')
training_data_list = training_data_file.readlines()
training_data_file.close()

for record in training_data_list:
    all_values = record.strip("\n").split(',', -1)
    # normalise the inputs to the range 0.01 .. 1.00
    inputs = numpy.array(all_values[1:], dtype=numpy.uint8) / 255 * 0.99 + 0.01
    # all output nodes start at 0.01
    targets = numpy.zeros(output_nodes) + 0.01
    # target value at the node matching the label
    targets[int(all_values[0])] = 0.99
    # call the train function
    n.train(inputs, targets)

# test the network with a hand-drawn image
fname = "F:/0_TechPath/0_ControlMind/3_NeuralNetwork/mnist_dataset/pic.png"
image = Image.open(fname)
image = image.convert('L')
width, height = image.size
img_array = image.resize((28, 28))
img_arr = numpy.array(img_array)
# invert: MNIST digits are light strokes on a dark background
my_image = 255 - img_arr.reshape(-1, 784)
image_array = numpy.array(my_image, dtype=numpy.uint8).reshape((28, 28))
scaled_input = numpy.array(my_image, dtype=numpy.uint8) / 255 * 0.99 + 0.01
# compute the outputs with the trained weights and pick the most activated node
values = n.query(scaled_input)
max_index1 = numpy.argmax(values)
print("I'm:", max_index1)
plt.imshow(image_array, cmap='Greys', interpolation='None')
plt.show()
Original post: https://www.cnblogs.com/GavinDu/p/12369498.html