机器学习_算法_神经网络_BP
2013-08-23 22:10
375 查看
参考:
http://wenku.baidu.com/view/7c38bb1b964bcf84b9d57b7d.html
http://wenku.baidu.com/view/e98dbbd433d4b14e8524680e.html
上面两本将就一下,有些位置是错的,我参考的是它的例子,原理参考下面这本
这里有本专门讲BP的,http://wenku.baidu.com/view/00b75f5202768e9951e738de.html
网上一大堆都是讲理论的,少数的把流程写了一下,但是真正实现的就太少了,有代码也是matlab这种直接使用内置的模块包的,没有找到切切实实写出来的
没办法,我自己到处找资料总算把它实现出来了,还是python,用其他语言的朋友也可以看一下
OO实现的(面向对象),两个大类Nerve和NervesManager
这里发个结果给大家看看,我训练了2000*15圈,我Eta选的是0.1,收敛速度还是比较慢,有几次测试陷到局部最小点了,郁闷,以后还会写几篇BP的改进方法
http://wenku.baidu.com/view/7c38bb1b964bcf84b9d57b7d.html
http://wenku.baidu.com/view/e98dbbd433d4b14e8524680e.html
上面两本将就一下,有些位置是错的,我参考的是它的例子,原理参考下面这本
这里有本专门讲BP的,http://wenku.baidu.com/view/00b75f5202768e9951e738de.html
网上一大堆都是讲理论的,少数的把流程写了一下,但是真正实现的就太少了,有代码也是matlab这种直接使用内置的模块包的,没有找到切切实实写出来的
没办法,我自己到处找资料总算把它实现出来了,还是python,用其他语言的朋友也可以看一下
# -*- coding:utf-8 -*-
"""
A from-scratch BP (backpropagation) neural network.

Created on Aug 25, 2013
@author: blacklaw
@ref: http://wenku.baidu.com/view/e98dbbd433d4b14e8524680e.html
"""
import math
import random

# Training set rows: [WingLength, FeelerLength, Class, AimValue]
# AimValue encodes the class label: 0.9 -> 'Apf', 0.1 -> 'Af'.
DATA = [[1.78, 1.14, 'Apf', 0.9],
        [1.96, 1.18, 'Apf', 0.9],
        [1.86, 1.2, 'Apf', 0.9],
        [1.72, 1.24, 'Af', 0.1],
        [2.0, 1.26, 'Apf', 0.9],
        [2.0, 1.28, 'Apf', 0.9],
        [1.96, 1.3, 'Apf', 0.9],
        [1.74, 1.36, 'Af', 0.1],
        [1.64, 1.38, 'Af', 0.1],
        [1.82, 1.38, 'Af', 0.1],
        [1.9, 1.38, 'Af', 0.1],
        [1.7, 1.4, 'Af', 0.1],
        [1.82, 1.48, 'Af', 0.1],
        [1.82, 1.54, 'Af', 0.1],
        [2.08, 1.56, 'Af', 0.1]]

Eta = 0.1  # learning rate


class Inspirator:
    """Activation-function interface; subclasses override inspire()."""

    def inspire(self, x):
        return 0


class SigmoidInspirator(Inspirator):
    """Logistic sigmoid activation: 1 / (1 + exp(-x))."""

    def inspire(self, x):
        # was `1 / (1 + e**-x)` relying on `from numpy import *`;
        # math.exp keeps the module dependency-free
        return 1.0 / (1.0 + math.exp(-x))


class Nerve:
    """A single neuron.

    Holds its pending inputs for one forward pass, its downstream
    ("son") neurons with per-connection weights, a bias ("threshold")
    input, and the delta used by backpropagation.
    """

    def __init__(self):
        # NOTE: the original declared these as mutable class attributes
        # (shared between instances); they are per-instance here.
        self.inputs = []        # pending inputs: [{'x': value, 'w': weight, 'o': 0}, ...]
        self.sons = []          # downstream neurons
        self.sons_weight = {}   # son neuron -> connection weight
        self.inspirator = SigmoidInspirator()
        self.init_threshold()
        self.threshold_weight = -0.5
        self.last_out = 0       # output of the most recent forward pass
        self.delta = 0          # backpropagated error term

    def set_threshold_w(self, w):
        """Set the bias weight; returns self for chaining."""
        self.threshold_weight = w
        return self

    def init_threshold(self):
        # bias input is a constant 1; only its weight is learned
        self.threshold = 1

    def input(self, x, weight=0.5):
        """Queue one weighted input value for the next forward pass."""
        self.inputs.append({'x': x, 'w': weight, 'o': 0})

    def add_sons(self, sons, weights=0.5):
        """Connect downstream neuron(s) with the given initial weight(s).

        Accepts a single neuron/weight or lists; missing weights fall
        back to a default of 0.5.
        """
        DEFAULT_WEIGHT = 0.5
        if not isinstance(sons, list):
            sons = [sons]
        if not isinstance(weights, list):
            weights = [weights]
        for i, son in enumerate(sons):
            self.sons.append(son)
            try:
                self.sons_weight[son] = weights[i]
            except IndexError:  # was a bare `except:` — only pad short weight lists
                self.sons_weight[son] = DEFAULT_WEIGHT

    def calc_out(self):
        """Weighted sum of queued inputs plus bias, through the activation."""
        total = sum(item['x'] * item['w'] for item in self.inputs)
        total += self.threshold * self.threshold_weight
        return self.inspirator.inspire(total)

    def calc_delta(self, sons_delta_sum):
        """Delta = o * (1 - o) * backpropagated error sum (sigmoid derivative)."""
        return self.last_out * (1 - self.last_out) * sons_delta_sum

    def clear_inputs(self):
        self.inputs = []

    def clear_sons(self):
        self.sons = []

    def refresh_delta(self):
        """Recompute this neuron's delta from its sons' deltas.

        BUGFIX: the original set delta to the plain weighted sum of the
        sons' deltas, omitting the o*(1-o) sigmoid-derivative factor that
        the standard BP hidden-layer delta requires (calc_delta existed
        but was only applied to the output neuron).
        """
        back_sum = sum(self.sons_weight[son] * son.get_delta()
                       for son in self.sons)
        self.set_delta(self.calc_delta(back_sum))

    def refresh_weights(self):
        """Gradient step on all outgoing weights and the bias weight."""
        for son in self.sons:
            # delta rule: dw = Eta * delta_son * out_parent
            self.sons_weight[son] += son.get_delta() * Eta * self.last_out
        # bias input is the constant 1
        self.threshold_weight += Eta * 1 * self.get_delta()

    def output(self):
        """Forward pass: compute output and feed it to every son."""
        out = self.calc_out()
        self.last_out = out
        for son in self.sons:
            son.input(out, self.sons_weight[son])
        return out

    def set_delta(self, delta):
        self.delta = delta

    def get_delta(self):
        return self.delta

    def info(self):
        """Debug dump of this neuron's state."""
        return "Hash:%s\nInputs:%s\nLast_out:%s\nThreshold:%s W:%s\nSons:%s\nSons_weight:%s" % \
            (hash(self), self.inputs, self.last_out,
             self.threshold, self.threshold_weight,
             [hash(son) for son in self.sons],
             [{hash(key): value} for key, value in self.sons_weight.items()])


class InputNerve(Nerve):
    """Pass-through input node: emits its raw input value unchanged."""

    def __init__(self):
        Nerve.__init__(self)
        self.inputs = 0  # scalar, not a list: holds the raw feature value

    def input(self, x):
        self.inputs = x

    def calc_out(self):
        return self.inputs


class NervesManager:
    """Builds and drives a 3-layer (input/hidden/output) BP network."""

    def __init__(self, nerve_count_list):
        """nerve_count_list: [n_inputs, n_hiddens, n_outputs]."""
        self.inputs = []
        self.hiddens = []
        self.outputs = []
        self.create(nerve_count_list)

    def create(self, nerve_count_list):
        """Create the layers and fully connect them with random weights."""
        for _ in range(nerve_count_list[0]):
            self.inputs.append(InputNerve())
        for _ in range(nerve_count_list[1]):
            self.hiddens.append(Nerve())
        # generalized: was a hard-coded single output node; [2, 2, 1]
        # still yields exactly one (train/test only drive outputs[0])
        for _ in range(nerve_count_list[2]):
            self.outputs.append(Nerve())
        for nerve in self.inputs:
            nerve.add_sons(self.hiddens,
                           [random.random() for _ in self.hiddens])
        for nerve in self.hiddens:
            nerve.add_sons(self.outputs,
                           [random.random() for _ in self.outputs])

    def train(self, value_list, T):
        """One forward + backward pass; returns (output, target)."""
        O = self.test(value_list)
        nerves = self.inputs + self.hiddens + self.outputs
        # output neuron delta is set by hand: o * (1 - o) * (T - o)
        self.outputs[0].set_delta(self.outputs[0].calc_delta(T - O))
        # propagate deltas back through hidden and input layers
        for nerve in self.hiddens + self.inputs:
            nerve.refresh_delta()
        # only after all deltas are known, update every weight
        for nerve in nerves:
            nerve.refresh_weights()
        return O, T

    def test(self, value_list):
        """Forward pass only; returns the output neuron's value."""
        nerves = self.inputs + self.hiddens + self.outputs
        for i, node in enumerate(self.inputs):
            node.input(value_list[i])
        for nerve in nerves:
            nerve.output()
        for nerve in nerves:
            nerve.clear_inputs()
        return self.outputs[0].last_out


if __name__ == "__main__":
    # network: 2 input nodes, 2 hidden nodes, 1 output node
    manager = NervesManager([2, 2, 1])
    # train BP until the mean squared error drops below 0.005
    for i in range(2000):
        avr_sum = 0
        print('******** train **********')
        for line in DATA:
            O, T = manager.train([line[0], line[1]], line[3])
            avr_sum += (O - T) ** 2
            print(O, T)
        avr = avr_sum / len(DATA)
        print(avr)
        if avr < 0.005:
            break
    # test BP
    print("********* test **********")
    for line in DATA:
        print(manager.test([line[0], line[1]]), line[3])
OO实现的(面向对象),两个大类Nerve和NervesManager
这里发个结果给大家看看,我训练了2000*15圈,我Eta选的是0.1,收敛速度还是比较慢,有几次测试陷到局部最小点了,郁闷,以后还会写几篇BP的改进方法
********* test ********** 0.994341785362 0.9 0.999736065837 0.9 0.989063612563 0.9 0.163572725812 0.1 0.999218216462 0.9 0.997095492527 0.9 0.730657889775 0.9 0.134026079144 0.1 0.114695534729 0.1 0.142601445274 0.1 0.157811418673 0.1 0.119199019357 0.1 0.120173144422 0.1 0.108409846864 0.1 0.143414585379 0.1
相关文章推荐
- 数学之路(3)-机器学习(3)-机器学习算法-神经网络[5]
- 神经网络学习(四)反向(BP)传播算法(2)-Matlab实现
- 机器学习之深度神经网络算法全套
- 神经网络学习(三)反向(BP)传播算法(1)
- 神经网络中 BP 算法的原理与 Python 实现源码解析
- 利用遗传算法优化神经网络:Uber提出深度学习训练新方式
- python 深度学习、python神经网络算法、python数据分析、python神经网络算法数学基础教学
- DAY7: 神经网络及深度学习基础--算法的优化(deeplearning.ai)
- 深度学习基础模型算法原理及编程实现--04.改进神经网络的方法
- 机器学习:神经网络反向传播推导
- 【原】Andrew Ng斯坦福机器学习 Coursera—Programming Exercise 3 逻辑回归多分类和神经网络
- 七月算法深度学习 第三期 学习笔记-第八节 循环神经网络与相关应用
- 深度学习基础(二):简单神经网络,后向传播算法及实现
- 机器学习入门:你应该学习的8个神经网络结构(一)
- 线性神经网络模型与学习算法
- 25径向基函数神经网络模型与学习算法.ppt
- 深度学习算法实践3---神经网络常用操作实现
- 2017最新整理深度学习神经网络算法全套
- scikit-learn学习之神经网络算法
- 机器学习之神经网络