[置顶] 用Python+numpy实现单隐层神经网络
2017-09-23 10:20
525 查看
利用矩阵运算,快速实现单隐层、sigmoid 激活函数的 BP 神经网络,下面是源码:
# -*- coding: utf-8 -*-
"""Single-hidden-layer BP neural network implemented with numpy matrix ops.

Sigmoid activations on both the hidden and the output layer, trained with
plain batch gradient descent.  The demo under ``__main__`` fits the
two-moons dataset and plots the decision boundary.
"""
import numpy as np


class bpNN:
    """Fully connected net: input -> numH hidden units -> numO outputs."""

    def __init__(self, numH, numO, learning_rate, n_iteration):
        self.numH = numH                    # number of hidden units
        self.numO = numO                    # number of output units
        self.learning_rate = learning_rate  # gradient-descent step size
        self.n_iteration = n_iteration      # training iterations
        # Real parameters are created in fit() once the input dim is known.
        self.weight_H = 0
        self.b_H = 0
        self.weight_O = 0
        self.b_O = 0

    def __ini_weight_b(self, x):
        """Initialise weights (small random values) and biases (zeros).

        x has shape (n_features, n_samples); its row count fixes the width
        of the first weight matrix.
        """
        # randn gives zero-mean values; np.random.random would make every
        # initial weight positive, which biases the hidden activations.
        w1 = np.random.randn(self.numH, x.shape[0]) * 0.01
        b1 = np.zeros([self.numH, 1])
        w2 = np.random.randn(self.numO, self.numH) * 0.01
        b2 = np.zeros([self.numO, 1])
        return w1, b1, w2, b2

    def sigmoid(self, value):
        """Element-wise logistic function 1 / (1 + exp(-value))."""
        return 1.0 / (1 + np.exp(-value))

    def z_a(self, x):
        """Forward pass.

        Returns (z1, a1, z2, a2) with z1 = w1.x + b1, a1 = sigmoid(z1),
        z2 = w2.a1 + b2, a2 = sigmoid(z2).  The (n, 1) bias columns are
        added via numpy broadcasting (no repeat/reshape needed).
        """
        z1 = np.dot(self.weight_H, x) + self.b_H
        a1 = self.sigmoid(z1)
        z2 = np.dot(self.weight_O, a1) + self.b_O
        a2 = self.sigmoid(z2)
        return z1, a1, z2, a2

    def fit(self, x, y):
        """Train with batch gradient descent / back-propagation.

        x: (n_features, n_samples) training inputs.
        y: target labels; a flat 0/1 vector for numO == 1, or any array
           reshapable to (numO, n_samples).
        """
        y = np.asarray(y).reshape(self.numO, -1)  # explicit (numO, m)
        self.weight_H, self.b_H, self.weight_O, self.b_O = self.__ini_weight_b(x)
        for _ in range(self.n_iteration):
            z1, a1, z2, a2 = self.z_a(x)
            # Output layer: a2 - y is the sigmoid/cross-entropy delta.
            dz2 = a2 - y
            dw2 = np.dot(dz2, a1.T)
            db2 = dz2.sum(axis=1).reshape(self.numO, 1)
            # Hidden layer: back-propagate through w2 and the sigmoid
            # derivative a1 * (1 - a1).
            da1 = np.dot(self.weight_O.T, dz2)
            dz1 = da1 * (a1 - a1 ** 2)
            dw1 = np.dot(dz1, x.T)
            db1 = dz1.sum(axis=1).reshape(self.numH, 1)
            # Gradient-descent parameter update.
            self.weight_H = self.weight_H - self.learning_rate * dw1
            self.b_H = self.b_H - self.learning_rate * db1
            self.weight_O = self.weight_O - self.learning_rate * dw2
            self.b_O = self.b_O - self.learning_rate * db2

    def predict(self, x):
        """Return the raw sigmoid output a2 for inputs x (n_features, n_samples)."""
        _, _, _, a2 = self.z_a(x)
        return a2


if __name__ == "__main__":
    # Demo: fit the two-moons dataset and plot the decision boundary.
    # Heavy plotting/dataset imports stay local to the demo so importing
    # this module does not require matplotlib or sklearn.
    import matplotlib.pyplot as plt
    from sklearn import datasets

    X, y = datasets.make_moons(200, noise=0.2)  # training set
    plt.figure(1)  # raw training data
    plt.scatter(X[:, 0], X[:, 1], s=40, c=y, cmap=plt.cm.Spectral)

    train_X = np.array(X).T
    # Named 'model' so the bpNN class itself is not shadowed.
    model = bpNN(8, 1, 0.01, 50000)
    model.fit(train_X, y)
    # Threshold the sigmoid output at 0.5 to obtain 0/1 class labels
    # (clearer than the original floor(a2 * 1.99999) trick).
    predict_y = (model.predict(train_X) > 0.5).astype(int)
    plt.figure(2)  # predictions on the training set
    plt.scatter(X[:, 0], X[:, 1], s=40, c=predict_y.ravel(), cmap=plt.cm.Spectral)

    # Dense grid over the input plane for the decision-boundary plot.
    x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
    y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
    h = 0.01
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    Z = (model.predict(np.c_[xx.ravel(), yy.ravel()].T) > 0.5).astype(int)
    Z = Z.reshape(xx.shape)
    plt.figure(3)  # predicted decision regions
    plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Spectral)
    plt.show()
相关文章推荐
- 循环神经网络在Python 、Numpy和Theano中的实现
- 循环神经网络教程第二部分-用python,numpy,theano实现一个RNN
- 用python的numpy实现神经网络 实现 手写数字识别
- [置顶] 【深度学习】RNN循环神经网络Python简单实现
- [置顶] 【python 神经网络】BP神经网络python实现-iris数据集分类
- 循环神经网络教程-第二部分 用python numpy theano实现RNN
- 神经网络/自编码器的实现(向量化Python版本实现)
- 【Python-ML】神经网络-Theano张量库(GPU版的Numpy)
- 小白学习机器学习---第五章:神经网络简单模型python实现
- cs231n一次课程实践,python实现softmax线性分类器和二层神经网络
- 【深度学习】1.2:简单神经网络的python实现
- 深度学习与神经网络-吴恩达(Part1Week2)-Logistic Regression编程实现(python)
- 6.2神经网络算法实现--python机器学习
- Python神经网络代码识别手写字的实现流程(一):加载mnist数据
- 神经网络之Inception模型的实现(Python+TensorFlow)
- Python 实现感知器模型、两层神经网络
- 使用python实现深度神经网络 3
- 神经网络与深度学习笔记(二)python 实现随机梯度下降
- 十一行Python代码实现一个误差逆传播(BP)神经网络
- 一个 11 行 Python 代码实现的神经网络