您的位置:首页 > 理论基础 > 计算机网络

吴恩达Deeplearning.ai课程:浅层神经网络python实验代码(二)

2018-03-19 16:53 696 查看
import numpy as np
import matplotlib.pyplot as plt

import sklearn
import sklearn.datasets
import sklearn.linear_model
import matplotlib

def plot_decision_boundary(pred_func, data=None, labels=None):
    """Draw a classifier's decision regions and overlay the training points.

    Parameters
    ----------
    pred_func : callable
        Takes a 2 x k array of column-vector points and returns 1 x k labels.
    data : array-like, optional
        2 x m feature matrix; defaults to the module-level global ``X``
        (backward compatible with the original global-based call).
    labels : array-like, optional
        1 x m label row; defaults to the module-level global ``y``.
    """
    pts = X if data is None else data
    lbl = y if labels is None else labels

    # Bounding box of the data plus a small margin.
    x_min, x_max = pts[0, :].min() - .5, pts[0, :].max() + .5
    y_min, y_max = pts[1, :].min() - .5, pts[1, :].max() + .5
    h = 0.01  # grid resolution

    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))

    # Predict over the whole grid; pred_func expects samples as columns.
    Z = pred_func(np.c_[xx.ravel(), yy.ravel()].T)
    Z = Z.reshape(xx.shape)

    plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
    # BUG FIX: the original passed y[0, :].tolist(), which for an np.mat is a
    # nested (1, m) list — matplotlib rejects that as a color spec unless
    # m is 3 or 4.  Flatten everything to 1-D sequences instead.
    plt.scatter(np.asarray(pts)[0, :], np.asarray(pts)[1, :], s=40,
                c=np.asarray(lbl).ravel(), cmap=plt.cm.Spectral)

class Neuralnetwork:
    """Single-hidden-layer neural network: relu hidden layer, sigmoid output.

    Trained with batch gradient descent on the averaged binary cross-entropy
    loss.  Data is column-major: features are rows, samples are columns.
    Works with plain ndarrays or np.matrix inputs.
    """

    def __init__(self, t, m, n, X, y, alpha, iters):
        """Store hyper-parameters and randomly initialize the weights.

        Parameters
        ----------
        t : int      -- number of hidden units
        m : int      -- number of samples (columns of X)
        n : int      -- number of input features (rows of X)
        X : array    -- n x m input matrix
        y : array    -- 1 x m row of 0/1 labels
        alpha : float-- learning rate
        iters : int  -- gradient-descent iterations
        """
        self.t = t
        self.m = m
        self.n = n
        self.X = X
        self.y = y
        self.alpha = alpha
        self.iters = iters
        # Small random init breaks hidden-unit symmetry; the 0.01 scale keeps
        # pre-activations near zero where the sigmoid gradient is largest.
        self.w1 = np.random.randn(t, n) * 0.01  # hidden weights, t x n
        self.b1 = np.zeros((t, 1))              # hidden biases,  t x 1
        self.w2 = np.random.randn(1, t) * 0.01  # output weights, 1 x t
        self.b2 = np.zeros((1, 1))              # output bias,    1 x 1

    def sigmod(self, x):
        """Elementwise logistic sigmoid.  (Name kept as-is for callers.)"""
        return 1 / (1 + np.exp(-x))

    def relu(self, x):
        """Elementwise rectified linear unit."""
        return np.maximum(0, x)

    def relu_prime(self, x):
        """Derivative of relu: 1 where x > 0, else 0.

        BUG FIX: the original did ``s = x`` (an alias, not a copy) and then
        wrote into ``s``, silently mutating the caller's array.
        """
        return (x > 0).astype(float)

    def train(self):
        """Run batch gradient descent for ``self.iters`` steps.

        Prints the loss every 1000 iterations and returns the learned
        parameters ``(w1, b1, w2, b2)``.
        """
        loss = 0.0
        for i in range(self.iters):
            # ---- Forward pass ----
            Z1 = np.dot(self.w1, self.X) + self.b1
            A_1 = self.relu(Z1)
            A_2 = self.sigmod(np.dot(self.w2, A_1) + self.b2)
            # Averaged binary cross-entropy over the m samples.
            loss = -np.sum(np.dot(self.y, np.log(A_2).T)
                           + np.dot(1 - self.y, np.log(1 - A_2).T)) / self.m

            # ---- Backward pass ----
            dz_2 = A_2 - self.y
            dw_2 = np.dot(dz_2, A_1.T) / self.m
            # BUG FIX: bias gradients must be averaged over m like the weight
            # gradients (the original omitted the / m).  reshape(-1, 1) keeps
            # the (·, 1) column shape for both ndarray and matrix inputs, so
            # the bias update cannot broadcast to the wrong shape.
            db_2 = np.sum(dz_2, axis=1).reshape(-1, 1) / self.m

            dz_1 = np.multiply(np.dot(self.w2.T, dz_2), self.relu_prime(Z1))
            dw_1 = np.dot(dz_1, self.X.T) / self.m
            db_1 = np.sum(dz_1, axis=1).reshape(-1, 1) / self.m

            # ---- Gradient-descent update ----
            self.w1 = self.w1 - self.alpha * dw_1
            self.b1 = self.b1 - self.alpha * db_1
            self.w2 = self.w2 - self.alpha * dw_2
            self.b2 = self.b2 - self.alpha * db_2

            if (i + 1) % 1000 == 0:
                print(str(i + 1) + " times loss = " + str(loss))
        return self.w1, self.b1, self.w2, self.b2

    def predict(self, X):
        """Return 1 x m hard 0/1 predictions (threshold 0.5) for columns of X."""
        A_1 = self.relu(np.dot(self.w1, X) + self.b1)
        prob = self.sigmod(np.dot(self.w2, A_1) + self.b2)
        # asarray normalizes matrix input; comparison yields the 0/1 labels.
        return (np.asarray(prob) > 0.5).astype(float)

if __name__ == '__main__':

    # Load the two-feature data set: each line is "x1 x2 label".
    X = []
    y = []

    # BUG FIX: the original leaked the file handle (open without close);
    # the with-statement closes it even on error.  Iterating the file object
    # directly also avoids reading every line into memory at once.
    with open('G:/testSet.txt') as fileIn:
        for line in fileIn:
            lineArr = line.strip().split()
            X.append([float(lineArr[0]), float(lineArr[1])])
            y.append(float(lineArr[2]))
    X = np.mat(X).T  # features as columns: n x m
    y = np.mat(y)    # labels as a 1 x m row vector

    n, m = X.shape
    t = 15         # hidden-layer size
    alpha = 0.05   # learning rate
    iters = 40000  # gradient-descent iterations
    lr = Neuralnetwork(t, m, n, X, y, alpha, iters)
    w1, b1, w2, b2 = lr.train()

    # Predictions on the training set (available for accuracy reporting).
    Z = lr.predict(X)

    # Visualize the learned decision boundary with the training points.
    plot_decision_boundary(lambda x: lr.predict(x))
    plt.show()


内容来自用户分享和网络整理,不保证内容的准确性,如有侵权内容,可联系管理员处理 点击这里给我发消息
标签: 
相关文章推荐