
Implementing a Deep Neural Network in Python -- Study Notes

2017-06-21 15:11
These are notes from a Shiyanlou (实验楼) course. Although the later parts became members-only, some kind classmates fortunately took very detailed notes, essentially a reproduction of the courseware, so I was able to finish the course. A big thank-you, hahaha. Here is the link:

使用python实现深度神经网络 (Implementing a Deep Neural Network in Python)

What follows are the notes I organized myself.

Below are the individual files.

#coding=utf-8
#preprocess.py -- data preprocessing
import sys

import numpy as np
from scipy import misc

def main():
    l = len(sys.argv)
    if l < 2:
        print 'eg: python preprocess.py list.txt dist.npy\n'\
              'convert images to npy\n'
        return

    src = sys.argv[1]

    dst = sys.argv[2] if l > 2 else 'data.npy'
    with open(src, 'r') as f:
        lines = f.readlines()
    data = []
    labels = []
    for i in lines:
        name, label = i.strip('\n').split(' ')  # split each line of the image list into an image name and its label
        print name + ' processed'
        img = misc.imread(name)    # read the image into a matrix (note: removed from newer SciPy; imageio.imread is the modern replacement)
        img /= 255                 # integer division turns the image into a matrix containing only 0s and 1s
        img.resize((img.size, 1))  # store the image as an img.size*1 column vector for easier computation
        data.append(img)
        labels.append(int(label))
    print 'write to npy'
    np.save(dst, [data, labels])  # save the training data locally as an npy file
    print 'completed'

if __name__ == '__main__':
    main()


Run the following commands to generate the corresponding .npy files:

python preprocess.py train.txt train.npy
python preprocess.py validate.txt validate.npy
python preprocess.py test.txt test.npy
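
preprocess.py expects each line of the list file to contain an image path and its integer label (0-25, one per uppercase letter) separated by a single space. The actual paths depend on where the course data lives; a hypothetical train.txt might look like this:

train/A/0001.jpg 0
train/B/0001.jpg 1
train/Z/0001.jpg 25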


# encoding=utf8
#layers.py -- wraps each network component in its own class
import numpy as np

class Data:
    def __init__(self, name, batch_size):  # name of the data file, and batch_size, the number of images per batch
        with open(name, 'rb') as f:
            data = np.load(f)
        self.x = data[0]  # inputs x
        self.y = data[1]  # expected correct outputs y
        self.l = len(self.x)
        self.batch_size = batch_size
        self.pos = 0  # pos records the current read position in the data

    def forward(self):
        pos = self.pos
        bat = self.batch_size
        l = self.l
        if pos + bat >= l:  # on the last batch, return the remaining data and reset pos to the start position 0
            ret = (self.x[pos:l], self.y[pos:l])
            self.pos = 0
            index = range(l)
            np.random.shuffle(index)  # shuffle the training data
            self.x = self.x[index]
            self.y = self.y[index]
        else:  # not the last batch, so just advance pos by batch_size
            ret = (self.x[pos:pos + bat], self.y[pos:pos + bat])
            self.pos += self.batch_size

        return ret, self.pos  # a returned pos of 0 signals the end of an epoch

    def backward(self, d):  # the data layer has no backward operation
        pass

class FullyConnect:
    def __init__(self, l_x, l_y):  # the lengths of the input and output layers
        self.weights = np.random.randn(l_y, l_x) / np.sqrt(l_x)  # random parameter initialization; the np.sqrt(l_x) scaling can be ignored for now
        self.bias = np.random.randn(l_y, 1)  # random parameter initialization
        self.lr = 0  # the learning rate alpha, which must be set by hand
        '''
        self.lr is the learning rate alpha and needs to be set manually.
        l_x is the length of a single input data vector, 17*17=289.
        l_y is the number of output nodes of the fully connected layer; there are
        26 uppercase English letters, so l_y=26.
        Hence self.weights is 26*289 and self.bias is 26*1.
        Taking a batch of 100 training inputs as an example:
        the input x of forward() has size 100*289*1 (batch_size * vector length * 1),
        and the input d of backward() is the "partial gradient" passed back from the
        layer in front, with size 100*26*1 (batch_size * output nodes l_y * 1).
        '''

    def forward(self, x):
        self.x = x  # save the intermediate result for use in backpropagation
        self.y = np.array([np.dot(self.weights, xx) + self.bias for xx in x])  # compute the fully connected layer's output
        return self.y  # pass this layer's result forward

    def backward(self, d):
        ddw = [np.dot(dd, xx.T) for dd, xx in zip(d, self.x)]  # by the chain rule, multiply the gradient passed back by x to get the gradient w.r.t. the weights
        self.dw = np.sum(ddw, axis=0) / self.x.shape[0]
        self.db = np.sum(d, axis=0) / self.x.shape[0]
        self.dx = np.array([np.dot(self.weights.T, dd) for dd in d])
        self.weights -= self.lr * self.dw  # update the parameters
        self.bias -= self.lr * self.db
        return self.dx  # propagate the gradient backwards
    '''
    The gradient of the loss w.r.t. the input x, self.dx, is derived like self.dw:
    a single sample in self.x is 289*1, self.weights is 26*289 and dd is 26*1,
    so self.weights must be transposed: 289*1 = (289*26)*(26*1).
    '''

#activation function
class Sigmoid:
    def __init__(self):  # no parameters, so nothing to initialize
        pass

    def sigmoid(self, x):
        return 1 / (1 + np.exp(-x))

    def forward(self, x):
        self.x = x
        self.y = self.sigmoid(x)
        return self.y

    def backward(self, d):
        sig = self.sigmoid(self.x)
        self.dx = d * sig * (1 - sig)
        return self.dx  # propagate the gradient backwards

#loss function layer
class QuadraticLoss:
    def __init__(self):
        pass

    def forward(self, x, label):
        self.x = x
        self.label = np.zeros_like(x)  # label holds only a single number, so convert it to a vector matching the size of the model's output
        for a, b in zip(self.label, label):
            a[b] = 1.0  # only the position of the correct label gets probability 1, the rest get 0
        self.loss = np.sum(np.square(x - self.label)) / self.x.shape[0] / 2  # average over the batch; the extra division by 2 is just for convenience
        return self.loss

    def backward(self):
        self.dx = (self.x - self.label) / self.x.shape[0]  # the factor of 2 cancels the 1/2 above
        return self.dx

class Accuracy:
    def __init__(self):
        pass

    def forward(self, x, label):
        self.accuracy = np.sum([np.argmax(xx) == ll for xx, ll in zip(x, label)])  # count the correctly predicted instances
        self.accuracy = 1.0 * self.accuracy / x.shape[0]  # accuracy rate
        return self.accuracy

class CrossEntropyLoss:
    def __init__(self):
        pass

    def forward(self, x, label):
        self.x = x
        self.label = np.zeros_like(x)
        for a, b in zip(self.label, label):
            a[b] = 1.0
        self.loss = np.nan_to_num(-self.label * np.log(x) - ((1 - self.label) * np.log(1 - x)))  # np.nan_to_num guards against log(0)
        self.loss = np.sum(self.loss) / x.shape[0]
        return self.loss

    def backward(self):
        self.dx = (self.x - self.label) / self.x / (1 - self.x)  # gradient of the cross-entropy loss w.r.t. x
        return self.dx
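
Before wiring the layers into a network, a quick shape sanity check can catch dimension bugs early. This is my own sketch, not one of the course files; the random input merely stands in for a real batch from Data.forward():

#check_shapes.py (hypothetical helper, not part of the course)
import numpy as np
from layers import FullyConnect, Sigmoid

fc = FullyConnect(17 * 17, 26)
fc.lr = 0.0  # freeze the weights; we only care about shapes here
act = Sigmoid()
x = np.random.randn(100, 289, 1)  # fake batch: 100 column vectors of length 289
y = act.forward(fc.forward(x))
print y.shape  # (100, 26, 1)
d = fc.backward(act.backward(np.random.randn(100, 26, 1)))  # fake gradient from the layer in front
print d.shape  # (100, 289, 1)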


# encoding=utf8
#shallow.py -- a shallow neural network with just one hidden layer between input and output
from layers import *

def main():
    datalayer1 = Data('train.npy', 1024)      # for training; batch_size is set to 1024
    datalayer2 = Data('validate.npy', 10000)  # for validation, so batch_size is 10000 to evaluate all samples at once
    inner_layers = []
    inner_layers.append(FullyConnect(17 * 17, 26))
    inner_layers.append(Sigmoid())
    losslayer = QuadraticLoss()
    accuracy = Accuracy()

    for layer in inner_layers:
        layer.lr = 1000.0  # set the learning rate for all intermediate layers

    epochs = 20
    for i in range(epochs):
        print 'epochs:', i
        losssum = 0
        iters = 0
        while True:
            data, pos = datalayer1.forward()  # fetch data from the data layer
            x, label = data
            for layer in inner_layers:  # forward pass
                x = layer.forward(x)

            loss = losslayer.forward(x, label)  # call the loss layer's forward function to compute the loss value
            losssum += loss
            iters += 1
            d = losslayer.backward()  # call the loss layer's backward function to compute the gradient to be backpropagated

            for layer in inner_layers[::-1]:  # backpropagation
                d = layer.backward(d)

            if pos == 0:  # test the accuracy after each epoch
                data, _ = datalayer2.forward()
                x, label = data
                for layer in inner_layers:
                    x = layer.forward(x)
                accu = accuracy.forward(x, label)  # call the accuracy layer's forward() function to compute the accuracy
                print 'loss:', losssum / iters
                print 'accuracy:', accu
                break

if __name__ == '__main__':
    main()


# encoding=utf8
#deep.py -- builds a deeper neural network by adding one more hidden layer; everything else stays the same.
#Under identical conditions the result is actually worse than the shallow network: this is the
#*vanishing gradient problem*, which we will fix next time.
from layers import *

def main():
    datalayer1 = Data('train.npy', 1024)      # for training; batch_size is set to 1024
    datalayer2 = Data('validate.npy', 10000)  # for validation, so batch_size is 10000 to evaluate all samples at once
    inner_layers = []
    inner_layers.append(FullyConnect(17 * 17, 20))
    inner_layers.append(Sigmoid())
    inner_layers.append(FullyConnect(20, 26))  # add one more hidden layer
    inner_layers.append(Sigmoid())
    losslayer = QuadraticLoss()
    accuracy = Accuracy()

    for layer in inner_layers:
        layer.lr = 10000.0  # set the learning rate for all intermediate layers

    epochs = 20
    for i in range(epochs):
        print 'epochs:', i
        losssum = 0
        iters = 0
        while True:
            data, pos = datalayer1.forward()  # fetch data from the data layer
            x, label = data
            for layer in inner_layers:  # forward pass
                x = layer.forward(x)

            loss = losslayer.forward(x, label)  # compute the loss value
            losssum += loss
            iters += 1
            d = losslayer.backward()  # compute the gradient to be backpropagated

            for layer in inner_layers[::-1]:  # backpropagation
                d = layer.backward(d)

            if pos == 0:  # test the accuracy after each epoch
                data, _ = datalayer2.forward()
                x, label = data
                for layer in inner_layers:
                    x = layer.forward(x)
                accu = accuracy.forward(x, label)
                print 'loss:', losssum / iters
                print 'accuracy:', accu
                break

if __name__ == '__main__':
    main()
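
The drop in performance can be traced to Sigmoid.backward(): it multiplies the incoming gradient by sig*(1-sig), which never exceeds 0.25, so each extra sigmoid layer shrinks the gradient reaching the early layers by at least a factor of 4. A minimal illustration of that bound (my own sketch, not a course file):

import numpy as np

x = np.linspace(-5, 5, 11)
sig = 1 / (1 + np.exp(-x))
print np.max(sig * (1 - sig))  # 0.25, reached at x=0; far smaller once the neuron saturates
print 0.25 ** 2                # best-case shrink factor after passing back through two sigmoid layers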


#encoding=utf8
#deep2.py -- this time the loss function is the *cross-entropy loss*, which fixes the
#earlier *vanishing gradient* problem and yields much better results.
from layers import *

def main():
    datalayer1 = Data('train.npy', 1024)
    datalayer2 = Data('validate.npy', 10000)
    inner_layers = []
    inner_layers.append(FullyConnect(17 * 17, 20))
    inner_layers.append(Sigmoid())
    inner_layers.append(FullyConnect(20, 26))
    inner_layers.append(Sigmoid())
    losslayer = CrossEntropyLoss()
    accuracy = Accuracy()

    for layer in inner_layers:
        layer.lr = 1.0

    epochs = 20
    for i in range(epochs):
        print 'epochs:', i
        losssum = 0
        iters = 0
        while True:
            data, pos = datalayer1.forward()
            x, label = data
            for layer in inner_layers:
                x = layer.forward(x)

            loss = losslayer.forward(x, label)
            losssum += loss
            iters += 1
            d = losslayer.backward()

            for layer in inner_layers[::-1]:
                d = layer.backward(d)

            if pos == 0:
                data, _ = datalayer2.forward()
                x, label = data
                for layer in inner_layers:
                    x = layer.forward(x)
                accu = accuracy.forward(x, label)
                print 'loss:', losssum / iters
                print 'accuracy:', accu
                break

if __name__ == '__main__':
    main()
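
Why the cross-entropy loss fixes the vanishing gradient at the output layer: its gradient (x-label)/x/(1-x) exactly cancels the sig*(1-sig) factor in Sigmoid.backward(), leaving simply sigmoid(z)-label, which does not shrink when the output neurons saturate. A quick numerical check of this cancellation (my own sketch, reusing the classes from layers.py):

import numpy as np
from layers import Sigmoid, CrossEntropyLoss

z = np.random.randn(4, 26, 1)  # fake pre-activations for a batch of 4
labels = [0, 5, 12, 25]        # fake labels
act = Sigmoid()
loss = CrossEntropyLoss()
y = act.forward(z)
loss.forward(y, labels)
d = act.backward(loss.backward())  # gradient w.r.t. z
target = np.zeros_like(y)
for a, b in zip(target, labels):
    a[b] = 1.0
print np.allclose(d, y - target)  # True: the sigmoid derivative cancels out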


Results from running shallow.py: [screenshot omitted]

Results from running deep.py: with one extra hidden layer, the results actually get worse. [screenshot omitted]

Results from running deep2.py, with the loss function changed to the cross-entropy loss: the results are very good. [screenshot omitted]
