
TensorFlow Learning: Convolutional Neural Network Implementation (6)

2017-09-18 16:44
  This post implements ResNet V2 in TensorFlow. The depth of a neural network is generally considered very important for its performance, but the deeper the network, the harder it is to train. Highway Network, which is closely related to ResNet, aims to solve the problem that extremely deep networks are hard to train. It changes the per-layer transformation: instead of only applying a non-linear transform y = H(x, W_H) to the input, a Highway Network lets a certain proportion of the original input x pass through, i.e. y = H(x, W_H) * T(x, W_T) + x * C(x, W_C), where T is the transform gate and C is the carry gate. The paper sets C = 1 - T, so a fraction of the previous layer's information can reach the next layer directly, without going through the matrix multiplication and the non-linearity. Gating units learn how to control the information flow through the network, i.e. how much of the original information should be preserved.
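To make the gating idea concrete, here is a minimal sketch of a single highway layer for a 2-D (batch, features) input; the function name highway_layer and the use of tf.layers.dense are illustrative and assume x already has size features:

import tensorflow as tf

def highway_layer(x, size):
    # H: the usual non-linear transform of the input
    H = tf.layers.dense(x, size, activation=tf.nn.relu, name='transform')
    # T: transform gate in (0, 1); the carry gate is C = 1 - T, as in the paper
    T = tf.layers.dense(x, size, activation=tf.nn.sigmoid, name='gate')
    # a fraction (1 - T) of the original input x passes through unchanged
    return H * T + x * (1.0 - T)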

  The original inspiration for ResNet came from the degradation problem: as a network keeps getting deeper, accuracy first rises, then saturates, and then drops if the depth is increased further. This is not overfitting, because both the training and validation errors grow. Suppose the input is x and the desired mapping is H(x); what the network actually learns is the residual F(x) = H(x) - x. By routing the input directly to the output through a shortcut, the information is kept intact and the network only has to learn the difference between input and output, which simplifies the learning target and reduces the difficulty. ResNet V2 further observed that the forward and backward signals can propagate directly between units, so the non-linear activation on the shortcut path is replaced with an identity mapping (y = x), and Batch Normalization is used in every layer. With these changes, the new residual unit is easier to train and generalizes better than before.
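As a rough sketch of what the pre-activation ordering means (illustrative only; the actual implementation below uses slim and a three-convolution bottleneck design), a V2-style residual unit applies BN and ReLU before each convolution and keeps the shortcut as a pure identity:

import tensorflow as tf

def preact_residual_unit(x, channels):
    # BN -> ReLU -> conv, applied twice; x is assumed to already have the target number of channels
    h = tf.layers.batch_normalization(x)
    h = tf.nn.relu(h)
    h = tf.layers.conv2d(h, channels, 3, padding='same')
    h = tf.layers.batch_normalization(h)
    h = tf.nn.relu(h)
    h = tf.layers.conv2d(h, channels, 3, padding='same')
    return x + h  # identity shortcut, no activation after the addition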

import collections
import tensorflow as tf
slim = tf.contrib.slim
from datetime import datetime
import time
import math

# Use collections.namedtuple to define the Block class that describes a ResNet module group.
# The three values in each args tuple are (depth, depth_bottleneck, stride).
class Block(collections.namedtuple('Block', ['scope', 'unit_fn', 'args'])):
    'A named tuple describing a ResNet block'
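
# For example, Block('block2', bottleneck, [(512, 128, 1)] * 3 + [(512, 128, 2)]) (used in
# resnet_v2_50 below) describes four bottleneck units with output depth 512 and bottleneck
# depth 128; the first three use stride 1 and the last uses stride 2 to downsample.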
def subsample(inputs, factor, scope=None):
    # Downsample the input by the given factor with 1x1 max pooling; identity when factor == 1.
    if factor == 1:
        return inputs
    else:
        return slim.max_pool2d(inputs, [1, 1], stride=factor, scope=scope)

def conv2d_same(inputs, num_outputs, kernel_size, stride, scope=None):
    # Strided convolution with explicit zero padding so the output matches 'SAME' padding
    # regardless of the input size.
    if stride == 1:
        return slim.conv2d(inputs, num_outputs, kernel_size, stride=1, padding='SAME', scope=scope)
    else:
        pad_total = kernel_size - 1
        pad_beg = pad_total // 2
        pad_end = pad_total - pad_beg
        inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])
        return slim.conv2d(inputs, num_outputs, kernel_size, stride=stride, padding='VALID', scope=scope)
# Function that stacks the residual units of all Blocks.
@slim.add_arg_scope
def stack_blocks_dense(net, blocks, outputs_collections=None):
    for block in blocks:
        with tf.variable_scope(block.scope, 'block', [net]) as sc:
            for i, unit in enumerate(block.args):
                with tf.variable_scope('unit_%d' % (i + 1), values=[net]):
                    unit_depth, unit_depth_bottleneck, unit_stride = unit
                    net = block.unit_fn(net, depth=unit_depth, depth_bottleneck=unit_depth_bottleneck,
                                        stride=unit_stride)
            net = slim.utils.collect_named_outputs(outputs_collections, sc.name, net)
    return net

# Default arg_scope for ResNet: shared parameters for conv2d, batch_norm and max_pool2d.
def resnet_arg_scope(is_training=True, weight_decay=0.0001, batch_norm_decay=0.997,
                     batch_norm_epsilon=1e-5, batch_norm_scale=True):
    batch_norm_params = {
        'is_training': is_training,
        'decay': batch_norm_decay,
        'epsilon': batch_norm_epsilon,
        'scale': batch_norm_scale,
        'updates_collections': tf.GraphKeys.UPDATE_OPS,
    }
    with slim.arg_scope([slim.conv2d],
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        weights_initializer=slim.variance_scaling_initializer(),
                        activation_fn=tf.nn.relu,
                        normalizer_fn=slim.batch_norm,
                        normalizer_params=batch_norm_params):
        with slim.arg_scope([slim.batch_norm], **batch_norm_params):
            with slim.arg_scope([slim.max_pool2d], padding='SAME') as arg_sc:
                return arg_sc
# The core bottleneck residual unit (pre-activation: BN and ReLU come before the convolutions).
@slim.add_arg_scope
def bottleneck(inputs, depth, depth_bottleneck, stride, outputs_collections=None, scope=None):
    with tf.variable_scope(scope, 'bottleneck_v2', [inputs]) as sc:
        depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
        preact = slim.batch_norm(inputs, activation_fn=tf.nn.relu, scope='preact')
        # Shortcut branch: (possibly subsampled) identity if the depths match, otherwise a 1x1 conv.
        if depth == depth_in:
            shortcut = subsample(inputs, stride, 'shortcut')
        else:
            shortcut = slim.conv2d(preact, depth, [1, 1], stride=stride, normalizer_fn=None,
                                   activation_fn=None, scope='shortcut')
        # Residual branch: 1x1 -> 3x3 -> 1x1 convolutions.
        residual = slim.conv2d(preact, depth_bottleneck, [1, 1], stride=1, scope='conv1')
        residual = conv2d_same(residual, depth_bottleneck, 3, stride, scope='conv2')
        residual = slim.conv2d(residual, depth, [1, 1], stride=1, normalizer_fn=None,
                               activation_fn=None, scope='conv3')
        output = shortcut + residual
        return slim.utils.collect_named_outputs(outputs_collections, sc.name, output)
# Main function that assembles the full network.

def resnet_v2(inputs, blocks, num_classes=None, global_pool=True, include_root_block=True,
              reuse=None, scope=None):
    with tf.variable_scope(scope, 'resnet_v2', [inputs], reuse=reuse) as sc:
        end_points_collection = sc.original_name_scope + '_end_points'
        with slim.arg_scope([slim.conv2d, bottleneck, stack_blocks_dense],
                            outputs_collections=end_points_collection):
            net = inputs
            if include_root_block:
                # Root block: a 7x7 conv with stride 2 followed by 3x3 max pooling with stride 2.
                with slim.arg_scope([slim.conv2d], activation_fn=None, normalizer_fn=None):
                    net = conv2d_same(net, 64, 7, stride=2, scope='conv1')
                net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool1')
            net = stack_blocks_dense(net, blocks)
            net = slim.batch_norm(net, activation_fn=tf.nn.relu, scope='postnorm')
            if global_pool:
                # Global average pooling, implemented with reduce_mean.
                net = tf.reduce_mean(net, [1, 2], name='pool5', keep_dims=True)
            if num_classes is not None:
                net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None, normalizer_fn=None,
                                  scope='logits')
            end_points = slim.utils.convert_collection_to_dict(end_points_collection)
            if num_classes is not None:
                end_points['predictions'] = slim.softmax(net, scope='predictions')
            return net, end_points
# ResNet V2 networks with 50, 101, 152 and 200 layers.
def resnet_v2_50(inputs, num_classes=None, global_pool=True, reuse=None, scope='resnet_v2_50'):
    blocks = [
        Block('block1', bottleneck, [(256, 64, 1)] * 2 + [(256, 64, 2)]),
        Block('block2', bottleneck, [(512, 128, 1)] * 3 + [(512, 128, 2)]),
        Block('block3', bottleneck, [(1024, 256, 1)] * 5 + [(1024, 256, 2)]),
        Block('block4', bottleneck, [(2048, 512, 1)] * 3)]
    return resnet_v2(inputs, blocks, num_classes, global_pool, include_root_block=True,
                     reuse=reuse, scope=scope)
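
# Layer count check for resnet_v2_50 (illustrative): (3 + 4 + 6 + 3) bottleneck units x 3
# convolutions each = 48, plus the root conv1 and the final 1x1 logits convolution = 50 layers.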

def resnet_v2_101(inputs, num_classes=None, global_pool=True, reuse=None, scope='resnet_v2_101'):
    blocks = [
        Block('block1', bottleneck, [(256, 64, 1)] * 2 + [(256, 64, 2)]),
        Block('block2', bottleneck, [(512, 128, 1)] * 3 + [(512, 128, 2)]),
        Block('block3', bottleneck, [(1024, 256, 1)] * 22 + [(1024, 256, 2)]),
        Block('block4', bottleneck, [(2048, 512, 1)] * 3)]
    return resnet_v2(inputs, blocks, num_classes, global_pool, include_root_block=True,
                     reuse=reuse, scope=scope)

def resnet_v2_152(inputs, num_classes=None, global_pool=True, reuse=None, scope='resnet_v2_152'):
    blocks = [
        Block('block1', bottleneck, [(256, 64, 1)] * 2 + [(256, 64, 2)]),
        Block('block2', bottleneck, [(512, 128, 1)] * 7 + [(512, 128, 2)]),
        Block('block3', bottleneck, [(1024, 256, 1)] * 35 + [(1024, 256, 2)]),
        Block('block4', bottleneck, [(2048, 512, 1)] * 3)]
    return resnet_v2(inputs, blocks, num_classes, global_pool, include_root_block=True,
                     reuse=reuse, scope=scope)

def resnet_v2_200(inputs, num_classes=None, global_pool=True, reuse=None, scope='resnet_v2_200'):
    blocks = [
        Block('block1', bottleneck, [(256, 64, 1)] * 2 + [(256, 64, 2)]),
        Block('block2', bottleneck, [(512, 128, 1)] * 23 + [(512, 128, 2)]),
        Block('block3', bottleneck, [(1024, 256, 1)] * 35 + [(1024, 256, 2)]),
        Block('block4', bottleneck, [(2048, 512, 1)] * 3)]
    return resnet_v2(inputs, blocks, num_classes, global_pool, include_root_block=True,
                     reuse=reuse, scope=scope)
def time_tensorflow_run(session, target, info_string):
    # Warm-up iterations: the first few runs are skewed by memory loading and cache effects,
    # so timing only starts after the first 10 iterations.
    num_steps_burn_in = 10
    total_duration = 0.0          # total elapsed time
    total_duration_squared = 0.0  # sum of squared durations, used to compute the variance
    for i in range(num_batches + num_steps_burn_in):
        start_time = time.time()   # record the start time
        _ = session.run(target)    # run one iteration
        duration = time.time() - start_time
        if i >= num_steps_burn_in:
            if not i % 10:
                print('%s: step %d, duration = %.3f' %
                      (datetime.now(), i - num_steps_burn_in, duration))
            total_duration += duration
            total_duration_squared += duration * duration  # accumulate for mean and standard deviation
    mn = total_duration / num_batches  # mean time per batch
    vr = total_duration_squared / num_batches - mn * mn
    sd = math.sqrt(vr)  # standard deviation
    print('%s: %s across %d steps, %.3f +/- %.3f sec / batch' %
          (datetime.now(), info_string, num_batches, mn, sd))

batch_size = 32
height, width = 224, 224
inputs = tf.random_uniform((batch_size, height, width, 3))  # random images stand in for real data
with slim.arg_scope(resnet_arg_scope(is_training=False)):
    net, end_points = resnet_v2_152(inputs, 1000)
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
num_batches = 100
time_tensorflow_run(sess, net, 'Forward')
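
The same timing helper can be pointed at the other depths defined above; a minimal sketch, assuming each depth is measured in a fresh graph and session:

tf.reset_default_graph()
inputs = tf.random_uniform((batch_size, height, width, 3))
with slim.arg_scope(resnet_arg_scope(is_training=False)):
    net, end_points = resnet_v2_50(inputs, 1000)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
time_tensorflow_run(sess, net, 'Forward resnet_v2_50')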