您的位置:首页 > 理论基础 > 计算机网络

经典卷积神经网络(LeNet、AlexNet、VGG、GoogLeNet、ResNet)的实现(MXNet版本)

2018-03-07 16:40 851 查看
  卷积神经网络(Convolutional Neural Network, CNN)是一种前馈神经网络,它的人工神经元可以响应一部分覆盖范围内的周围单元,对于大型图像处理有出色表现。

  其中 文章 详解卷积神经网络(CNN)已经对卷积神经网络进行了详细的描述,这里为了学习MXNet的库,所以对经典的神经网络进行实现~加深学习印象,并且为以后的使用打下基础。其中参考的为Gluon社区提供的学习资料~

1.简单LeNet的实现

  

def LeNet():
    """Build the classic LeNet-style convolutional network.

    Two conv+pool stages, then a 128-unit dense hidden layer and a
    10-way output layer emitting raw class scores.

    :return: the assembled (uninitialised) ``nn.Sequential`` network.
    """
    net = nn.Sequential()
    with net.name_scope():
        net.add(nn.Conv2D(channels=20, kernel_size=5, activation='relu'))
        net.add(nn.MaxPool2D(pool_size=2, strides=2))
        net.add(nn.Conv2D(channels=50, kernel_size=3, activation='relu'))
        net.add(nn.MaxPool2D(pool_size=2, strides=2))
        net.add(nn.Flatten())
        net.add(nn.Dense(128, activation="relu"))
        net.add(nn.Dense(10))
    return net


2. AlexNet:

  由于图片数据集的扩大和硬件设备的发展,更深层更复杂的神经网络模型被使用,其中代表为AlexNet,与相对较小的LeNet相比,AlexNet包含8层变换,其中有五层卷积和两层全连接隐含层,以及一个输出层。

def AlexNet():
    """Build AlexNet: five convolutional layers plus two dropout-regularised
    dense hidden layers and a 10-way output layer.

    A scaled-up successor to LeNet, enabled by larger datasets and
    better hardware.

    :return: the assembled (uninitialised) ``nn.Sequential`` network.
    """
    net = nn.Sequential()
    with net.name_scope():
        # Stage 1: large 11x11 kernels with an aggressive stride of 4.
        net.add(nn.Conv2D(channels=96, kernel_size=11, strides=4, activation='relu'))
        net.add(nn.MaxPool2D(pool_size=3, strides=2))
        # Stage 2
        net.add(nn.Conv2D(channels=256, kernel_size=5, padding=2, activation='relu'))
        net.add(nn.MaxPool2D(pool_size=3, strides=2))
        # Stage 3: three stacked 3x3 same-padding convolutions.
        net.add(nn.Conv2D(channels=384, kernel_size=3, padding=1, activation='relu'))
        net.add(nn.Conv2D(channels=384, kernel_size=3, padding=1, activation='relu'))
        net.add(nn.Conv2D(channels=256, kernel_size=3, padding=1, activation='relu'))
        net.add(nn.MaxPool2D(pool_size=3, strides=2))
        # Stages 4-5: fully connected classifier with dropout.
        net.add(nn.Flatten())
        net.add(nn.Dense(4096, activation="relu"))
        net.add(nn.Dropout(.5))
        net.add(nn.Dense(4096, activation="relu"))
        net.add(nn.Dropout(.5))
        # Stage 6: raw output scores.
        net.add(nn.Dense(10))
    return net


3. VGGNet:

  考虑到当网络层数非常多时,一层一层堆叠网络结构,非常麻烦,VGG使用了编程语言自带的便利,采用了函数和循环的方式,复制了网络结构里面的大量重复结构,因此可以很紧凑来构造这些网络。而第一个使用这种结构的深度网络是VGG。

def VGGNet(architecture):
    """Build a VGG-style network from an ``architecture`` specification.

    VGG's key idea is repeating a small unit — several 3x3 convolutions
    followed by a pooling layer — so the whole network can be generated
    with functions and loops instead of hand-writing every layer.

    :param architecture: iterable of ``(num_convs, num_channels)`` pairs,
        one per VGG block.
    :return: the assembled (uninitialised) ``nn.Sequential`` network.
    """
    def vgg_block(num_convs, num_channels):
        # One VGG unit: ``num_convs`` 3x3 same-padding convolutions,
        # then a 2x2 stride-2 max pooling that halves height and width.
        block = nn.Sequential()
        for _ in range(num_convs):
            block.add(nn.Conv2D(channels=num_channels, kernel_size=3,
                                padding=1, activation='relu'))
        block.add(nn.MaxPool2D(pool_size=2, strides=2))
        return block

    def vgg_stack(spec):
        # Chain one vgg_block per (num_convs, num_channels) entry.
        stack = nn.Sequential()
        for num_convs, num_channels in spec:
            stack.add(vgg_block(num_convs, num_channels))
        return stack

    # Convolutional trunk, then the classic pair of 4096-unit dense
    # layers with dropout, then the 10-way output layer.
    net = nn.Sequential()
    with net.name_scope():
        net.add(vgg_stack(architecture))
        net.add(nn.Flatten())
        net.add(nn.Dense(4096, activation='relu'))
        net.add(nn.Dropout(0.5))
        net.add(nn.Dense(4096, activation='relu'))
        net.add(nn.Dropout(0.5))
        net.add(nn.Dense(10))
    return net


4. NiNNet:

  注意到卷积神经网络一般分成两块,一块主要由卷积层构成,另一块主要是全连接层。在Alexnet里我们看到如何把卷积层块和全连接层分别加深加宽从而得到深度网络。另外一个自然的想法是,我们可以串联数个卷积层块和全连接层块来构建深度网络。

  不过这里的一个难题是,卷积的输入输出是4D矩阵,然而全连接是2D。同时在卷积神经网络里我们提到如果把4D矩阵转成2D做全连接,这个会导致全连接层有过多的参数。NiN提出只对通道层做全连接并且像素之间共享权重来解决上述两个问题。就是说,我们使用kernel大小是1×1的卷积。

class DataLoader(object):
    """Similar to ``gluon.data.DataLoader``, but might be faster.

    The main difference is that this loader reads the whole dataset into
    memory once and slices batches out of it. The limits are: (1) all
    examples in the dataset must have the same shape, and (2) the data
    transformer must be able to process multiple examples at a time.

    NOTE(review): iteration yields exactly ``n // batch_size`` batches —
    the trailing partial batch is dropped.
    """

    def __init__(self, dataset, batch_size, shuffle, transform=None):
        # dataset: indexable; dataset[:] yields the (data, labels) pair.
        self.dataset = dataset
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.transform = transform

    def __iter__(self):
        data = self.dataset[:]
        X = data[0]
        y = nd.array(data[1])
        n = X.shape[0]
        if self.shuffle:
            # Apply one random permutation to data and labels alike.
            order = np.arange(n)
            np.random.shuffle(order)
            X = nd.array(X.asnumpy()[order])
            y = nd.array(y.asnumpy()[order])

        limit = n // self.batch_size * self.batch_size
        for start in range(0, limit, self.batch_size):
            stop = start + self.batch_size
            batch = (X[start:stop], y[start:stop])
            if self.transform is not None:
                yield self.transform(*batch)
            else:
                yield batch

    def __len__(self):
        return len(self.dataset) // self.batch_size

def load_data_fashion_mnist(batch_size, resize=None, root="~/.mxnet/datasets/fashion-mnist"):
    """Download the Fashion-MNIST dataset (if needed) and load it into memory.

    :param batch_size: number of examples per batch.
    :param resize: if given, resize every image to ``resize`` x ``resize``.
    :param root: directory where the dataset is cached.
    :return: ``(train_data, test_data)`` DataLoader pair.
    """
    def transform_mnist(data, label):
        # Batch transform: optional resize, then NHWC uint8 -> NCHW
        # float32 scaled into [0, 1].
        if resize:
            count = data.shape[0]
            resized = nd.zeros((count, resize, resize, data.shape[3]))
            for idx in range(count):
                resized[idx] = image.imresize(data[idx], resize, resize)
            data = resized
        return (nd.transpose(data.astype('float32'), (0, 3, 1, 2)) / 255,
                label.astype('float32'))

    mnist_train = gluon.data.vision.FashionMNIST(root=root, train=True, transform=None)
    mnist_test = gluon.data.vision.FashionMNIST(root=root, train=False, transform=None)
    # Transform lazily, per batch, to avoid a memory explosion.
    train_data = DataLoader(mnist_train, batch_size, shuffle=True, transform=transform_mnist)
    test_data = DataLoader(mnist_test, batch_size, shuffle=False, transform=transform_mnist)
    return train_data, test_data

def try_gpu():
    """Return ``mx.gpu(0)`` if a GPU is usable, else ``mx.cpu()``.

    Probes GPU availability by allocating a tiny array on it.
    """
    ctx = mx.gpu()
    try:
        _ = nd.array([0], ctx=ctx)
    except mx.base.MXNetError:
        # Fix: the original bare ``except:`` also swallowed
        # KeyboardInterrupt/SystemExit. Only an MXNet allocation
        # failure (no GPU / no CUDA) should trigger the CPU fallback.
        ctx = mx.cpu()
    return ctx

def _get_batch(batch, ctx):
    """Split a batch's data and labels across the context list ``ctx``.

    Accepts either an ``mx.io.DataBatch`` or a plain ``(data, label)``
    pair.

    :return: ``(data_parts, label_parts, batch_size)``.
    """
    if isinstance(batch, mx.io.DataBatch):
        data, label = batch.data[0], batch.label[0]
    else:
        data, label = batch
    data_parts = gluon.utils.split_and_load(data, ctx)
    label_parts = gluon.utils.split_and_load(label, ctx)
    return data_parts, label_parts, data.shape[0]

def train(train_data, test_data, net, loss, trainer, ctx, num_epochs, print_batches=None):
    """Train ``net`` and report loss/accuracy after every epoch.

    :param loss: loss function mapping (prediction, label) to a loss array.
    :param trainer: ``gluon.Trainer`` driving the parameter updates.
    :param ctx: a single ``mx.Context`` or a list of contexts.
    :param num_epochs: number of passes over ``train_data``.
    :param print_batches: if set, log progress every that many batches.
    """
    print("Start training on ", ctx)
    if isinstance(ctx, mx.Context):
        ctx = [ctx]
    for epoch in range(num_epochs):
        train_loss, train_acc = 0.0, 0.0
        n, m = 0.0, 0.0  # running example count / label count
        if isinstance(train_data, mx.io.MXDataIter):
            train_data.reset()
        start = time()
        for i, batch in enumerate(train_data):
            data, label, batch_size = _get_batch(batch, ctx)
            # Forward pass under the autograd recorder, one shard per context.
            with autograd.record():
                outputs = [net(X) for X in data]
                losses = [loss(yhat, y) for yhat, y in zip(outputs, label)]
            for l in losses:
                l.backward()
            train_acc += sum((yhat.argmax(axis=1) == y).sum().asscalar()
                             for yhat, y in zip(outputs, label))
            train_loss += sum(l.sum().asscalar() for l in losses)
            trainer.step(batch_size)
            n += batch_size
            m += sum(y.size for y in label)
            if print_batches and (i + 1) % print_batches == 0:
                print("Batch %d. Loss: %f, Train acc %f" % (
                    n, train_loss/n, train_acc/m
                ))

        test_acc = evaluate_accuracy(test_data, net, ctx)
        print("Epoch %d. Loss: %.3f, Train acc %.2f, Test acc %.2f, Time %.1f sec" % (
            epoch, train_loss/n, train_acc/m, test_acc, time() - start
        ))


View Code
内容来自用户分享和网络整理,不保证内容的准确性,如有侵权内容,可联系管理员处理 点击这里给我发消息
标签: 
相关文章推荐