Dogs vs. Cats with Various Deep Learning Frameworks
2020-01-11 07:59
Contents
Implementation tutorials / GitHub repos for different deep learning frameworks (if you find them useful, remember to star them!)
1. PyTorch
A tutorial plus the project repo; you can build the project from the code yourself or download it from GitHub. A minimal sketch of the usual approach follows below.
Hands-On PyTorch (5): Implementing the 2013 Kaggle Dogs vs. Cats Competition
https://github.com/nickhuang1996/Dogs_vs_Cats_Pytorch
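The full training code lives in the tutorial and repo above. As a minimal sketch of the transfer-learning setup such tutorials typically use (my own assumptions, not the tutorial's exact code): take torchvision's pretrained ResNet-50 and swap its 1000-way ImageNet head for a 2-way cat/dog head.
[code]
import torch
import torch.nn as nn
from torchvision import models

# Sketch, assuming a torchvision pretrained backbone (the tutorial may differ):
# replace the final fully connected layer with a 2-class head.
model = models.resnet50(pretrained=True)
model.fc = nn.Linear(model.fc.in_features, 2)  # cat / dog

criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3, momentum=0.9)

# One illustrative training step on a dummy batch
images = torch.randn(4, 3, 224, 224)
labels = torch.tensor([0, 1, 0, 1])
optimizer.zero_grad()
loss = criterion(model(images), labels)
loss.backward()
optimizer.step()
[/code]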
2. TensorFlow
Personally, I don't think this one is especially well written, but it does get the classification task done, and you can watch the loss and accuracy curves in TensorBoard (see the sketch below).
https://github.com/nickhuang1996/Dogs_vs_Cats_TensorFlow_No_Keras
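Since the repo avoids Keras, here is a rough, self-contained sketch of how scalar summaries typically reach TensorBoard in graph-mode (TF 1.x style) code; this is an assumption about the style, not code from the repo, and the toy `loss` stands in for the model's real tensors.
[code]
import tensorflow as tf  # assumes TF 1.x graph mode

# Minimal self-contained example: log a scalar each step, then view with
#   tensorboard --logdir ./logs
x = tf.Variable(10.0, name='x')
loss = tf.square(x)  # stand-in for a real training loss
train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

tf.summary.scalar('loss', loss)
merged = tf.summary.merge_all()

with tf.Session() as sess:
    writer = tf.summary.FileWriter('./logs', sess.graph)
    sess.run(tf.global_variables_initializer())
    for step in range(100):
        _, summary = sess.run([train_op, merged])
        writer.add_summary(summary, step)
    writer.close()
[/code]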
3. Keras
This one uses the Keras API inside TensorFlow, and the code is fairly complete. I also spent quite a long time restructuring the pretrained networks into a PyTorch-like style.
https://github.com/nickhuang1996/Dogs_vs_Cats_TensorFlow_Keras
For example, ResNet50:
[code]
from tensorflow.python.keras.layers import Activation
from tensorflow.python.keras.layers import AveragePooling2D
from tensorflow.python.keras.layers import BatchNormalization
from tensorflow.python.keras.layers import Conv2D
from tensorflow.python.keras.layers import Dense
from tensorflow.python.keras.layers import Flatten
from tensorflow.python.keras.layers import GlobalAveragePooling2D
from tensorflow.python.keras.layers import GlobalMaxPooling2D
from tensorflow.python.keras.layers import Input
from tensorflow.python.keras.layers import MaxPooling2D
from tensorflow.python.keras.layers import ZeroPadding2D
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import layers


class conv_block(object):
    """Bottleneck block with a projection shortcut; used when spatial size or channels change."""

    def __init__(self, kernel_size, filters, stage, block, strides=(2, 2)):
        filters1, filters2, filters3 = filters
        if K.image_data_format() == 'channels_last':
            bn_axis = 3
        else:
            bn_axis = 1
        conv_name_base = 'res' + str(stage) + block + '_branch'
        bn_name_base = 'bn' + str(stage) + block + '_branch'
        # Main path: 1x1 reduce -> kxk -> 1x1 expand, mirroring PyTorch's Bottleneck
        self.conv1 = Conv2D(filters1, (1, 1), strides=strides, name=conv_name_base + '2a')
        self.bn1 = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')
        self.conv2 = Conv2D(filters2, kernel_size, padding='same', name=conv_name_base + '2b')
        self.bn2 = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')
        self.conv3 = Conv2D(filters3, (1, 1), name=conv_name_base + '2c')
        self.bn3 = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')
        # Shortcut path: 1x1 conv + BN so shapes match before the residual add
        self.shortcut_conv = Conv2D(filters3, (1, 1), strides=strides, name=conv_name_base + '1')
        self.shortcut_bn = BatchNormalization(axis=bn_axis, name=bn_name_base + '1')
        self.relu = Activation('relu')

    def __call__(self, input_tensor):
        x = self.conv1(input_tensor)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu(x)
        x = self.conv3(x)
        x = self.bn3(x)
        shortcut = self.shortcut_conv(input_tensor)
        shortcut = self.shortcut_bn(shortcut)
        x = layers.add([x, shortcut])
        x = self.relu(x)
        return x


class identity_block(object):
    """Bottleneck block with an identity shortcut; input and output shapes match."""

    def __init__(self, kernel_size, filters, stage, block):
        filters1, filters2, filters3 = filters
        if K.image_data_format() == 'channels_last':
            bn_axis = 3
        else:
            bn_axis = 1
        conv_name_base = 'res' + str(stage) + block + '_branch'
        bn_name_base = 'bn' + str(stage) + block + '_branch'
        self.conv1 = Conv2D(filters1, (1, 1), name=conv_name_base + '2a')
        self.bn1 = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')
        self.conv2 = Conv2D(filters2, kernel_size, padding='same', name=conv_name_base + '2b')
        self.bn2 = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')
        self.conv3 = Conv2D(filters3, (1, 1), name=conv_name_base + '2c')
        self.bn3 = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')
        self.relu = Activation('relu')

    def __call__(self, input_tensor):
        x = self.conv1(input_tensor)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu(x)
        x = self.conv3(x)
        x = self.bn3(x)
        x = layers.add([x, input_tensor])
        x = self.relu(x)
        return x


class ResNet50(object):
    """ResNet-50 built from the blocks above, organized like torchvision's layer1..layer4."""

    def __init__(self, include_top=True, classes=1000, pooling=None):
        if K.image_data_format() == 'channels_last':
            bn_axis = 3
        else:
            bn_axis = 1
        self.include_top = include_top
        self.pooling = pooling
        # Stem: 7x7 conv, BN, ReLU, 3x3 max pool
        self.conv1 = Conv2D(64, (7, 7), strides=(2, 2), padding='same', name='conv1')
        self.bn1 = BatchNormalization(axis=bn_axis, name='bn_conv1')
        self.relu = Activation('relu')
        self.maxpool = MaxPooling2D((3, 3), strides=(2, 2))
        # Four stages of bottleneck blocks: 3, 4, 6, 3
        self.layer1_list = [
            conv_block(3, [64, 64, 256], stage=2, block='a', strides=(1, 1)),
            identity_block(3, [64, 64, 256], stage=2, block='b'),
            identity_block(3, [64, 64, 256], stage=2, block='c'),
        ]
        self.layer2_list = [
            conv_block(3, [128, 128, 512], stage=3, block='a'),
            identity_block(3, [128, 128, 512], stage=3, block='b'),
            identity_block(3, [128, 128, 512], stage=3, block='c'),
            identity_block(3, [128, 128, 512], stage=3, block='d'),
        ]
        self.layer3_list = [
            conv_block(3, [256, 256, 1024], stage=4, block='a'),
            identity_block(3, [256, 256, 1024], stage=4, block='b'),
            identity_block(3, [256, 256, 1024], stage=4, block='c'),
            identity_block(3, [256, 256, 1024], stage=4, block='d'),
            identity_block(3, [256, 256, 1024], stage=4, block='e'),
            identity_block(3, [256, 256, 1024], stage=4, block='f'),
        ]
        self.layer4_list = [
            conv_block(3, [512, 512, 2048], stage=5, block='a'),
            identity_block(3, [512, 512, 2048], stage=5, block='b'),
            identity_block(3, [512, 512, 2048], stage=5, block='c'),
        ]
        self.avgpool = AveragePooling2D((7, 7), name='avg_pool')
        self.flatten = Flatten()
        self.fc = Dense(classes, activation='softmax', name='fc1000')
        self.GAP = GlobalAveragePooling2D()
        self.GMP = GlobalMaxPooling2D()

    def layer1(self, x):
        for block in self.layer1_list:
            x = block(x)
        return x

    def layer2(self, x):
        for block in self.layer2_list:
            x = block(x)
        return x

    def layer3(self, x):
        for block in self.layer3_list:
            x = block(x)
        return x

    def layer4(self, x):
        for block in self.layer4_list:
            x = block(x)
        return x

    def __call__(self, img_input):
        x = self.conv1(img_input)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        if self.include_top:
            x = self.flatten(x)
            x = self.fc(x)
        else:
            # Without the classification head, optionally apply global pooling
            if self.pooling == 'avg':
                x = self.GAP(x)
            elif self.pooling == 'max':
                x = self.GMP(x)
        return x
[/code]
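For completeness, here is a minimal sketch (not taken from the repo; the input size, head name, and optimizer are my own assumptions) of how the class above could be wired into a Keras Model with a two-class head for dogs vs. cats:
[code]
from tensorflow.python.keras.layers import Dense, Input
from tensorflow.python.keras.models import Model

# Assumed 224x224 RGB input; the repo may preprocess differently.
img_input = Input(shape=(224, 224, 3))
backbone = ResNet50(include_top=False, pooling='avg')  # global-average-pooled features
features = backbone(img_input)
# Hypothetical 2-way head; the layer name 'fc2' is illustrative.
predictions = Dense(2, activation='softmax', name='fc2')(features)

model = Model(inputs=img_input, outputs=predictions)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
[/code]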
4. MXNet
I wrote this one from scratch and found it very similar to both TensorFlow and PyTorch; it feels quite pleasant to use too. A small sketch of the resemblance follows below.
https://github.com/nickhuang1996/Dogs_vs_Cats_MXNet
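As one illustration of that similarity (a sketch under my own assumptions, not code from the repo), fine-tuning in MXNet Gluon follows the same pretrained-backbone-plus-new-head pattern as the PyTorch and Keras examples above:
[code]
from mxnet import gluon, init, nd

# Sketch assuming the Gluon model zoo; the repo may build its network differently.
pretrained = gluon.model_zoo.vision.resnet50_v2(pretrained=True)
net = gluon.model_zoo.vision.resnet50_v2(classes=2)  # fresh 2-way head
net.features = pretrained.features                   # reuse pretrained features
net.output.initialize(init.Xavier())                 # initialize only the new head

# Forward pass on a dummy batch, mirroring the examples above
x = nd.random.normal(shape=(4, 3, 224, 224))
y = net(x)  # shape: (4, 2)
[/code]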
I hope the tutorials and code above can help more people who are learning deep learning frameworks!