TensorFlow Study Notes (6) - Training a CNN Model on Your Own Dataset

2018-01-15 17:00

Environment: macOS High Sierra 10.13.2

TensorFlow: 1.0

Python: 3.6.1

While recently training my own model with TensorFlow, I noticed that the examples on the official site all use data that already comes in a ready-made format, such as MNIST, so I got stuck at the very first step: feeding in my own data. After searching around I found several solutions; this post covers just one of them. (Another common approach uses the TFRecord format, for which there are plenty of tutorials online, so I won't cover it here.)

Since the upgrade to 1.0, TensorFlow includes some high-level modules, such as tf.layers, tf.metrics, and tf.losses, which simplify the code somewhat.
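
To see why these modules help, here is a minimal sketch (the shapes and variable names are invented for illustration) comparing a fully connected layer built from raw variables with its tf.layers equivalent:

import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None, 128], name='x')

# By hand: create the weights yourself and wire up the ops
w = tf.Variable(tf.truncated_normal([128, 64], stddev=0.01))
b = tf.Variable(tf.zeros([64]))
manual_fc = tf.nn.relu(tf.matmul(x, w) + b)

# With tf.layers: one call creates the variables and the op
layers_fc = tf.layers.dense(inputs=x, units=64, activation=tf.nn.relu)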

I'll just paste the code below; the explanations are in the inline comments, so I won't walk through it step by step:

import os
import glob
import time
import numpy as np
import tensorflow as tf
from skimage import io, transform

# os.environ["TF_CPP_MIN_LOG_LEVEL"] = '1'
# filter out INFO messages
# os.environ["TF_CPP_MIN_LOG_LEVEL"] = '2'
# show only warnings and errors
os.environ["TF_CPP_MIN_LOG_LEVEL"] = '3'
# show only errors

# Read the images: each sub-directory of `path` is one class,
# and the directory's enumeration index is used as the label
def read_img(path, w, h):
    cate = [path + x for x in os.listdir(path) if os.path.isdir(path + x)]
    # print(cate)

    imgs = []
    labels = []

    print('Start reading the images ...')

    for index, folder in enumerate(cate):
        # print(index, folder)
        for im in glob.glob(folder + '/*.jpg'):
            # print('Reading The Image: %s' % im)
            img = io.imread(im)
            img = transform.resize(img, (w, h))
            imgs.append(img)
            labels.append(index)

    print('Finished ...')

    # labels are class indices, so store them as integers to match the
    # int32 placeholder and the sparse softmax loss defined below
    return np.asarray(imgs, np.float32), np.asarray(labels, np.int32)
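
# For reference, read_img expects a layout like the following
# (hypothetical paths, assumed for illustration -- substitute your own classes):
#
#   ../dataset/classify/
#       class_a/ 001.jpg 002.jpg ...
#       class_b/ 001.jpg 002.jpg ...
#
# Each sub-folder is one class and its enumeration index becomes the label,
# so the number of sub-folders must match the units of the final dense
# layer (20 in buildCNN below).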

# Shuffle the samples so the train/validation split below is random
def messUpOrder(data, label):
    num_example = data.shape[0]
    arr = np.arange(num_example)
    np.random.shuffle(arr)
    data = data[arr]
    label = label[arr]

    return data, label

# Split all samples into a training set and a validation set
def segmentation(data, label, ratio=0.8):
    num_example = data.shape[0]
    s = int(num_example * ratio)
    x_train = data[:s]
    y_train = label[:s]
    x_val = data[s:]
    y_val = label[s:]

    return x_train, y_train, x_val, y_val

# Build the network
def buildCNN(w, h, c):
    # Placeholders
    x = tf.placeholder(tf.float32, shape=[None, w, h, c], name='x')
    y_ = tf.placeholder(tf.int32, shape=[None, ], name='y_')

    # First conv layer + pooling layer (100 -> 50)
    conv1 = tf.layers.conv2d(
        inputs=x,
        filters=32,
        kernel_size=[5, 5],
        padding="same",
        activation=tf.nn.relu,
        kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)

    # Second conv layer + pooling layer (50 -> 25)
    conv2 = tf.layers.conv2d(
        inputs=pool1,
        filters=64,
        kernel_size=[5, 5],
        padding="same",
        activation=tf.nn.relu,
        kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
    pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)

    # Third conv layer + pooling layer (25 -> 12)
    conv3 = tf.layers.conv2d(
        inputs=pool2,
        filters=128,
        kernel_size=[3, 3],
        padding="same",
        activation=tf.nn.relu,
        kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
    pool3 = tf.layers.max_pooling2d(inputs=conv3, pool_size=[2, 2], strides=2)

    # Fourth conv layer + pooling layer (12 -> 6)
    conv4 = tf.layers.conv2d(
        inputs=pool3,
        filters=128,
        kernel_size=[3, 3],
        padding="same",
        activation=tf.nn.relu,
        kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
    pool4 = tf.layers.max_pooling2d(inputs=conv4, pool_size=[2, 2], strides=2)

    # After four 2x2 poolings a 100x100 input shrinks to 6x6
    # (100 -> 50 -> 25 -> 12 -> 6), so the flattened map is 6 * 6 * 128
    re1 = tf.reshape(pool4, [-1, 6 * 6 * 128])

    # Fully connected layers; the last one emits one logit per class (20 here)
    dense1 = tf.layers.dense(inputs=re1,
                             units=1024,
                             activation=tf.nn.relu,
                             kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
                             kernel_regularizer=tf.contrib.layers.l2_regularizer(0.003))
    dense2 = tf.layers.dense(inputs=dense1,
                             units=512,
                             activation=tf.nn.relu,
                             kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
                             kernel_regularizer=tf.contrib.layers.l2_regularizer(0.003))
    logits = tf.layers.dense(inputs=dense2,
                             units=20,
                             activation=None,
                             kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
                             kernel_regularizer=tf.contrib.layers.l2_regularizer(0.003))

    return logits, x, y_

# Return the loss, the training op, and the accuracy tensors
def accCNN(logits, y_):
    loss = tf.losses.sparse_softmax_cross_entropy(labels=y_, logits=logits)
    train_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)
    correct_prediction = tf.equal(tf.cast(tf.argmax(logits, 1), tf.int32), y_)
    acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    return loss, train_op, correct_prediction, acc
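
# tf.metrics is another of the 1.0 additions mentioned above; as a hedged
# alternative to the hand-rolled accuracy in accCNN, one could use the
# streaming metric below (a sketch, not what this script uses). tf.metrics
# ops keep internal counters, so they need tf.local_variables_initializer()
# and an explicit update op per batch:
#
#   acc_value, acc_update = tf.metrics.accuracy(
#       labels=y_, predictions=tf.argmax(logits, 1))
#   sess.run(tf.local_variables_initializer())
#   sess.run(acc_update, feed_dict={x: batch_x, y_: batch_y})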

# Helper that yields the data one mini-batch at a time
def minibatches(inputs=None, targets=None, batch_size=None, shuffle=False):
    assert len(inputs) == len(targets)
    if shuffle:
        indices = np.arange(len(inputs))
        np.random.shuffle(indices)
    for start_idx in range(0, len(inputs) - batch_size + 1, batch_size):
        if shuffle:
            excerpt = indices[start_idx:start_idx + batch_size]
        else:
            excerpt = slice(start_idx, start_idx + batch_size)
        yield inputs[excerpt], targets[excerpt]
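
# Usage sketch for minibatches (the shapes are illustrative only):
#
#   for x_batch, y_batch in minibatches(x_train, y_train, 64, shuffle=True):
#       ...  # x_batch: (64, w, h, c), y_batch: (64,)
#
# Note: the range() stop condition above drops any trailing samples that
# do not fill a complete batch.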

def runable(x_train, y_train, train_op, loss, acc, x, y_, x_val, y_val):
    # Train and validate; n_epoch can be set higher if needed
    n_epoch = 50
    batch_size = 64
    sess = tf.InteractiveSession()
    sess.run(tf.global_variables_initializer())
    for epoch in range(n_epoch):
        # training
        train_loss, train_acc, n_batch = 0, 0, 0
        for x_train_a, y_train_a in minibatches(x_train, y_train, batch_size, shuffle=True):
            _, err, ac = sess.run([train_op, loss, acc], feed_dict={x: x_train_a, y_: y_train_a})
            train_loss += err
            train_acc += ac
            n_batch += 1
        print("train loss: %f" % (train_loss / n_batch))
        print("train acc: %f" % (train_acc / n_batch))

        # validation
        val_loss, val_acc, n_batch = 0, 0, 0
        for x_val_a, y_val_a in minibatches(x_val, y_val, batch_size, shuffle=False):
            err, ac = sess.run([loss, acc], feed_dict={x: x_val_a, y_: y_val_a})
            val_loss += err
            val_acc += ac
            n_batch += 1
        print("validation loss: %f" % (val_loss / n_batch))
        print("validation acc: %f" % (val_acc / n_batch))
        print('*' * 50)

    sess.close()

if __name__ == '__main__':
    imgpath = '../dataset/classify/'

    w = 100
    h = 100
    c = 3

    ratio = 0.8  # fraction of the data used for training

    data, label = read_img(path=imgpath, w=w, h=h)

    data, label = messUpOrder(data=data, label=label)

    x_train, y_train, x_val, y_val = segmentation(data=data, label=label, ratio=ratio)

    logits, x, y_ = buildCNN(w=w, h=h, c=c)

    loss, train_op, correct_prediction, acc = accCNN(logits=logits, y_=y_)

    runable(x_train=x_train, y_train=y_train, train_op=train_op, loss=loss,
            acc=acc, x=x, y_=y_, x_val=x_val, y_val=y_val)