
TensorFlow Essentials, Part I: Training on MNIST, Plus Testing Your Own Images

2017-02-12 15:25

Getting Started

In TensorFlow's Python API, the tensor values you fetch from a graph (via Session.run or Tensor.eval) come back as numpy ndarray objects.

import tensorflow as tf

# Create a Variable, initialized to the scalar 0
state = tf.Variable(0, name="counter")

# Create an op whose job is to increment state by 1
one = tf.constant(1)
new_value = tf.add(state, one)
# assignment op
update = tf.assign(state, new_value)

# Launch the graph and run the ops
with tf.Session() as sess:
    # initialize the variables
    tf.global_variables_initializer().run()

    # print the initial value of 'state'
    print(sess.run(state))
    # run the op, update 'state', and print 'state'
    for _ in range(3):
        sess.run(update)
        print(sess.run(state))

# Output:
# 0
# 1
# 2
# 3


As you can see, calling sess.run(Tensor) directly returns the value of that tensor. Naturally, we can pass in several tensors to fetch several results at once; the example below fetches the mul and intermed tensors together.

input1 = tf.constant([3.0])
input2 = tf.constant([2.0])
input3 = tf.constant([5.0])
intermed = tf.add(input2, input3)
mul = tf.multiply(input1, intermed)  # tf.mul was renamed tf.multiply in TF 1.0

with tf.Session() as sess:
    result = sess.run([mul, intermed])
    print(result)

# Output:
# [array([ 21.], dtype=float32), array([ 7.], dtype=float32)]


Notice that so far everything has been either a constant or a Variable, which feels rather limited. TF also provides tf.placeholder() — as the name suggests, a placeholder. What is it for? It lets you substitute a value for a node's output at run time. You can specify the dtype and the shape of the data; for example, tf.placeholder('uint8', [None, None, 3]) can hold any 3-channel image. Placeholders are used together with feeds: a feed supplies data to the placeholder you created.

input1 = tf.placeholder(tf.float32)
input2 = tf.placeholder(tf.float32)
output = tf.multiply(input1, input2)

with tf.Session() as sess:
    print(sess.run([output], feed_dict={input1: [7.], input2: [2.]}))

# Output:
# [array([ 14.], dtype=float32)]


A First MNIST Example

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

x = tf.placeholder(tf.float32, [None, 784])

W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))

y = tf.nn.softmax(tf.matmul(x, W) + b)

# compute cross-entropy
y_ = tf.placeholder(tf.float32, [None, 10])

cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))

train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)

with tf.Session() as sess:
    tf.global_variables_initializer().run()

    for i in range(1000):
        print(i)
        batch_xs, batch_ys = mnist.train.next_batch(100)
        sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})

    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))


Analysis:

1. reduce_sum / reduce_mean

These reduce (collapse) a tensor along a given dimension: reduce_sum sums along that dimension and reduce_mean averages along it, so that dimension is collapsed away. tf.reduce_sum(Tensor, reduction_indices=[1]) sums along the second dimension. For each image, the 10 entries of the one-hot label y_ are multiplied elementwise with the 10 entries of log(y) and summed, which is exactly the per-image cross-entropy −∑ᵢ y′ᵢ log(yᵢ). Finally, reduce_mean averages this cross-entropy over the 100 images of the batch.
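
To make the collapsing concrete, here is a minimal sketch with made-up values (not from the original post):

import tensorflow as tf

t = tf.constant([[1., 2., 3.],
                 [4., 5., 6.]])  # shape (2, 3)
with tf.Session() as sess:
    # collapse the second dimension: one number per row
    print(sess.run(tf.reduce_sum(t, reduction_indices=[1])))   # [ 6. 15.]
    print(sess.run(tf.reduce_mean(t, reduction_indices=[1])))  # [ 2.  5.]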

2. argmax

Returns the index of the largest value along a given dimension. Here tf.argmax(y, 1) extracts the predicted label: the predictions y have shape (100, 10), and for each image the index of the largest of its 10 scores is the predicted digit. Likewise, tf.argmax(y_, 1) recovers the true digit from the one-hot ground-truth label.
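
A tiny sketch with hypothetical scores (two "images", three "classes"), showing that argmax returns indices, not values:

import tensorflow as tf

scores = tf.constant([[0.1, 0.7, 0.2],
                      [0.8, 0.1, 0.1]])  # shape (2, 3)
with tf.Session() as sess:
    # index of the largest score per row
    print(sess.run(tf.argmax(scores, 1)))  # [1 0]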

3. cast

Casts a tensor to a different data type. Here it converts the boolean correct_prediction vector to float32, so that reduce_mean of the resulting 0/1 values gives the accuracy.
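
For illustration, a minimal sketch with made-up booleans standing in for correct_prediction:

import tensorflow as tf

pred = tf.constant([True, False, True, True])
with tf.Session() as sess:
    print(sess.run(tf.cast(pred, tf.float32)))                  # [1. 0. 1. 1.]
    print(sess.run(tf.reduce_mean(tf.cast(pred, tf.float32))))  # 0.75, i.e. the accuracy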

Testing Your Own MNIST Images

# -*- coding: utf-8 -*-
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

x = tf.placeholder(tf.float32, [None, 784])

W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))

y = tf.nn.softmax(tf.matmul(x, W) + b)

# Use argmax to get the most likely prediction from the softmax scores.
y_max = tf.argmax(y, 1)  # tf.arg_max is a deprecated alias of tf.argmax

# compute cross-entropy
y_ = tf.placeholder(tf.float32, [None, 10])

cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))

train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)

# Read a single image file (binary mode for image bytes)
pic_name = '1.jpg'
image_raw_data = tf.gfile.FastGFile(pic_name, 'rb').read()
# Force a single channel so the flattened vector has 784 entries
own_img_f = tf.image.decode_image(image_raw_data, channels=1)

# Convert pixel values to [0, 1]; the model above is trained on inputs in [0, 1].
own_img = tf.image.convert_image_dtype(image=own_img_f, dtype=tf.float32)

with tf.Session() as sess:
    print(mnist.test.images[1].shape)  # (784,)
    tf.global_variables_initializer().run()

    for i in range(1000):
        print(i)
        batch_xs, batch_ys = mnist.train.next_batch(100)
        sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})

    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))

    # Choose which image of the MNIST test set to predict
    idx_pic = 0
    print('If I predict image ' + str(idx_pic + 1) + ' of MNIST, then I get:')
    print('The scores for each label:')

    # Since x, the input node that y depends on, is declared as [None, 784], it accepts
    # any data of shape [n, 784]. mnist.test.images[idx_pic] has shape [784], so it must
    # be expanded to [1, 784] with tf.expand_dims.
    # But tf.expand_dims is an op returning a tensor, and a tensor cannot be fed into x
    # directly; call .eval() to convert it to a numpy array first.
    print(sess.run(y, feed_dict={x: tf.expand_dims(mnist.test.images[idx_pic], 0).eval()}))
    print(sess.run(y_max, feed_dict={x: tf.expand_dims(mnist.test.images[idx_pic], 0).eval()}))

    print('If I predict the image ' + pic_name + ', then I get:')
    # The decoded image is [28, 28, 1], so flatten it to one dimension
    # and then expand it to [1, 784].
    print(sess.run(y_max, feed_dict={x: tf.expand_dims(tf.reshape(own_img, [-1]), 0).eval()}))


Key point: .eval() converts tensor data into numpy data. This works because the values returned by Session.run or by a tensor's eval are numpy arrays:

>>> print(type(tf.Session().run(tf.constant([1,2,3]))))
<class 'numpy.ndarray'>
# or
>>> sess = tf.InteractiveSession()
>>> print(type(tf.constant([1,2,3]).eval()))
<class 'numpy.ndarray'>
# or
>>> sess = tf.Session()
>>> with sess.as_default():
...     print(type(tf.constant([1,2,3]).eval()))
<class 'numpy.ndarray'>


A More Advanced MNIST Example: Using a Convolutional Network

from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

import tensorflow as tf
sess = tf.InteractiveSession()

x = tf.placeholder(tf.float32, shape=[None, 784])
y_ = tf.placeholder(tf.float32, shape=[None, 10])

def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')

W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])

x_image = tf.reshape(x, [-1, 28, 28, 1])

h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)

W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])

h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)

W_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])

h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

# dropout
# keep_prob is a float32 keep probability; dropout is applied after the first fully connected layer
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])

# Still the same pattern as the single-layer softmax regression
y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2

cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
sess.run(tf.global_variables_initializer())
for i in range(20000):
    batch = mnist.train.next_batch(50)
    if i % 100 == 0:
        # With an InteractiveSession, nodes can be eval'ed directly
        train_accuracy = accuracy.eval(feed_dict={
            x: batch[0], y_: batch[1], keep_prob: 1.0})
        print("step %d, training accuracy %g" % (i, train_accuracy))
    train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})

print("test accuracy %g" % accuracy.eval(feed_dict={
    x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))