Tensorflow框架下实现Mnist数字识别
2017-04-18 20:00
525 查看
下面是实现数字识别的程序代码,注意相应的路径:
#!/usr/bin/python
import input_data  # Method 1: use a pre-downloaded copy saved under the path below
mnist = input_data.read_data_sets('/home/yuan/TestMnist', one_hot=True)
import tensorflow as tf
import sys
# NOTE(review): this re-import rebinds the name `input_data` to the TensorFlow
# tutorial module, shadowing the local module imported above; `mnist` was
# already loaded, so the rebinding is harmless here.
from tensorflow.examples.tutorials.mnist import input_data  # Method 2: download from the internet
def weight_variable(shape):
    """Return a weight Variable of `shape`, initialized from a truncated
    normal distribution (stddev=0.1) to break symmetry."""
    # Fix: the scraped source lost the body indentation (SyntaxError as-is).
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)
def bias_variable(shape):
    """Return a bias Variable of `shape`, initialized to a small positive
    constant (0.1) so ReLU units start active."""
    # Fix: the scraped source lost the body indentation (SyntaxError as-is).
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)
def conv2d(x, W):
    """2-D convolution of `x` with kernel `W`, stride 1, SAME padding
    (spatial size is preserved)."""
    # Fix: the scraped source lost the body indentation (SyntaxError as-is).
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
    """2x2 max-pooling with stride 2, SAME padding (halves each spatial dim)."""
    # Fix: the scraped source lost the body indentation (SyntaxError as-is).
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
#mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
sess = tf.InteractiveSession()
x = tf.placeholder("float", shape=[None, 784])
y_ = tf.placeholder("float", shape=[None, 10])
W = tf.Variable(tf.zeros([784,10]))
b = tf.Variable(tf.zeros([10]))
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
x_image = tf.reshape(x, [-1, 28, 28, 1])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
# Now image size is reduced to 7*7
W_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
keep_prob = tf.placeholder("float")
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
y_conv=tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
cross_entropy = -tf.reduce_sum(y_*tf.log(y_conv))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
sess.run(tf.initialize_all_variables())
for i in range(20000):
batch = mnist.train.next_batch(50)
if i%100 == 0:
train_accuracy = accuracy.eval(feed_dict={
x:batch[0], y_: batch[1], keep_prob: 1.0})
print "step %d, training accuracy %.3f"%(i, train_accuracy)
train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
print "Training finished"
print "test accuracy %.3f" % accuracy.eval(feed_dict={
x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0})
下面是程序的部分结果:
Extracting /home/yuan/TestMnist/train-images-idx3-ubyte.gz
Extracting /home/yuan/TestMnist/train-labels-idx1-ubyte.gz
Extracting /home/yuan/TestMnist/t10k-images-idx3-ubyte.gz
Extracting /home/yuan/TestMnist/t10k-labels-idx1-ubyte.gz
step 0, training accuracy 0.100
step 100, training accuracy 0.800
step 200, training accuracy 0.860
step 300, training accuracy 0.800
step 400, training accuracy 0.980
step 500, training accuracy 0.880
step 600, training accuracy 1.000
step 700, training accuracy 0.960
step 800, training accuracy 0.900
step 900, training accuracy 1.000
..
..
..
..
step 19600, training accuracy 1.000
step 19700, training accuracy 1.000
step 19800, training accuracy 1.000
step 19900, training accuracy 1.000
Training finished
test accuracy 0.993
[Finished in 22256.5s]
#!/usr/bin/python
import input_data  # Method 1: use a pre-downloaded copy saved under the path below
mnist = input_data.read_data_sets('/home/yuan/TestMnist', one_hot=True)
import tensorflow as tf
import sys
# NOTE(review): this re-import rebinds the name `input_data` to the TensorFlow
# tutorial module, shadowing the local module imported above; `mnist` was
# already loaded, so the rebinding is harmless here.
from tensorflow.examples.tutorials.mnist import input_data  # Method 2: download from the internet
def weight_variable(shape):
    """Return a weight Variable of `shape`, initialized from a truncated
    normal distribution (stddev=0.1) to break symmetry."""
    # Fix: the scraped source lost the body indentation (SyntaxError as-is).
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)
def bias_variable(shape):
    """Return a bias Variable of `shape`, initialized to a small positive
    constant (0.1) so ReLU units start active."""
    # Fix: the scraped source lost the body indentation (SyntaxError as-is).
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)
def conv2d(x, W):
    """2-D convolution of `x` with kernel `W`, stride 1, SAME padding
    (spatial size is preserved)."""
    # Fix: the scraped source lost the body indentation (SyntaxError as-is).
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
    """2x2 max-pooling with stride 2, SAME padding (halves each spatial dim)."""
    # Fix: the scraped source lost the body indentation (SyntaxError as-is).
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
#mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
sess = tf.InteractiveSession()
x = tf.placeholder("float", shape=[None, 784])
y_ = tf.placeholder("float", shape=[None, 10])
W = tf.Variable(tf.zeros([784,10]))
b = tf.Variable(tf.zeros([10]))
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
x_image = tf.reshape(x, [-1, 28, 28, 1])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
# Now image size is reduced to 7*7
W_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
keep_prob = tf.placeholder("float")
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
y_conv=tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
cross_entropy = -tf.reduce_sum(y_*tf.log(y_conv))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
sess.run(tf.initialize_all_variables())
for i in range(20000):
batch = mnist.train.next_batch(50)
if i%100 == 0:
train_accuracy = accuracy.eval(feed_dict={
x:batch[0], y_: batch[1], keep_prob: 1.0})
print "step %d, training accuracy %.3f"%(i, train_accuracy)
train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
print "Training finished"
print "test accuracy %.3f" % accuracy.eval(feed_dict={
x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0})
下面是程序的部分结果:
Extracting /home/yuan/TestMnist/train-images-idx3-ubyte.gz
Extracting /home/yuan/TestMnist/train-labels-idx1-ubyte.gz
Extracting /home/yuan/TestMnist/t10k-images-idx3-ubyte.gz
Extracting /home/yuan/TestMnist/t10k-labels-idx1-ubyte.gz
step 0, training accuracy 0.100
step 100, training accuracy 0.800
step 200, training accuracy 0.860
step 300, training accuracy 0.800
step 400, training accuracy 0.980
step 500, training accuracy 0.880
step 600, training accuracy 1.000
step 700, training accuracy 0.960
step 800, training accuracy 0.900
step 900, training accuracy 1.000
..
..
..
..
step 19600, training accuracy 1.000
step 19700, training accuracy 1.000
step 19800, training accuracy 1.000
step 19900, training accuracy 1.000
Training finished
test accuracy 0.993
[Finished in 22256.5s]
相关文章推荐
- 深度学习-传统神经网络使用TensorFlow框架实现MNIST手写数字识别
- 深度学习-CNN卷积神经网络使用TensorFlow框架实现MNIST手写数字识别
- TensorFlow学习---实现mnist手写数字识别
- 使用Tensorflow实现CNN进行MNIST数字识别
- Deep Learning-TensorFlow (1) CNN卷积神经网络_MNIST手写数字识别代码实现
- TensorFlow实现机器学习的“Hello World”--Mnist手写数字识别
- Python(TensorFlow框架)实现手写数字识别系统
- TensorFlow代码实现(一)[MNIST手写数字识别]
- tensorflow 学习专栏(五):在mnist数据集上使用tensorflow实现临近算法(Nearest-Neighbor)进行手写数字识别
- TensorFlow 深度学习框架(6)-- mnist 数字识别及不同模型效果比较
- TensorFlow实现mnist数字识别——两层全连接实现
- TensorFlow之CNN实现MNIST手写数字识别
- 用Tensorflow搭建CNN卷积神经网络,实现MNIST手写数字识别
- TensorFlow DNN 实现MNIST数字识别(初版)
- TensorFlow 深度学习框架(6)-- mnist 数字识别及不同模型效果比较
- 【TensorFlow-windows】(一)实现Softmax Regression进行手写数字识别(mnist)
- 使用tensorflow卷积神经网络实现mnist手写数字识别
- MNIST数字识别问题之tensorflow实现
- (Tensorflow之八)MNIST数字识别源码--实战Google深度学习框架5.2小节
- tensorflow——用RNN实现MNIST手写数字识别