TensorFlow MNIST Digit Recognition Practice Code
2017-04-26 19:49
Scheme 1: training + validation code (single script)
Scheme 2: training + validation code (split into separate inference, training, and evaluation files)
Scheme 1: training + validation code (single script)
# -*- coding: utf-8 -*-
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# layer sizes
INPUT_NODE = 784
LAYER1_NODE = 500
OUTPUT_NODE = 10
# batch size
BATCH_SIZE = 100
# training hyperparameters
LEARNING_RATE_BASE = 0.8
LEARNING_RATE_DECAY = 0.99
REGULARIZATION_RATE = 0.0001
TRAINING_STEPS = 30000
MOVING_AVERAGE_DECAY = 0.99

# forward pass; avg_class=None uses the raw weights, otherwise the moving-average shadow values
def inference(input_tensor, avg_class, weights1, biases1, weights2, biases2):
    if avg_class is None:
        layer1 = tf.nn.relu(tf.matmul(input_tensor, weights1) + biases1)
        return tf.matmul(layer1, weights2) + biases2
    else:
        layer1 = tf.nn.relu(tf.matmul(input_tensor, avg_class.average(weights1)) + avg_class.average(biases1))
        return tf.matmul(layer1, avg_class.average(weights2)) + avg_class.average(biases2)

def train(mnist):
    # input placeholders and labels
    x = tf.placeholder(tf.float32, [None, INPUT_NODE], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, OUTPUT_NODE], name='y-input')
    # hidden-layer parameters
    weights1 = tf.Variable(tf.truncated_normal([INPUT_NODE, LAYER1_NODE], stddev=0.1))
    biases1 = tf.Variable(tf.constant(0.1, shape=[LAYER1_NODE]))
    # output-layer parameters
    weights2 = tf.Variable(tf.truncated_normal([LAYER1_NODE, OUTPUT_NODE], stddev=0.1))
    biases2 = tf.Variable(tf.constant(0.1, shape=[OUTPUT_NODE]))
    # forward pass result y
    y = inference(x, None, weights1, biases1, weights2, biases2)
    # counts training steps; trainable=False keeps it out of the moving average
    global_step = tf.Variable(0, trainable=False)
    # moving-average model and the averaged forward pass average_y
    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())
    average_y = inference(x, variable_averages, weights1, biases1, weights2, biases2)
    # cross-entropy plus L2 regularization --> loss
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y)
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    regularization = regularizer(weights1) + regularizer(weights2)
    loss = cross_entropy_mean + regularization
    # learning rate with exponential decay
    learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE, global_step, mnist.train.num_examples / BATCH_SIZE, LEARNING_RATE_DECAY)
    # gradient descent step (learning rate, loss, global step)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
    # group the training step and the moving-average update into one op
    with tf.control_dependencies([train_step, variables_averages_op]):
        train_op = tf.no_op(name='train')
    # accuracy, evaluated with the moving-average forward pass (matches the printed messages below)
    correct_prediction = tf.equal(tf.argmax(average_y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    # checkpointing
    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        validate_feed = {x: mnist.validation.images, y_: mnist.validation.labels}
        test_feed = {x: mnist.test.images, y_: mnist.test.labels}
        for i in range(TRAINING_STEPS):
            # check validation accuracy every 1000 steps
            if i % 1000 == 0:
                validate_acc = sess.run(accuracy, feed_dict=validate_feed)
                print("After %d training step(s), validation accuracy using average model is %g" % (i, validate_acc))
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            sess.run(train_op, feed_dict={x: xs, y_: ys})
        saver.save(sess, "./model/model.ckpt")
        test_acc = sess.run(accuracy, feed_dict=test_feed)
        print("After %d training step(s), test accuracy using average model is %g" % (TRAINING_STEPS, test_acc))

def main(argv=None):
    mnist = input_data.read_data_sets("mnist_data/", one_hot=True)
    train(mnist)

if __name__ == '__main__':
    tf.app.run()
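For reference, with the default staircase=False, tf.train.exponential_decay produces lr = LEARNING_RATE_BASE * LEARNING_RATE_DECAY ** (global_step / decay_steps), where decay_steps = mnist.train.num_examples / BATCH_SIZE. A quick hand check (a standalone sketch, not part of the script above; it assumes the standard 55000-image MNIST training split):

# Standalone sketch: how fast the learning rate decays over the 30000 training steps.
LEARNING_RATE_BASE = 0.8
LEARNING_RATE_DECAY = 0.99
decay_steps = 55000 / 100.0   # mnist.train.num_examples / BATCH_SIZE

for step in (0, 5000, 30000):
    lr = LEARNING_RATE_BASE * LEARNING_RATE_DECAY ** (step / decay_steps)
    print(step, round(lr, 3))  # roughly 0.8, 0.73, 0.462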
Scheme 2: training + validation code (separate files)
File mnist_inference.py
# -*- coding: utf-8 -*-
import tensorflow as tf

# layer sizes
INPUT_NODE = 784
LAYER1_NODE = 500
OUTPUT_NODE = 10

# create the weight variable and register its L2 penalty in the 'losses' collection
def get_weight_variable(shape, regularizer):
    weights = tf.get_variable("weights", shape, initializer=tf.truncated_normal_initializer(stddev=0.1))
    if regularizer is not None:
        tf.add_to_collection('losses', regularizer(weights))
    return weights

# forward pass
def inference(input_tensor, regularizer):
    # layer1
    with tf.variable_scope('layer1'):
        weights = get_weight_variable([INPUT_NODE, LAYER1_NODE], regularizer)
        biases = tf.get_variable("biases", [LAYER1_NODE], initializer=tf.constant_initializer(0.0))
        layer1 = tf.nn.relu(tf.matmul(input_tensor, weights) + biases)
    # layer2
    with tf.variable_scope('layer2'):
        weights = get_weight_variable([LAYER1_NODE, OUTPUT_NODE], regularizer)
        biases = tf.get_variable("biases", [OUTPUT_NODE], initializer=tf.constant_initializer(0.0))
        layer2 = tf.matmul(layer1, weights) + biases
    return layer2
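As a quick sanity check of the forward pass (a hypothetical snippet, not part of the original files), inference can be run on random input in a fresh graph; with regularizer=None nothing is added to the 'losses' collection and the output shape is (batch, OUTPUT_NODE):

import numpy as np
import tensorflow as tf
import mnist_inference

with tf.Graph().as_default():
    x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE])
    logits = mnist_inference.inference(x, None)  # no regularizer at prediction time
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        out = sess.run(logits, feed_dict={x: np.random.rand(3, mnist_inference.INPUT_NODE)})
        print(out.shape)  # (3, 10)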
File mnist_train.py
# -*- coding: utf-8 -*-
import os
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import mnist_inference

# batch size
BATCH_SIZE = 100
# training hyperparameters
LEARNING_RATE_BASE = 0.8
LEARNING_RATE_DECAY = 0.99
REGULARIZATION_RATE = 0.0001
TRAINING_STEPS = 30000
MOVING_AVERAGE_DECAY = 0.99
# checkpoint directory and file name
MODEL_SAVE_PATH = "./model2/"
MODEL_NAME = "model.ckpt"

def train(mnist):
    # input placeholders and labels
    x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')
    # forward pass result y (the L2 terms are collected inside inference)
    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    y = mnist_inference.inference(x, regularizer)
    global_step = tf.Variable(0, trainable=False)
    # moving-average model
    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())
    # cross-entropy plus the collected regularization terms --> loss
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y)
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
    # learning rate with exponential decay
    learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE, global_step, mnist.train.num_examples / BATCH_SIZE, LEARNING_RATE_DECAY)
    # gradient descent step (learning rate, loss, global step)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
    # group the training step and the moving-average update into one op
    with tf.control_dependencies([train_step, variables_averages_op]):
        train_op = tf.no_op(name='train')
    # checkpointing
    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: xs, y_: ys})
            # report and save every 1000 steps
            if i % 1000 == 0:
                print("After %d training step(s), loss on training batch is %g" % (step, loss_value))
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME))

def main(argv=None):
    mnist = input_data.read_data_sets("mnist_data/", one_hot=True)
    train(mnist)

if __name__ == '__main__':
    tf.app.run()
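Because variables_averages_op applies the ExponentialMovingAverage to every trainable variable, the checkpoint holds both the raw parameters and their shadow copies (named like layer1/weights/ExponentialMovingAverage), which is what mnist_eval.py later restores. A small inspection sketch (hypothetical; it assumes training has already written ./model2/model.ckpt):

import tensorflow as tf

# List every variable stored in the checkpoint together with its shape.
reader = tf.train.NewCheckpointReader("./model2/model.ckpt")
for name, shape in sorted(reader.get_variable_to_shape_map().items()):
    print(name, shape)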
File mnist_eval.py
# -*- coding: utf-8 -*-
import os
import time
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import mnist_inference
import mnist_train

# seconds between evaluation rounds
EVAL_INTERVAL_SECS = 10
# checkpoint directory and file name (must match mnist_train.py)
MODEL_SAVE_PATH = "./model2/"
MODEL_NAME = "model.ckpt"

def evaluate(mnist):
    with tf.Graph().as_default() as g:
        x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name='x-input')
        y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')
        validate_feed = {x: mnist.validation.images, y_: mnist.validation.labels}
        # no regularizer at evaluation time
        y = mnist_inference.inference(x, None)
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        # restore the moving-average (shadow) values into the plain variables
        variable_averages = tf.train.ExponentialMovingAverage(mnist_train.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)
        with tf.Session() as sess:
            saver.restore(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME))
            accuracy_score = sess.run(accuracy, feed_dict=validate_feed)
            print("**********accuracy = %g" % accuracy_score)

def main(argv=None):
    mnist = input_data.read_data_sets("mnist_data/", one_hot=True)
    evaluate(mnist)

if __name__ == '__main__':
    tf.app.run()
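EVAL_INTERVAL_SECS and the time import are never used in the script above. A common variant (a sketch, not code from the original post) keeps re-running the evaluation so the reported accuracy tracks the newest checkpoint while mnist_train.py is still running:

def main(argv=None):
    mnist = input_data.read_data_sets("mnist_data/", one_hot=True)
    while True:
        evaluate(mnist)                  # rebuild the graph and restore the latest checkpoint
        time.sleep(EVAL_INTERVAL_SECS)   # wait before the next evaluation round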