tensorflow 使用正则化
2017-08-09 13:13
260 查看
TensorFlow 使用正则化
import tensorflow.contrib.layers as layers
def main(_):
    """Build, inspect, and set up training for an L2-regularized MNIST net.

    Loads MNIST, constructs the `EasyNet` network (whose layers register
    L2 penalties in the REGULARIZATION_LOSSES collection), prints the
    trainable variables and the collected regularization losses, then
    defines a loss that is cross-entropy plus the summed penalties and
    an Adam train op over it.

    Args:
        _: unused positional argument (tf.app.run passes argv here).
    """
    mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
    x = tf.placeholder(tf.float32, [None, 784])
    y_ = tf.placeholder(tf.float32, [None, 10])

    # Make a network with regularization; its weights_regularizer hooks
    # add loss tensors to tf.GraphKeys.REGULARIZATION_LOSSES.
    y_conv = easier_network(x, FLAGS.regu)

    # List every trainable variable under the 'EasyNet' scope.
    weights = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'EasyNet')
    print("")
    for w in weights:
        shp = w.get_shape().as_list()
        print("- {} shape:{} size:{}".format(w.name, shp, np.prod(shp)))
    print("")

    # List the regularization-loss tensors collected for the same scope.
    reg_ws = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES, 'EasyNet')
    for w in reg_ws:
        shp = w.get_shape().as_list()
        print("- {} shape:{} size:{}".format(w.name, shp, np.prod(shp)))
    print("")

    # Make the loss function `loss_fn` with regularization:
    # cross-entropy on the logits plus the sum of all collected penalties.
    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
    loss_fn = cross_entropy + tf.reduce_sum(reg_ws)
    train_step = tf.train.AdamOptimizer(1e-4).minimize(loss_fn)
通过 `tf.GraphKeys.REGULARIZATION_LOSSES` 集合可以得到图中收集的所有正则化损失项。
另外，也可以显式地对指定变量列表应用正则化：`regularizer = tf.contrib.layers.apply_regularization(tf.contrib.layers.l2_regularizer(weight_decay), gen_vars + d_vars)`。
import tensorflow.contrib.layers as layers
def easier_network(x, reg): """ A network based on tf.contrib.learn, with input `x`. """ with tf.variable_scope('EasyNet'): out = layers.flatten(x) out = layers.fully_connected(out, num_outputs=200, weights_initializer = layers.xavier_initializer(uniform=True), weights_regularizer = layers.l2_regularizer(scale=reg), activation_fn = tf.nn.tanh) out = layers.fully_connected(out, num_outputs=200, weights_initializer = layers.xavier_initializer(uniform=True), weights_regularizer = layers.l2_regularizer(scale=reg), activation_fn = tf.nn.tanh) out = layers.fully_connected(out, num_outputs=10, # Because there are ten digits! weights_initializer = layers.xavier_initializer(uniform=True), weights_regularizer = layers.l2_regularizer(scale=reg), activation_fn = None) return out
def main(_):
    """Build, inspect, and set up training for an L2-regularized MNIST net.

    Loads MNIST, constructs the `EasyNet` network (whose layers register
    L2 penalties in the REGULARIZATION_LOSSES collection), prints the
    trainable variables and the collected regularization losses, then
    defines a loss that is cross-entropy plus the summed penalties and
    an Adam train op over it.

    Args:
        _: unused positional argument (tf.app.run passes argv here).
    """
    mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
    x = tf.placeholder(tf.float32, [None, 784])
    y_ = tf.placeholder(tf.float32, [None, 10])

    # Make a network with regularization; its weights_regularizer hooks
    # add loss tensors to tf.GraphKeys.REGULARIZATION_LOSSES.
    y_conv = easier_network(x, FLAGS.regu)

    # List every trainable variable under the 'EasyNet' scope.
    weights = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'EasyNet')
    print("")
    for w in weights:
        shp = w.get_shape().as_list()
        print("- {} shape:{} size:{}".format(w.name, shp, np.prod(shp)))
    print("")

    # List the regularization-loss tensors collected for the same scope.
    reg_ws = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES, 'EasyNet')
    for w in reg_ws:
        shp = w.get_shape().as_list()
        print("- {} shape:{} size:{}".format(w.name, shp, np.prod(shp)))
    print("")

    # Make the loss function `loss_fn` with regularization:
    # cross-entropy on the logits plus the sum of all collected penalties.
    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
    loss_fn = cross_entropy + tf.reduce_sum(reg_ws)
    train_step = tf.train.AdamOptimizer(1e-4).minimize(loss_fn)
通过 `tf.GraphKeys.REGULARIZATION_LOSSES` 集合可以得到图中收集的所有正则化损失项。
另外，也可以显式地对指定变量列表应用正则化：`regularizer = tf.contrib.layers.apply_regularization(tf.contrib.layers.l2_regularizer(weight_decay), gen_vars + d_vars)`。
相关文章推荐
- 基于MNIST数据集使用TensorFlow训练一个没有隐含层的浅层神经网络
- 【tensorflow 学习】 gpu使用
- tensorflow不同位置使用相同的name_scope
- 深度学习四:tensorflow-使用卷积神经网络识别手写数字
- TensorFlow学习笔记【一】 基本使用
- 【TensorFlow】MNIST(使用CNN)
- 使用sklearn进行数据预处理 —— 标准化/归一化/正则化
- TensorFlow学习笔记(一):快速安装与使用TensorFlow
- Tensorflow学习笔记--使用keras完成文本情感分类问题
- tensorflow使用神经网络实现mnist分类
- tensorflow 使用flags定义命令行参数的方法
- 关于Tensorflow中的tf.train.batch函数的使用
- Tensorflow同时加载使用多个模型
- 使用tensorflow的lstm网络进行时间序列预测
- TensorFlow笔记(二)--Tensor的基本使用
- Ubuntu 16.04 Tensorflow 使用源码安装
- 使用Docker运行TensorFlow
- 使用Tensorflow和MNIST识别自己手写的数字
- TensorFlow基本使用
- 在spyder下使用TensorFlow