
TensorFlow RNN LSTM Language Model

Reference posts:

http://blog.csdn.net/u014595019/article/details/52605693   — explains the principles behind LSTM

https://www.cnblogs.com/wuzhitj/p/6297992.html    — walkthrough of the LSTM code

http://blog.csdn.net/qiqiaiairen/article/details/53239506  — detailed explanation of the LSTM-related functions

First, download the PTB data from: http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz
The links above cover the details; the main points are summarized below:

(1) The total number of tokens consumed in one epoch is roughly batch_size * num_steps * epoch_size, where:

batch_size: the number of samples in one batch, i.e. how many sequences are processed in parallel

num_steps: the number of steps the LSTM is unrolled, i.e. how many cells there are along the time axis

epoch_size: the number of iterations in one epoch; each iteration consumes batch_size * num_steps tokens, because each of the num_steps unrolled cells is fed a batch of batch_size tokens (see the sketch below)
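
As a quick sanity check of this arithmetic, here is a minimal sketch using the SmallConfig values; the data length of 929,589 tokens for the PTB training set is an assumed figure here (substitute len(train_data)):

# Sketch: how many iterations one epoch takes with batch_size=20, num_steps=20.
batch_size = 20       # sequences processed in parallel
num_steps = 20        # unrolled LSTM steps per iteration
data_len = 929589     # assumed size of the PTB training set, in tokens

batch_len = data_len // batch_size          # tokens laid out per batch row
epoch_size = (batch_len - 1) // num_steps   # iterations needed for one pass over the data
print(epoch_size)                           # each iteration consumes batch_size * num_steps tokens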

(2) Weighted cross-entropy: the third argument gives every one of the batch_size * num_steps targets a weight of 1, so each token contributes equally to the cross-entropy (see the sketch after the snippet).

loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
    [logits],
    [tf.reshape(input_.targets, [-1])],
    [tf.ones([batch_size * num_steps], dtype=tf.float32)]
)
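
For intuition: because all the weights are 1, this is just the ordinary per-token sparse softmax cross-entropy. A minimal sketch of that equivalence, with illustrative shapes (4 flattened targets, vocabulary of 10):

# Sketch: sequence_loss_by_example with unit weights vs. plain cross-entropy.
import tensorflow as tf

logits = tf.random_normal([4, 10])    # 4 = batch_size * num_steps, 10 = vocab_size
targets = tf.constant([1, 3, 5, 7])   # flattened true word ids

loss_a = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
    [logits], [targets], [tf.ones([4], dtype=tf.float32)])
loss_b = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=targets, logits=logits)

with tf.Session() as sess:
    a, b = sess.run([loss_a, loss_b])
    print(a)  # per-token losses
    print(b)  # the same values, up to floating-point error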

(3) This snippet feeds the final_state of the previous run in as the initial state of the current run; the loop executes num_layers times, i.e. once per stacked LSTM layer.

feed_dict = {}
for i, (c, h) in enumerate(model.initial_state):
    feed_dict[c] = state[i].c
    feed_dict[h] = state[i].h

Output of one iteration of this loop (the c and h tensors of one layer):

Tensor("train/Model/MultiRNNCellZeroState/BasicLSTMCellZeroState/zeros:0", shape=(20, 200), dtype=float32)

Tensor("train/Model/MultiRNNCellZeroState/BasicLSTMCellZeroState/zeros_1:0", shape=(20, 200), dtype=float32)

The state tensors have shape (20, 200): 20 is batch_size and 200 is hidden_size, i.e. the dimensionality of the hidden vector.
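
To make the nesting explicit: with num_layers=2 the state is a tuple of two LSTMStateTuple(c, h) pairs, one per stacked layer, and each of c and h has shape (batch_size, hidden_size). A minimal sketch using the same sizes as SmallConfig:

# Sketch: what cell.zero_state returns for a 2-layer BasicLSTMCell stack.
import tensorflow as tf

cells = [tf.contrib.rnn.BasicLSTMCell(200, state_is_tuple=True) for _ in range(2)]
stack = tf.contrib.rnn.MultiRNNCell(cells, state_is_tuple=True)
init_state = stack.zero_state(batch_size=20, dtype=tf.float32)

for i, (c, h) in enumerate(init_state):
    print(i, c.shape, h.shape)   # both (20, 200): (batch_size, hidden_size)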

(4) Passing reuse=True here shares the variables created inside train_model (those defined with get_variable), so the already-trained weights such as softmax_w and softmax_b are reused.

Reference on variable sharing: http://blog.csdn.net/winycg/article/details/78650045

with tf.variable_scope("Model", reuse=False, initializer=initializer):

            train_model = PTBModel(is_training=True, config=config, input_=train_input))

with tf.variable_scope("Model", reuse=True, initializer=initializer):

            test_model = PTBModel(is_training=False, config=eval_config, input_=test_data)

An example:

class A(object):
    def __init__(self):
        self.a = tf.get_variable('a', [1], dtype=tf.float32)
        self.b = tf.assign(self.a, [10])

with tf.variable_scope('AA', reuse=False):
    x = A()
with tf.variable_scope('AA', reuse=True):
    y = A()

sess = tf.Session()
print(sess.run(x.b))  # [10.]
print(sess.run(y.a))  # [10.]
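
Conversely, if the scope is entered again without reuse=True (the default), get_variable refuses to create 'AA/a' a second time. Continuing the example above, a minimal sketch of the failure case:

# Sketch: re-entering the scope without reuse=True fails instead of sharing.
with tf.variable_scope('AA'):    # reuse defaults to False at the top level
    try:
        z = A()                  # tries to create 'AA/a' again
    except ValueError as e:
        print(e)                 # complains that variable AA/a already exists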


Full code:

import reader
import time
import numpy as np
import tensorflow as tf


class PTBInput(object):
    def __init__(self, data, config, name=None):
        self.batch_size = batch_size = config.batch_size
        self.num_steps = num_steps = config.num_steps
        # iterations per epoch; the -1 accounts for the targets being the inputs shifted by one
        self.epoch_size = ((len(data) // batch_size) - 1) // num_steps
        self.input_data, self.targets = reader.ptb_producer(data, batch_size, num_steps, name=name)

class PTBModel(object):
    def __init__(self, is_training, config, input_):
        self._input = input_
        batch_size = input_.batch_size
        num_steps = input_.num_steps
        size = config.hidden_size
        vocab_size = config.vocab_size

        def lstm_cell():
            return tf.contrib.rnn.BasicLSTMCell(size, forget_bias=0.0, state_is_tuple=True)
        attn_cell = lstm_cell
        if is_training and config.keep_prob < 1:
            def attn_cell():
                return tf.contrib.rnn.DropoutWrapper(lstm_cell(), output_keep_prob=config.keep_prob)
        cell = tf.contrib.rnn.MultiRNNCell([attn_cell() for _ in range(config.num_layers)], state_is_tuple=True)

        self._initial_state = cell.zero_state(batch_size, tf.float32)

        # embed the word ids on the CPU
        with tf.device('/cpu:0'):
            embedding = tf.get_variable("embeddings", [vocab_size, size], tf.float32)
            inputs = tf.nn.embedding_lookup(embedding, input_.input_data)

        # apply dropout to the embedded inputs during training
        if is_training and config.keep_prob < 1:
            inputs = tf.nn.dropout(inputs, config.keep_prob)

        outputs = []
        state = self._initial_state
        # unroll the LSTM for num_steps time steps, reusing the weights after the first step
        with tf.variable_scope("RNN"):
            for time_step in range(num_steps):
                if time_step > 0:
                    tf.get_variable_scope().reuse_variables()
                (cell_out, state) = cell(inputs[:, time_step, :], state)
                outputs.append(cell_out)

        output = tf.reshape(tf.concat(outputs, 1), (-1, size))  # (batch_size * num_steps, hidden_size)
        softmax_w = tf.get_variable("softmax_w", [size, vocab_size], tf.float32)
        softmax_b = tf.get_variable("softmax_b", [vocab_size], tf.float32)
        logits = tf.matmul(output, softmax_w) + softmax_b
        loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
            [logits],
            [tf.reshape(input_.targets, [-1])],
            [tf.ones([batch_size * num_steps], dtype=tf.float32)]
        )
        self._cost = cost = tf.reduce_sum(loss) / batch_size
        self._final_state = state

        if not is_training:
            return
        self._lr = tf.Variable(0.0, trainable=False)
        tvars = tf.trainable_variables()
        grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), config.max_grad_norm)
        optimizer = tf.train.GradientDescentOptimizer(self._lr)
        # zip pairs each clipped gradient with its variable; the optimizer applies them to the trainable parameters
        self._train_op = optimizer.apply_gradients(zip(grads, tvars),
                                                   global_step=tf.contrib.framework.get_or_create_global_step())
        self._new_lr = tf.placeholder(tf.float32, shape=[], name="new_learning_rate")
        self._lr_update = tf.assign(self._lr, self._new_lr)

    def assign_lr(self, session, lr_value):
        session.run(self._lr_update, feed_dict={self._new_lr: lr_value})

    @property
    def input(self):
        return self._input

    @property
    def initial_state(self):
        return self._initial_state

    @property
    def cost(self):
        return self._cost

    @property
    def final_state(self):
        return self._final_state

    @property
    def lr(self):
        return self._lr

    @property
    def train_op(self):
        return self._train_op

class SmallConfig(object):
    init_scale = 0.1
    learning_rate = 1.0
    max_grad_norm = 5
    num_layers = 2
    num_steps = 20
    hidden_size = 200
    max_epoch = 4        # epochs trained at the initial learning rate before decay starts
    max_max_epoch = 13   # total number of training epochs
    keep_prob = 1.0      # dropout keep probability (1.0 = no dropout)
    lr_decay = 0.5       # multiplicative learning-rate decay per epoch after max_epoch
    batch_size = 20
    vocab_size = 10000

def run_epoch(session, model, eval_op=None, verbose=False):
    start_time = time.time()
    costs = 0.0
    iters = 0
    state = session.run(model.initial_state)

    for step in range(model.input.epoch_size):
        feed_dict = {}
        # carry the state over: feed the previous iteration's final_state as this iteration's initial state
        for i, (c, h) in enumerate(model.initial_state):
            feed_dict[c] = state[i].c
            feed_dict[h] = state[i].h

        fetches = {
            "cost": model.cost,
            "final_state": model.final_state
        }
        if eval_op is not None:
            fetches["eval_op"] = eval_op

        vals = session.run(fetches, feed_dict)
        cost = vals["cost"]
        state = vals["final_state"]
        costs += cost
        iters += model.input.num_steps

        if verbose and step % (model.input.epoch_size // 10) == 10:
            print("%.3f perplexity: %.3f speed: %.0f wps" %
                  (step / model.input.epoch_size, np.exp(costs / iters),
                   iters * model.input.batch_size / (time.time() - start_time)))
    return np.exp(costs / iters)

train_data, valid_data, test_data, _ = reader.ptb_raw_data('simple-examples/data/')
config = SmallConfig()
eval_config = SmallConfig()
eval_config.batch_size = 1
eval_config.num_steps = 1

with tf.Graph().as_default():
    initializer = tf.random_uniform_initializer(-config.init_scale, config.init_scale)

    with tf.name_scope('train'):
        train_input = PTBInput(config=config, data=train_data, name="train_input")
        with tf.variable_scope("Model", reuse=False, initializer=initializer):
            train_model = PTBModel(is_training=True, config=config, input_=train_input)

    with tf.name_scope('valid'):
        valid_input = PTBInput(config=config, data=valid_data, name="valid_input")
        # reuse=True: share the variables created by train_model
        with tf.variable_scope("Model", reuse=True, initializer=initializer):
            valid_model = PTBModel(is_training=False, config=config, input_=valid_input)

    with tf.name_scope('test'):
        test_input = PTBInput(config=eval_config, data=test_data, name="test_input")
        with tf.variable_scope("Model", reuse=True, initializer=initializer):
            test_model = PTBModel(is_training=False, config=eval_config, input_=test_input)

    sv = tf.train.Supervisor()
    with sv.managed_session() as session:
        for i in range(config.max_max_epoch):
            # keep the full learning rate for the first max_epoch epochs, then decay it each epoch
            lr_decay = config.lr_decay ** max(i + 1 - config.max_epoch, 0.0)
            train_model.assign_lr(session, config.learning_rate * lr_decay)

            print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(train_model.lr)))
            train_perplexity = run_epoch(session, train_model, eval_op=train_model.train_op, verbose=True)
            print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))
            valid_perplexity = run_epoch(session, valid_model)
            print("Epoch: %d Valid Perplexity: %.3f" % (i + 1, valid_perplexity))
            test_perplexity = run_epoch(session, test_model)
            print("Epoch: %d Test Perplexity: %.3f" % (i + 1, test_perplexity))