Pytorch LSTM 时间序列预测
2017-12-02 16:44
851 查看
详情可以参见文章

import torch
import torch.nn as nn
from torch.autograd import *
import torch.optim as optim
import torch.nn.functional as F
import matplotlib.pyplot as plt
import numpy as np
def SeriesGen(N):
    """Return sin(x) sampled at x = 1, 1.01, ..., up to (not including) N."""
    return torch.sin(torch.arange(1, N, 0.01))
def trainDataGen(seq, k):
    """Build (input, target) window pairs of length k from seq.

    The target window is the input window shifted forward by one step,
    so the model learns next-value prediction.
    """
    return [
        (seq[i:i + k], seq[i + 1:i + k + 1])
        for i in range(len(seq) - k - 1)
    ]
def ToVariable(x):
    """Wrap x in a FloatTensor-backed autograd Variable."""
    return Variable(torch.FloatTensor(x))
# Sample ~900 points of sin(x) for x in [1, 10) at step 0.01, then slice
# them into (input, target) sliding windows of length 10 for training.
y = SeriesGen(10)
dat = trainDataGen(y.numpy(),10)
class LSTMpred(nn.Module):
    """Single-layer LSTM mapping a 1-D sequence to a scalar prediction per step."""

    def __init__(self, input_size, hidden_dim):
        super(LSTMpred, self).__init__()
        self.input_dim = input_size
        self.hidden_dim = hidden_dim
        self.lstm = nn.LSTM(input_size, hidden_dim)
        self.hidden2out = nn.Linear(hidden_dim, 1)
        self.hidden = self.init_hidden()

    def init_hidden(self):
        """Return a fresh zero (h0, c0) state pair for a batch of one."""
        shape = (1, 1, self.hidden_dim)
        return (Variable(torch.zeros(*shape)), Variable(torch.zeros(*shape)))

    def forward(self, seq):
        # Reshape the flat sequence to (seq_len, batch=1, features) for nn.LSTM;
        # the recurrent state is kept on self so callers can reset it per window.
        steps = len(seq)
        lstm_out, self.hidden = self.lstm(seq.view(steps, 1, -1), self.hidden)
        return self.hidden2out(lstm_out.view(steps, -1))
# Train the LSTM on the first 700 windows with plain SGD and MSE loss.
model = LSTMpred(1, 6)
loss_function = nn.MSELoss()
optimizer = optim.SGD(model.parameters(), lr=0.01)

for epoch in range(10):
    print(epoch)
    for window, target in dat[:700]:
        window = ToVariable(window)
        target = ToVariable(target)
        optimizer.zero_grad()
        # Start every window from a zero recurrent state.
        model.hidden = model.init_hidden()
        prediction = model(window)
        loss = loss_function(prediction, target)
        loss.backward()
        optimizer.step()
# Roll the trained model over the held-out windows and keep the last-step
# prediction from each one, then plot predictions against the true series.
# NOTE(review): unlike training, the hidden state is NOT reset between
# windows here — confirm carrying state across windows is intended.
predDat = []
for seq, trueVal in dat[700:]:
    seq = ToVariable(seq)
    trueVal = ToVariable(trueVal)
    predDat.append(model(seq)[-1].data.numpy()[0])

fig = plt.figure()
plt.plot(y.numpy())
# BUG FIX: dat holds len(y)-10-1 = 889 windows, so dat[700:] yields 189
# predictions, but the original hard-coded range(700, 890) has 190 points,
# which makes matplotlib raise a dimension-mismatch error. Size the x-axis
# from the actual number of predictions instead.
plt.plot(range(700, 700 + len(predDat)), predDat)
plt.show()
import torch.nn as nn
from torch.autograd import *
import torch.optim as optim
import torch.nn.functional as F
import matplotlib.pyplot as plt
import numpy as np
def SeriesGen(N):
    """Sample sin over [1, N) with a fixed step of 0.01."""
    xs = torch.arange(1, N, 0.01)
    return torch.sin(xs)
def trainDataGen(seq, k):
    """Slide a length-k window over seq, pairing it with the one-step-ahead window."""
    windows = []
    total = len(seq)
    for start in range(total - k - 1):
        inputs = seq[start:start + k]
        targets = seq[start + 1:start + k + 1]
        windows.append((inputs, targets))
    return windows
def ToVariable(x):
    """Convert x to a float tensor wrapped in an autograd Variable."""
    tensor = torch.FloatTensor(x)
    return Variable(tensor)
# Build the sine series (~900 points over [1, 10)) and cut it into
# length-10 (input, target) windows for next-step prediction.
y = SeriesGen(10)
dat = trainDataGen(y.numpy(),10)
class LSTMpred(nn.Module):
    """One-layer LSTM followed by a linear head producing one value per time step."""

    def __init__(self, input_size, hidden_dim):
        super(LSTMpred, self).__init__()
        self.input_dim = input_size
        self.hidden_dim = hidden_dim
        self.lstm = nn.LSTM(input_size, hidden_dim)
        self.hidden2out = nn.Linear(hidden_dim, 1)
        self.hidden = self.init_hidden()

    def init_hidden(self):
        """Zero-initialised (h0, c0) for batch size 1."""
        h0 = Variable(torch.zeros(1, 1, self.hidden_dim))
        c0 = Variable(torch.zeros(1, 1, self.hidden_dim))
        return (h0, c0)

    def forward(self, seq):
        # nn.LSTM expects (seq_len, batch, features); batch is fixed at 1 here.
        n = len(seq)
        lstm_out, self.hidden = self.lstm(seq.view(n, 1, -1), self.hidden)
        return self.hidden2out(lstm_out.view(n, -1))
model = LSTMpred(1, 6)
loss_function = nn.MSELoss()
optimizer = optim.SGD(model.parameters(), lr=0.01)

# Ten passes over the first 700 windows; MSE on the full shifted window.
for epoch in range(10):
    print(epoch)
    for inp, outs in dat[:700]:
        inp, outs = ToVariable(inp), ToVariable(outs)
        optimizer.zero_grad()
        model.hidden = model.init_hidden()  # fresh state per window
        pred = model(inp)
        loss = loss_function(pred, outs)
        loss.backward()
        optimizer.step()
# Evaluate on the remaining windows: take each window's final-step output as
# the prediction, then overlay predictions on the true sine curve.
# NOTE(review): the hidden state carries over between eval windows (no
# init_hidden() call), unlike training — confirm this is intended.
predDat = []
for seq, trueVal in dat[700:]:
    seq = ToVariable(seq)
    trueVal = ToVariable(trueVal)
    predDat.append(model(seq)[-1].data.numpy()[0])

fig = plt.figure()
plt.plot(y.numpy())
# BUG FIX: range(700, 890) supplies 190 x-values but dat[700:] produces only
# len(dat) - 700 = 189 predictions, so matplotlib rejects the mismatched
# lengths. Derive the x-range from len(predDat) instead.
plt.plot(range(700, 700 + len(predDat)), predDat)
plt.show()
相关文章推荐
- PyTorch快速入门教程六(使用LSTM做图片分类)
- Pytorch LSTM 词性判断
- pytorch+lstm实现的pos
- Pytorch Exercise: Augmenting the LSTM part-of-speech tagger with character-level features
- PyTorch快速入门教程九(使用LSTM来做判别每个词的词性)
- Pytorch Bi-LSTM + CRF 代码详解
- PyTorch快速入门教程九(使用LSTM来做判别每个词的词性)
- pytorch GAN生成对抗网络
- pytorch: Variable detach 与 detach_
- PyTorch基本用法(五)——分类
- 89.89% on CIFAR-10 in Pytorch
- PyTorch:Mark一下找到的一些入门资源合集
- pytorch入门(3)pytorch-seq2seq模型
- PyTorch笔记5-save和load神经网络
- pytorch安装----CPU版的
- pytorch 0.3发布(0.3.0b0),更新信息以及更新步骤
- 浅谈将Pytorch模型从CPU转换成GPU
- Anaconda+Pytorch安装教程
- pytorch + visdom 处理简单分类问题
- pytorch 多GPU训练