
Implementing an RNN for Prediction with TensorFlow in Python

2018-03-28 16:59
import csv
import math
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import KFold
n_inputs = 6    # each time step has 6 input features
max_time = 24   # 24 time steps per sample
lstm_size = 35  # number of hidden units in the LSTM
n_classes = 24  # 24 values are predicted per sample
batch_size = 32 # samples per batch
n_batch = 32 // batch_size  # number of batches
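# Each sample is therefore a window of 24 time steps with 6 features (a 24 x 6 matrix),
# and the network predicts the 24 corresponding target values. The data is fed to the
# placeholder flattened to [-1, 6] and reshaped back to [-1, max_time, n_inputs]
# inside RNN() further below.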
i=0
j=[]
data = []
X = []
indicess = []
xback =24
with open(r'D:\晴天新.csv') as f:
    reader = csv.reader(f)
    for row in reader:
        if i == 0:
            i += 1      # skip the header row
            continue
        else:
            data.append(row[:])  # keep every column of the data row
data = np.array(data)
m, n = np.shape(data)
data = data.astype('float64')  # the CSV is read as strings, convert everything to float64
y = data[:,-1]
y1 = data[:,-1]
set1 = data[:,:-1]
set2 = data[:,-1]
def create_interval_dataset(dataset1, dataset2, xback):
    # slice the feature and target series into non-overlapping windows of length xback
    dataX, dataY = [], []
    for i in range(0, len(dataset1) - xback, 24):
        dataX.append(dataset1[i:i + xback])
        dataY.append(dataset2[i:i + xback])
    return np.asarray(dataX), np.asarray(dataY)

dataX, dataY = create_interval_dataset(set1, set2, 24)
dataY = np.reshape(dataY, [-1, 24])
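# Optional sanity check (not in the original script): each non-overlapping window of
# 24 rows becomes one sample, so dataX is expected to have shape (num_samples, 24, 6)
# and dataY shape (num_samples, 24).
# print(dataX.shape, dataY.shape)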
MAPE2 = []   # per-fold mean absolute percentage error
RMSE2 = []   # per-fold root mean squared error
MABE2 = []   # per-fold mean absolute bias error
kf = KFold(n_splits=10, random_state=None, shuffle=False)
for train_index, test_index in kf.split(dataX):
    print("TRAIN:", train_index, "TEST:", test_index)
    X_tr, X_te = dataX[train_index], dataX[test_index]
    y_tr, y_te = dataY[train_index], dataY[test_index]
    X_tr = np.reshape(X_tr, [-1, 6])
    X_te = np.reshape(X_te, [-1, 6])
    y_tr = np.reshape(y_tr, [-1, 24])
    y_te = np.reshape(y_te, [-1, 24])
    # scale features and targets to [0, 1]; separate scalers are kept so that
    # predictions can later be mapped back to the original range
    scaler = MinMaxScaler(feature_range=(0, 1))
    scaler1 = MinMaxScaler(feature_range=(0, 1))
    X_tr = scaler.fit_transform(X_tr)
    X_te = scaler1.fit_transform(X_te)
    scaler2 = MinMaxScaler(feature_range=(0, 1))
    y_tr = scaler2.fit_transform(y_tr.reshape(-1, 24))
    scaler3 = MinMaxScaler(feature_range=(0, 1))
    y_te = scaler3.fit_transform(y_te.reshape(-1, 24))
    y_tr = np.reshape(y_tr, [-1, 24])
    y_te = np.reshape(y_te, [-1, 24])
    y_te = y_te.astype('float64')
    y_tr = y_tr.astype('float64')
    # build a fresh graph for this fold so that variables created in the
    # previous fold do not collide with the new ones
    tf.reset_default_graph()
    x = tf.placeholder(tf.float32, [None, 6])
    y = tf.placeholder(tf.float32, [None, 24])
    weights = tf.Variable(tf.truncated_normal([lstm_size, n_classes], stddev=0.1))
    biases = tf.Variable(tf.constant(0.1, shape=[n_classes]))
    # Define the RNN network
    def RNN(X, weights, biases):
        # reshape the flat input back to [batch_size, max_time, n_inputs]
        inputs = tf.reshape(X, [-1, max_time, n_inputs])
        print("the shape of inputs", inputs.shape)
        # basic LSTM cell
        lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(lstm_size)
        # final_state[0] is the cell state, final_state[1] is the hidden state
        outputs, final_state = tf.nn.dynamic_rnn(lstm_cell, inputs, dtype=tf.float32)
        print("the shape of outputs", outputs.shape)
        print("the shape of weights", weights.shape)
        # take the output of the last time step, shape [batch_size, lstm_size],
        # so the result has shape [batch_size, n_classes]
        results = tf.matmul(outputs[:, -1, :], weights) + biases
        return results
    # compute the network output
    prediction = RNN(x, weights, biases)
    # loss function: mean squared error (the variable name is kept from the original code)
    cross_entropy = tf.reduce_mean(tf.square(y - prediction))
    # optimize with Adam
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
    # the classification-style accuracy check below is not meaningful for regression, left commented out
    # correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))  # argmax returns the position of the largest value
    # accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))  # cast correct_prediction to float32
    # initialize the variables
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        for _ in range(400):
            sess.run(train_step, feed_dict={x: X_tr, y: y_tr})
        # predictions for the test and training folds (still in the scaled range)
        pred_X1 = sess.run(prediction, feed_dict={x: X_te})
        pred_X = sess.run(prediction, feed_dict={x: X_tr})
    # map targets and predictions back to the original scale with the matching scalers
    y_tr = scaler2.inverse_transform(y_tr.reshape(-1, 24))
    y_te = scaler3.inverse_transform(y_te.reshape(-1, 24))
    pred_X = scaler2.inverse_transform(pred_X.reshape(-1, 24))    # training-fold predictions
    pred_X1 = scaler3.inverse_transform(pred_X1.reshape(-1, 24))  # test-fold predictions
    y_tr = y_tr.flatten()
    y_te = y_te.flatten()
    pred_X = pred_X.flatten()
    pred_X1 = pred_X1.flatten()
    # error metrics on the test fold; indices with a zero target are skipped so MAPE is well defined
    fei01 = []
    for i in range(len(y_te)):
        if y_te[i] != 0:
            fei01.append(i)
    xina1 = []
    xinb1 = []
    for i in fei01:
        xina1.append(y_te[i])      # true test values
        xinb1.append(pred_X1[i])   # predicted test values
    # MAPE, averaged over the non-zero points
    v = list(map(lambda x: abs((x[0] - x[1]) / x[0]), zip(xina1, xinb1)))
    loss = sum(v) * 100 / len(xina1)
    MAPE2.append(loss)
    # RMSE
    v = list(map(lambda x: pow(x[0] - x[1], 2), zip(xina1, xinb1)))
    loss = math.sqrt(sum(v) / len(xina1))
    RMSE2.append(loss)
    # MABE (mean absolute bias error)
    v = list(map(lambda x: abs(x[0] - x[1]), zip(xina1, xinb1)))
    loss = sum(v) / len(xina1)
    MABE2.append(loss)
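
After the 10-fold loop finishes, the per-fold errors collected in MAPE2, RMSE2 and MABE2 can be averaged to report overall performance. A minimal sketch of such a summary (not part of the original script):

print("mean MAPE over folds: %.3f%%" % np.mean(MAPE2))
print("mean RMSE over folds: %.3f" % np.mean(RMSE2))
print("mean MABE over folds: %.3f" % np.mean(MABE2))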