# Pytorch贝叶斯深度学习库BLiTZ实现LSTM预测时序数据（二）

+关注继续查看

## 创建神经网络类

@variational_estimator
class NN(nn.Module):
    """Bayesian LSTM regressor: one BayesianLSTM layer followed by a linear head.

    Input:  x of shape (batch, seq_len, 1) — assumed from BayesianLSTM(1, 10); TODO confirm.
    Output: (batch, 1) prediction from the last time step.
    """

    def __init__(self):
        super(NN, self).__init__()
        # 1 input feature -> 10 hidden units with Bayesian (sampled) weights
        self.lstm_1 = BayesianLSTM(1, 10)
        # map the final hidden state to a single scalar prediction
        self.linear = nn.Linear(10, 1)

    def forward(self, x):
        x_, _ = self.lstm_1(x)

        # gathering only the latent end-of-sequence for the linear layer
        x_ = x_[:, -1, :]
        x_ = self.linear(x_)
        return x_

## 创建对象

# Build supervised (X, y) windows from the scaled closing prices.
Xs, ys = create_timestamps_ds(close_prices)
# Chronological split (shuffle=False): the test set is the most recent 25%.
X_train, X_test, y_train, y_test = train_test_split(Xs,
                                                    ys,
                                                    test_size=.25,
                                                    random_state=42,
                                                    shuffle=False)
ds = torch.utils.data.TensorDataset(X_train, y_train)
# BUG FIX: the training loop iterates `dataloader_train`, which was never
# created — wrap the dataset in a DataLoader.
dataloader_train = torch.utils.data.DataLoader(ds, batch_size=8, shuffle=True)

net = NN()

criterion = nn.MSELoss()
optimizer = optim.Adam(net.parameters(), lr=0.001)

## 训练循环

iteration = 0
for epoch in range(10):
    for i, (datapoints, labels) in enumerate(dataloader_train):
        # BUG FIX: reset gradients each step, otherwise they accumulate
        # across batches and corrupt the updates.
        optimizer.zero_grad()

        # ELBO loss: averages `criterion` over sample_nbr stochastic forward
        # passes plus the KL complexity cost of the Bayesian layers.
        loss = net.sample_elbo(inputs=datapoints,
                               labels=labels,
                               criterion=criterion,
                               sample_nbr=3)
        loss.backward()
        optimizer.step()

        iteration += 1
        # Periodic validation on the held-out chronological tail.
        if iteration % 250 == 0:
            preds_test = net(X_test)[:, 0].unsqueeze(1)
            loss_test = criterion(preds_test, y_test)
            print("Iteration: {} Val-loss: {:.4f}".format(str(iteration), loss_test))

## 评估模型并计算置信区间

# Align the unscaled prices with the windows: drop the first price and the
# first `window_size` entries consumed when building the sequences.
original = close_prices_unscaled[1:][window_size:]

df_pred = pd.DataFrame(original)
df_pred["Date"] = df.Date
# BUG FIX: the parsed dates belong on df_pred — the original assigned the
# result back onto `df`, leaving df_pred["Date"] as raw strings.
df_pred["Date"] = pd.to_datetime(df_pred["Date"])
df_pred = df_pred.reset_index()

def pred_stock_future(X_test,
                      future_length,
                      sample_nbr=10):
    """Autoregressively predict over the test window, re-seeding the input
    window with real data every `future_length` steps.

    Args:
        X_test: test sequences tensor, assumed (n, window_size, 1) — TODO confirm.
        future_length: number of steps predicted before re-seeding with
            real observations.
        sample_nbr: stochastic forward passes drawn per step.

    Returns:
        idx_pred: index positions of the predictions (for plotting).
        preds_test: list of length len(X_test); each entry is a list of
            `sample_nbr` sampled predictions for that step.
    """
    # sorry for that, window_size is a global variable, and so are X_train and Xs
    global window_size
    global X_train
    global Xs
    global scaler

    # auxiliary variables for the rolling prediction window
    preds_test = []
    test_begin = X_test[0:1, :, :]
    test_deque = deque(test_begin[0, :, 0].tolist(), maxlen=window_size)

    idx_pred = np.arange(len(X_train), len(Xs))

    # predict each step and append the samples to the list
    for i in range(len(X_test)):
        as_net_input = torch.tensor(test_deque).unsqueeze(0).unsqueeze(2)
        # each forward pass samples new Bayesian weights, giving a distribution
        pred = [net(as_net_input).cpu().item() for _ in range(sample_nbr)]

        # feed the mean prediction back as the newest window element
        test_deque.append(torch.tensor(pred).mean().cpu().item())
        preds_test.append(pred)

        if i % future_length == 0:
            # our inputs become the i index of our X_test:
            # re-seed the window with real data (helps with shape issues too)
            test_begin = X_test[i:i+1, :, :]
            test_deque = deque(test_begin[0, :, 0].tolist(), maxlen=window_size)

    return idx_pred, preds_test

def get_confidence_intervals(preds_test, ci_multiplier):
    """Compute the mean prediction and mean +/- ci_multiplier * std bounds,
    mapped back to price units via the global `scaler`.

    Args:
        preds_test: per-step lists of sampled predictions (as returned by
            pred_stock_future).
        ci_multiplier: band half-width in standard deviations.

    Returns:
        (pred_mean_unscaled, upper_bound_unscaled, lower_bound_unscaled),
        each an array of shape (n_steps, 1).
    """
    global scaler

    preds_test = torch.tensor(preds_test)

    # per-step mean and std across the stochastic samples (dim 1)
    pred_mean = preds_test.mean(1)
    pred_std = preds_test.std(1).detach().cpu().numpy()

    pred_std = torch.tensor((pred_std))

    upper_bound = pred_mean + (pred_std * ci_multiplier)
    lower_bound = pred_mean - (pred_std * ci_multiplier)

    # gather unscaled confidence intervals
    pred_mean_final = pred_mean.unsqueeze(1).detach().cpu().numpy()
    pred_mean_unscaled = scaler.inverse_transform(pred_mean_final)

    upper_bound_unscaled = upper_bound.unsqueeze(1).detach().cpu().numpy()
    upper_bound_unscaled = scaler.inverse_transform(upper_bound_unscaled)

    lower_bound_unscaled = lower_bound.unsqueeze(1).detach().cpu().numpy()
    lower_bound_unscaled = scaler.inverse_transform(lower_bound_unscaled)

    return pred_mean_unscaled, upper_bound_unscaled, lower_bound_unscaled

# Prediction/evaluation settings.
future_length = 7    # steps predicted before re-seeding with real data
sample_nbr = 4       # stochastic samples drawn per step
ci_multiplier = 10   # band half-width in standard deviations

idx_pred, preds_test = pred_stock_future(X_test, future_length, sample_nbr)
(pred_mean_unscaled,
 upper_bound_unscaled,
 lower_bound_unscaled) = get_confidence_intervals(preds_test, ci_multiplier)

# Fraction of real prices inside the band: a point is covered exactly when
# both comparisons agree (both bounds cannot fail at once since upper >= lower).
y = np.array(df.Close[-750:]).reshape(-1, 1)
under_upper = upper_bound_unscaled > y
over_lower = lower_bound_unscaled < y
total = (under_upper == over_lower)

print("{} our predictions are in our confidence interval".format(np.mean(total)))

## 检查输出图形

# White axes/tick colors so the chart reads on a dark background.
plt.rcParams.update({
    "ytick.color": "w",
    "xtick.color": "w",
    "axes.labelcolor": "w",
    "axes.edgecolor": "w",
})

plt.title("IBM Stock prices", color="white")

# Real closing prices over the whole frame.
plt.plot(df_pred.index, df_pred.Close, color='black', label="Real")

# Mean Bayesian prediction over the test region.
plt.plot(idx_pred, pred_mean_unscaled,
         label="Prediction for {} days, than consult".format(future_length),
         color="red")

# Shaded confidence band between the lower and upper bounds.
plt.fill_between(x=idx_pred,
                 y1=upper_bound_unscaled[:, 0],
                 y2=lower_bound_unscaled[:, 0],
                 facecolor='green', label="Confidence interval", alpha=0.5)

plt.legend()

# Zoomed view of the prediction region, with the band drawn first so the
# lines sit on top of it.
plt.rcParams.update({
    "ytick.color": "w",
    "xtick.color": "w",
    "axes.labelcolor": "w",
    "axes.edgecolor": "w",
})

plt.title("IBM Stock prices", color="white")

# Confidence band.
plt.fill_between(x=idx_pred,
                 y1=upper_bound_unscaled[:, 0],
                 y2=lower_bound_unscaled[:, 0],
                 facecolor='green', label="Confidence interval", alpha=0.75)

# Real prices restricted to the predicted span.
plt.plot(idx_pred, df_pred.Close[-len(pred_mean_unscaled):],
         label="Real", alpha=1, color='black', linewidth=0.5)

# Mean Bayesian prediction.
plt.plot(idx_pred, pred_mean_unscaled,
         label="Prediction for {} days, than consult".format(future_length),
         color="red", alpha=0.5)

plt.legend()

## 总结

|
1月前
|

31 0
|
3月前
|

【LSTM时序预测】基于北方苍鹰算法优化长短时记忆NGO-LSTM时序时间序列数据预测（含前后对比）附Matlab完整代码和数据
【LSTM时序预测】基于北方苍鹰算法优化长短时记忆NGO-LSTM时序时间序列数据预测（含前后对比）附Matlab完整代码和数据
63 0
|
4月前
|

71 1
|
5月前
|

【LSTM时序预测】基于长短期记忆网络的时间序列预测附matlab完整代码
【LSTM时序预测】基于长短期记忆网络的时间序列预测附matlab完整代码
94 1
|
5月前
|

194 0
|
8月前
|

LSTM时序预测 MATLAB实现SSA-LSTM、LSTM麻雀算法优化长短期记忆神经网络时间序列预测(含优化前后对比)
LSTM时序预测 MATLAB实现SSA-LSTM、LSTM麻雀算法优化长短期记忆神经网络时间序列预测(含优化前后对比)
157 0
|
8月前
|

【LSTM时序预测】基于长短记忆神经网络LSTM实现交通流时间序列单步预测含验证和预测未来附matlab代码
【LSTM时序预测】基于长短记忆神经网络LSTM实现交通流时间序列单步预测含验证和预测未来附matlab代码
98 0
|
11月前
|

157 0
|
11月前
|

【LSTM时序预测】基于卷积神经网络结合长短时记忆CNN-LSTM实现时序数据预测附matlab代码
【LSTM时序预测】基于卷积神经网络结合长短时记忆CNN-LSTM实现时序数据预测附matlab代码
182 0
|
12月前
|

【LSTM回归预测】基于灰狼算法优化长短时记忆GWO-LSTM时序时间序列数据预测（含前后对比）附Matlab代码
【LSTM回归预测】基于灰狼算法优化长短时记忆GWO-LSTM时序时间序列数据预测（含前后对比）附Matlab代码
134 0