# self_example/pytorch_example/RUL/otherIdea/dctChannelEmbedLSTM/test.py
# -*- encoding:utf-8 -*-
'''
@Author : dingjiawen
@Date : 2023/11/10 16:27
@Usage :
@Desc :
'''
import numpy as np
import torch
from RUL.otherIdea.dctChannelEmbedLSTM.loadData import getDataset, getTotalData
from RUL.otherIdea.dctChannelEmbedLSTM.model import PredictModel
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt


def plot_prediction(total_data, predicted_data_easy, predicted_data_hard):
    pic1 = plt.figure(figsize=(8, 6), dpi=200)
    '''Path of the saved model parameters'''
    from matplotlib import rcParams
    config = {
        "font.family": 'Times New Roman',  # font family
        "axes.unicode_minus": False,       # render minus signs correctly
        "axes.labelsize": 13
    }
    rcParams.update(config)
    # Easy prediction plot
    plt.subplot(2, 1, 1)
    plt.plot(total_data)
    plt.plot(predicted_data_easy)
    plt.title('Easy Prediction')
    plt.xlabel('time')
    plt.ylabel('loss')
    # plt.legend(loc='upper right')
    # Hard prediction plot
    plt.subplot(2, 1, 2)
    plt.plot(total_data)
    plt.plot(predicted_data_hard)
    plt.title('Hard Prediction')
    plt.xlabel('time')
    plt.ylabel('loss')
    # plt.legend(loc='upper right')
    # plt.scatter()
    plt.show()


# Predict forward using only the most recently predicted point (autoregressive prediction).
def predictOneByOne(model, train_data, predict_num=50):
    # Start from the last batch of training data.
    each_predict_data = train_data
    device = train_data.device
    predicted_list = np.empty(shape=(predict_num, 1))  # (predict_num, 1)
    # all_data = total_data # (1201,)
    for each_predict in range(predict_num):
        # predicted_data.shape : (1,1)
        predicted_data = model(each_predict_data).cpu().detach().numpy()[-1]  # (batch_size, filter_num, 1)
        predicted_list[each_predict] = predicted_data
        each_predict_data = each_predict_data.cpu().numpy()
        # (1,1) => (10,1)
        # Intermediate concatenation: (1) => (10) => (40,10) => (30,40,10)
        a = np.concatenate([each_predict_data[-1, -1, 1:], predicted_data], axis=0)
        b = np.concatenate([each_predict_data[-1, 1:, :], np.expand_dims(a, axis=0)], axis=0)
        c = np.concatenate([each_predict_data[1:, :, :], np.expand_dims(b, axis=0)], axis=0)
        each_predict_data = torch.tensor(c).to(device)
    return np.squeeze(predicted_list)
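

# Illustrative sketch (not called anywhere): the roll performed inside predictOneByOne
# is easiest to see on dummy arrays. The shapes below (batch_size=32, hidden_num=40,
# feature=10) are assumptions taken from the call in __main__; `_demo_window_roll`,
# `window` and `new_point` are hypothetical names used only for this demonstration.
def _demo_window_roll():
    batch_size, hidden_num, feature = 32, 40, 10
    window = np.zeros((batch_size, hidden_num, feature), dtype=np.float32)
    new_point = np.ones((1,), dtype=np.float32)  # stands in for the model's newest prediction
    # Newest feature vector: drop the oldest value of the last time step, append the prediction.
    a = np.concatenate([window[-1, -1, 1:], new_point], axis=0)                 # (feature,)
    # Newest time window: drop the oldest time step, append the new feature vector.
    b = np.concatenate([window[-1, 1:, :], np.expand_dims(a, axis=0)], axis=0)  # (hidden_num, feature)
    # Newest batch: drop the oldest sample, append the new time window.
    c = np.concatenate([window[1:, :, :], np.expand_dims(b, axis=0)], axis=0)   # (batch_size, hidden_num, feature)
    assert c.shape == (batch_size, hidden_num, feature)
    return c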


def test(hidden_num, feature, predict_num, batch_size, save_path, is_single=True, is_norm=False):
    total_data, total_dataset = getTotalData(hidden_num, feature, is_single=is_single, is_norm=is_norm)
    train_dataset, val_dataset = getDataset(hidden_num, feature, predict_num=predict_num, is_single=is_single,
                                            is_norm=is_norm)
    train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=False)
    val_loader = DataLoader(dataset=val_dataset, batch_size=batch_size, shuffle=False)
    # Load the network.
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = PredictModel(input_dim=feature).to(device)
    model.load_state_dict(
        torch.load(save_path, map_location=device)
    )
    print(model)
    params_num = sum(param.numel() for param in model.parameters())
    print('Number of parameters: {}'.format(params_num))
    model.eval()
    predicted_data_easy = total_data[:hidden_num + feature, ]
    predicted_data_hard = total_data[:hidden_num + feature, ]
    with torch.no_grad():
        for batch_idx, (data, label) in enumerate(train_loader):
            data, label = data.to(device), label.to(device)
            last_train_data = data
            each_predicted_data = torch.squeeze(model(data)).cpu().detach().numpy()
            predicted_data_easy = np.concatenate(
                [predicted_data_easy, each_predicted_data],
                axis=0)
            predicted_data_hard = np.concatenate(
                [predicted_data_hard, each_predicted_data],
                axis=0)
        # Easy version: every prediction is made from known (ground-truth) inputs taken from the loader.
        for batch_idx, (data, label) in enumerate(val_loader):
            data, label = data.to(device), label.to(device)
            each_predicted_data = torch.squeeze(model(data)).cpu().detach().numpy()
            predicted_data_easy = np.concatenate(
                [predicted_data_easy, each_predicted_data],
                axis=0)
        # Hard version: every prediction is based on the previous predictions (autoregressive).
        predicted_data_hard = np.concatenate([predicted_data_hard,
                                              predictOneByOne(model, last_train_data, predict_num=predict_num)],
                                             axis=0)
    plot_prediction(total_data, predicted_data_easy, predicted_data_hard)


if __name__ == '__main__':
    test(40, 10, 50, 32,
         "./parameters\dctLSTM_hidden40_feature10_predict50_epoch80_trainLoss0.05818896507844329_valLoss0.21667905896902084.pkl"
         )