leetcode update
This commit is contained in:
parent 88a7aefa97
commit 82d051a82a

@@ -0,0 +1,235 @@
package com.markilue.leecode.greedy;

import org.junit.Test;

/**
 * @BelongsProject: Leecode
 * @BelongsPackage: com.markilue.leecode.greedy
 * @Author: markilue
 * @CreateTime: 2022-10-25 09:54
 * @Description: TODO LeetCode problem 53, Maximum Subarray:
 * Given an integer array nums, find the contiguous subarray (containing at least one element) with the largest sum, and return that sum.
 * A subarray is a contiguous part of the array.
 * @Version: 1.0
 */
public class MaxSubArray {

    @Test
    public void test(){
        int[] nums = {-2, 1, -3, 4, -1, 2, 1, -5, 4};
        System.out.println(maxSubArray3(nums));
    }

    @Test
    public void test1(){
        int[] nums = {1};
        System.out.println(maxSubArray(nums));
    }

    @Test
    public void test2(){
        int[] nums = {5, 4, -1, 7, 8};
        System.out.println(maxSubArray2(nums));
    }

    @Test
    public void test3(){
        int[] nums = {-2, -1};
        System.out.println(maxSubArray(nums));
    }

    /**
     * Track, for every position, the best sum seen so far (max) and the running sum (total).
     * A non-negative number can simply be added; for a negative number, decide whether to
     * absorb it into the running sum or restart from it.
     * [-2,1,-3,4,-1,2,1,-5,4]
     * max:  [-2,1,1,4,4,5,6,6,6]
     * If the running total is non-negative, keep accumulating; if it is negative, discard it.
     * total:[-2,1,-2,4,3,5,6,1,5]
     * Runtime beats 43.09%, memory beats 94.95%. Possible optimization: update max only once,
     * when total reaches a new peak (see maxSubArray1).
     */
    public int maxSubArray(int[] nums) {
        if (nums.length == 1) {
            return nums[0];
        }

        int max = nums[0]; // best sum seen so far
        int total = nums[0]; // running sum ending at the previous position

        for (int i = 1; i < nums.length; i++) {
            if (nums[i] >= 0) {
                // current number is non-negative:
                // if total is negative, restart the sum from the current element;
                // otherwise compare total + nums[i] against max to decide the new max
                if (total >= 0) {
                    max = max > total + nums[i] ? max : total + nums[i];
                    total = total + nums[i];
                } else {
                    max = max > nums[i] ? max : nums[i];
                    total = nums[i];
                }
            } else {
                // current number is negative
                max = max > nums[i] ? max : nums[i];
                if (total < nums[i]) {
                    total = nums[i];
                } else {
                    total += nums[i];
                }
            }
        }
        return max;
    }

    /**
     * Same idea as maxSubArray, but with the optimization applied:
     * max is updated in a single place, after total has been settled for position i.
     * [-2,1,-3,4,-1,2,1,-5,4]
     * max:  [-2,1,1,4,4,5,6,6,6]
     * total:[-2,1,-2,4,3,5,6,1,5]
     * Runtime beats 43.09%, memory beats 94.95%.
     */
    public int maxSubArray1(int[] nums) {
        if (nums.length == 1) {
            return nums[0];
        }

        int max = nums[0]; // best sum seen so far
        int total = nums[0]; // running sum ending at the previous position

        for (int i = 1; i < nums.length; i++) {
            if (nums[i] >= 0) {
                // current number is non-negative: restart from it if total is negative
                if (total >= 0) {
                    total = total + nums[i];
                } else {
                    total = nums[i];
                }
            } else {
                // current number is negative
                if (total < nums[i]) {
                    total = nums[i];
                } else {
                    total += nums[i];
                }
            }
            if (max < total) {
                max = total;
            }
        }
        return max;
    }

    /**
     * Greedy algorithm from 代码随想录:
     * as soon as the running count drops below 0, stop accumulating and restart from nums[i+1].
     * Runtime beats 100%, memory beats 93.21%.
     */
    public int maxSubArray2(int[] nums) {
        int max = Integer.MIN_VALUE; // best sum seen so far
        int total = 0; // running sum

        for (int i = 0; i < nums.length; i++) {
            total += nums[i];

            // take the running-sum maximum as max
            if (max < total) {
                max = total;
            }

            if (total <= 0) {
                total = 0;
            }
        }
        return max;
    }

    /**
     * Official dynamic-programming solution:
     * the answer is max{f(i)}, where f(i) = max{f(i-1) + nums[i], nums[i]}.
     * Runtime beats 100%, memory beats 93.21%.
     */
    public int maxSubArray3(int[] nums) {
        int pre = 0, maxAns = nums[0];
        for (int x : nums) {
            pre = Math.max(pre + x, x);
            maxAns = Math.max(maxAns, pre);
        }
        return maxAns;
    }

    /**
     * Official divide-and-conquer solution:
     * Define an operation get(a, l, r) that returns the maximum subarray sum within interval
     * [l, r] of sequence a; the final answer is then get(nums, 0, nums.length - 1).
     * To divide, take m = (l + r) / 2 for an interval [l, r] and solve [l, m] and [m+1, r]
     * recursively; recursion bottoms out when the interval length shrinks to 1 and then
     * "bubbles back up". The key questions are how to merge the information of [l, m] and
     * [m+1, r] into that of [l, r]: which per-interval quantities do we maintain, and how
     * do we combine them? For an interval [l, r] we maintain four quantities:
     * lSum: the maximum subarray sum within [l, r] that starts at l
     * rSum: the maximum subarray sum within [l, r] that ends at r
     * mSum: the maximum subarray sum within [l, r]
     * iSum: the sum of the whole interval [l, r]
     * Runtime beats 100%, memory beats 93.21%.
     */
    public int maxSubArray4(int[] nums) {
        return getInfo(nums, 0, nums.length - 1).mSum;
    }

    public class Status {
        public int lSum, rSum, mSum, iSum;

        public Status(int lSum, int rSum, int mSum, int iSum) {
            this.lSum = lSum;
            this.rSum = rSum;
            this.mSum = mSum;
            this.iSum = iSum;
        }
    }

    public Status getInfo(int[] a, int l, int r) {
        if (l == r) {
            return new Status(a[l], a[l], a[l], a[l]);
        }
        int m = (l + r) >> 1;
        // divide
        Status lSub = getInfo(a, l, m);
        Status rSub = getInfo(a, m + 1, r);
        // merge
        return pushUp(lSub, rSub);
    }

    // merge step
    public Status pushUp(Status l, Status r) {
        int iSum = l.iSum + r.iSum;
        // best prefix sum of the merged interval: either the left half's best prefix,
        // or the entire left half plus the right half's best prefix
        int lSum = Math.max(l.lSum, l.iSum + r.lSum);
        // symmetric for the best suffix
        int rSum = Math.max(r.rSum, r.iSum + l.rSum);
        // overall best: either entirely inside one half, or the left half's best suffix
        // joined with the right half's best prefix
        int mSum = Math.max(Math.max(l.mSum, r.mSum), l.rSum + r.lSum);
        return new Status(lSum, rSum, mSum, iSum);
    }

}
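All four Java variants above compute the same recurrence f(i) = max(f(i-1) + nums[i], nums[i]). For cross-checking them, here is a minimal Python sketch of that recurrence (an illustration, not part of the committed code):

def max_sub_array(nums):
    best = cur = nums[0]
    for x in nums[1:]:
        cur = max(cur + x, x)    # f(i) = max(f(i-1) + nums[i], nums[i])
        best = max(best, cur)
    return best

assert max_sub_array([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6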
@@ -0,0 +1,10 @@
# -*- coding: utf-8 -*-

# coding: utf-8

'''
@Author : dingjiawen
@Date : 2022/10/24 16:40
@Usage :
@Desc :
'''
@@ -0,0 +1,220 @@
# -*- coding: utf-8 -*-

# coding: utf-8
import matplotlib.pyplot as plt
import numpy as np
import random
import pandas as pd
import seaborn as sns
from condition_monitoring.data_deal.loadData import read_data

'''
@Author : dingjiawen
@Date : 2022/10/20 21:35
@Usage : test plotting-related settings
@Desc :
'''

# raw strings avoid accidental backslash escapes in the Windows paths
result_file_name = r"E:\self_example\TensorFlow_eaxmple\Model_train_test\condition_monitoring\self_try\compare\mse\RNet_C\RNet_C_timestamp120_feature10_result.csv"

mse_file_name = r"E:\self_example\TensorFlow_eaxmple\Model_train_test\condition_monitoring\self_try\compare\mse\RNet_D\RNet_D_timestamp120_feature10_mse_predict1.csv"

max_file_name = r"E:\self_example\TensorFlow_eaxmple\Model_train_test\condition_monitoring\self_try\compare\mse\RNet_D\RNet_D_timestamp120_feature10_max_predict1.csv"

source_path = r"G:\data\SCADA数据\jb4q_8_delete_total_zero.csv"


def plot_result(result_data):
    parameters = {
        'figure.dpi': 600,
        'figure.figsize': (2.8, 2),
        'savefig.dpi': 600,
        'xtick.direction': 'in',
        'ytick.direction': 'in',
        'xtick.labelsize': 5,
        'ytick.labelsize': 5,
        'legend.fontsize': 5,
    }
    plt.rcParams.update(parameters)
    plt.figure()
    plt.rc('font', family='Times New Roman')  # global font style
    font1 = {'family': 'Times New Roman', 'weight': 'normal', 'size': 5}  # font size and family for axis labels
    plt.scatter(list(range(result_data.shape[0])), result_data, c='black', s=0.5, label="predict")
    # draw the horizontal failure-threshold line at y = 0.5
    plt.axhline(0.5, c='red', label='Failure threshold', lw=1)
    # arrow pointing at the horizontal line above
    # plt.arrow(result_data.shape[0]*2/3, 0.55, 2000, 0.085, width=0.00001, ec='red',length_includes_head=True)
    # plt.text(result_data.shape[0] * 2 / 3 + 1000, 0.7, "real fault", fontsize=5, color='red',
    #          verticalalignment='top')
    # plt.text(0, 0.55, "Threshold", fontsize=5, color='red',
    #          verticalalignment='top')

    plt.axvline(result_data.shape[0] * 2 / 3, c='blue', ls='-.', lw=0.5, label='real fault')
    # plt.axvline(415548, c='blue', ls='-.', lw=0.5, label='real fault')
    # plt.xticks(range(6), ('06/09/17', '12/09/17', '18/09/17', '24/09/17', '29/09/17'))  # x-axis tick labels
    plt.text(result_data.shape[0] * 5 / 6, 0.4, "Fault", fontsize=5, color='black', verticalalignment='top',
             horizontalalignment='center',
             bbox={'facecolor': 'grey',
                   'pad': 1.5,
                   'linewidth': 0.1}, fontdict=font1)
    plt.text(result_data.shape[0] * 1 / 3, 0.6, "Norm", fontsize=5, color='black', verticalalignment='top',
             horizontalalignment='center',
             bbox={'facecolor': 'grey',
                   'pad': 1.5, 'linewidth': 0.1}, fontdict=font1)

    indices = [result_data.shape[0] * i / 4 for i in range(5)]
    classes = ['01/09/17', '08/09/17', '15/09/17', '22/09/17', '29/09/17']

    indices1 = [i / 4 for i in range(5)]
    classes1 = ['0', '0.25', 'Threshold', '0.75', '1']

    # first argument is the tick positions, second is the tick labels
    plt.xticks([index + 0.5 for index in indices], classes, rotation=25)  # rotation tilts the x labels
    # pad adjusts the distance between tick labels and the axis
    plt.tick_params(bottom=True, top=False, left=True, right=False, direction='inout', length=2, width=0.5, pad=1)
    # plt.yticks([index for index in indices1], classes1)
    plt.ylabel('Confidence', fontsize=5)
    plt.xlabel('Time', fontsize=5)
    plt.tight_layout()
    # plt.legend(loc='best',edgecolor='black',fontsize=3)
    plt.legend(loc='best', frameon=False, fontsize=3)
    # plt.grid()
    plt.show()
    pass


def plot_MSE(total_MSE, total_max):
    parameters = {
        'figure.dpi': 600,
        'figure.figsize': (2.7, 2),
        'savefig.dpi': 600,
        'xtick.direction': 'in',
        'ytick.direction': 'in',
        'xtick.labelsize': 5,
        'ytick.labelsize': 5,
        'legend.fontsize': 5,
    }
    plt.rcParams.update(parameters)
    plt.figure()
    plt.rc('font', family='Times New Roman')  # global font style
    font1 = {'family': 'Times New Roman', 'weight': 'normal', 'size': 5}  # font size and family for axis labels

    result_data = total_MSE
    # horizontal threshold line
    # plt.axhline(0.5, c='red', label='Failure threshold',lw=1)
    # arrow pointing at the horizontal line above
    # plt.arrow(result_data.shape[0]*2/3, 0.55, 2000, 0.085, width=0.00001, ec='red',length_includes_head=True)
    # plt.text(result_data.shape[0] * 2 / 3 + 1000, 0.7, "real fault", fontsize=5, color='red',
    #          verticalalignment='top')

    plt.axvline(result_data.shape[0] * 2 / 3, c='blue', ls='-.', lw=0.5, label="real fault")

    indices = [result_data.shape[0] * i / 4 for i in range(5)]
    classes = ['01/09/17', '08/09/17', '15/09/17', '22/09/17', '29/09/17']

    plt.xticks([index + 0.5 for index in indices], classes, rotation=25)  # rotation tilts the x labels
    plt.ylabel('Mse', fontsize=5)
    plt.xlabel('Time', fontsize=5)
    plt.tight_layout()

    plt.plot(total_max, "--", label="max", linewidth=0.5)
    plt.plot(total_MSE, label="mse", linewidth=0.5, color='purple')
    plt.legend(loc='best', frameon=False, fontsize=5)

    # plt.plot(total_mean)
    # plt.plot(min)
    plt.show()
    pass


def plot_Corr(data, label):
    parameters = {
        'figure.dpi': 600,
        'figure.figsize': (2.8, 2),
        'savefig.dpi': 600,
        'xtick.direction': 'inout',
        'ytick.direction': 'inout',
        'xtick.labelsize': 3,
        'ytick.labelsize': 3,
        'legend.fontsize': 5,
    }
    plt.rcParams.update(parameters)
    plt.figure()
    plt.rc('font', family='Times New Roman')  # global font style
    font1 = {'family': 'Times New Roman', 'weight': 'normal', 'size': 4}  # font size and family for axis labels

    print("Computing Pearson correlation coefficients")
    pd_data = pd.DataFrame(data)
    person = pd_data.corr()
    print(person)
    # draw the heatmap
    cmap = sns.heatmap(person, annot=True, annot_kws={
        'fontsize': 2.6
    })
    classes = ['Gs', 'Gio', 'Gip', 'Gp', 'Gwt', 'En', 'Gft', 'Grt', 'Gwt', 'Et', 'Rs', 'Ap', 'Ws', 'Dw', 'Ges', 'Gt', 'Vx', 'Vy']
    indices = range(len(person))
    plt.title("Heatmap of correlation coefficient matrix", size=6, fontdict=font1)
    # pad adjusts the distance between tick labels and the axis
    plt.tick_params(bottom=False, top=False, left=False, right=False, direction='inout', length=2, width=0.5, pad=1)
    plt.xticks([index + 0.5 for index in indices], classes, rotation=0)
    plt.yticks([index + 0.5 for index in indices], classes, rotation=0)

    # adjust the colorbar labels:
    cbar = cmap.collections[0].colorbar
    cbar.ax.tick_params(labelsize=4, labelcolor="black", length=2, width=0.5, pad=1)
    cbar.ax.set_ylabel(ylabel="color scale", color="black", loc="center", fontdict=font1)
    # plt.axis('off')  # hide the axes

    plt.savefig('./corr.png')

    plt.show()
    pass


def test_result(file_name: str = result_file_name):
    # result_data = np.recfromcsv(file_name)
    result_data = np.loadtxt(file_name, delimiter=",")
    result_data = np.array(result_data)
    print(len(result_data))
    threshold = len(result_data) - 1
    print(threshold)
    print(threshold * 2 / 3)
    # compute the false-alarm and missed-alarm rates
    positive_rate = result_data[:int(threshold * 2 / 3)][result_data[:int(threshold * 2 / 3)] < 0.66].__len__() / (threshold * 2 / 3)
    negative_rate = result_data[int(threshold * 2 / 3):][result_data[int(threshold * 2 / 3):] > 0.66].__len__() / (threshold * 1 / 3)
    print("False alarm rate:", positive_rate)
    print("Missed alarm rate:", negative_rate)

    # plot
    data = np.zeros([208, ])
    result_data = np.concatenate([result_data, data], axis=0)
    print(result_data)
    print(result_data.shape)
    plot_result(result_data)


def test_mse(mse_file_name: str = mse_file_name, max_file_name: str = max_file_name):
    mse_data = np.loadtxt(mse_file_name, delimiter=",")
    max_data = np.loadtxt(max_file_name, delimiter=',')
    mse_data = np.array(mse_data)
    max_data = np.array(max_data)
    print(mse_data.shape)
    print(max_data.shape)
    plot_MSE(mse_data, max_data)


def test_corr(file_name=source_path, N=10):
    needed_data, label = read_data(file_name=file_name, isNew=False)
    print(needed_data)
    print(needed_data.shape)
    # plot_original_data(needed_data)
    person = plot_Corr(needed_data, label)
    person = np.array(person)
    pass


if __name__ == '__main__':
    # test_mse()
    test_result(file_name=r'E:\self_example\TensorFlow_eaxmple\Model_train_test\condition_monitoring\self_try\compare\mse\RNet_345\RNet_345_timestamp120_feature10_result.csv')
    # test_corr()
    pass
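The rate computation in test_result is easy to misread inline. A minimal sketch of the same logic with the hard-coded 0.66 confidence threshold and the 2/3 fault-onset split made explicit parameters (names are illustrative, not from the commit):

import numpy as np

def alarm_rates(confidence, threshold=0.66, fault_start_frac=2 / 3):
    n = len(confidence)
    split = int(n * fault_start_frac)
    healthy, faulty = confidence[:split], confidence[split:]
    false_alarm = np.mean(healthy < threshold)    # healthy samples flagged as faulty
    missed_alarm = np.mean(faulty > threshold)    # fault samples flagged as healthy
    return false_alarm, missed_alarm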
@@ -61,6 +61,16 @@ save_step_two_name = "../hard_model/two_weight/{0}_timestamp{1}_feature{2}_weigh
                                                                                   feature_num,
                                                                                   batch_size,
                                                                                   EPOCH)
save_mse_name = "./mse/{0}/{0}_timestamp{1}_feature{2}_mse.csv".format(model_name,
                                                                       time_stamp,
                                                                       feature_num,
                                                                       batch_size,
                                                                       EPOCH)
save_max_name = "./mse/{0}/{0}_timestamp{1}_feature{2}_max.csv".format(model_name,
                                                                       time_stamp,
                                                                       feature_num,
                                                                       batch_size,
                                                                       EPOCH)

# save_name = "../model/joint/{0}_timestamp{1}_feature{2}.h5".format(model_name,
#                                                                    time_stamp,
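Note that these format strings only reference placeholders {0}-{2}; the trailing batch_size and EPOCH arguments are legal but unused, because str.format silently ignores surplus positional arguments:

# values here are illustrative:
path = "./mse/{0}/{0}_timestamp{1}_feature{2}_mse.csv".format("RNet_D", 120, 10, 32, 100)
print(path)  # ./mse/RNet_D/RNet_D_timestamp120_feature10_mse.csv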
@@ -375,7 +385,8 @@ def DCConv_Model():

# healthy_data is the healthy data used to fix the threshold; all_data is the full data used to produce the model's results
def getResult(model: tf.keras.Model, healthy_data, healthy_label, unhealthy_data, unhealthy_label, isPlot: bool = False,
              isSave: bool = False):
              isSave: bool = True, predictI: int = 1):
    # TODO compute the MSE to determine the threshold

    mse, mean, max = get_MSE(healthy_data, healthy_label, model)
@@ -384,30 +395,48 @@ def getResult(model: tf.keras.Model, healthy_data, healthy_label, unhealthy_data
    total, = mse.shape
    faultNum = 0
    faultList = []
    for i in range(total):
        if (mse[i] > max[i]):
            faultNum += 1
            faultList.append(mse[i])
    faultNum = mse[mse[:] > max[0]].__len__()
    # for i in range(total):
    #     if (mse[i] > max[i]):
    #         faultNum += 1
    #         faultList.append(mse[i])

    fault_rate = faultNum / total
    print("False alarm rate:", fault_rate)

    # missed-alarm rate computation
    missNum = 0
    missList = []
    mse1 = get_MSE(unhealthy_data, unhealthy_label, model, isStandard=False)

    total_mse = np.concatenate([mse, mse1], axis=0)
    total_max = np.broadcast_to(max[0], shape=[total_mse.shape[0], ])
    # min = np.broadcast_to(min,shape=[dims,])
    total_mean = np.broadcast_to(mean[0], shape=[total_mse.shape[0], ])
    if isSave:
        save_mse_name1 = save_mse_name
        save_max_name1 = save_max_name

        np.savetxt(save_mse_name1, total_mse, delimiter=',')
        np.savetxt(save_max_name1, total_max, delimiter=',')

    all, = mse1.shape

    flag = True
    for i in range(all):
        if (mse1[i] < max[0] and flag):
            missNum += 1
            missList.append(mse1[i])
        elif (mse1[i] > max[0]):
            flag = False
    print("all:", all)

    # unlike the flag loop above (which stops counting after the first exceedance),
    # this counts every sub-threshold point
    missNum = mse1[mse1[:] < max[0]].__len__()

    print("all:", all)
    miss_rate = missNum / all
    print("Missed alarm rate:", miss_rate)

    plt.figure(random.randint(1, 100))
    plt.plot(total_max)
    plt.plot(total_mse)
    plt.plot(total_mean)
    # plt.plot(min)
    plt.show()
    pass
@@ -451,9 +480,16 @@ if __name__ == '__main__':
        total_data[healthy_size - 2 * unhealthy_size:unhealthy_date, :], is_Healthy=True)

    newModel = tf.keras.models.load_model(save_name)
    # single-shot test
    # getResult(newModel,
    #           healthy_data=train_data_healthy[healthy_size - 2 * unhealthy_size:healthy_size - 2 * unhealthy_size + 200,
    #                        :],
    #           healthy_label=train_label1_healthy[
    #                         healthy_size - 2 * unhealthy_size:healthy_size - 2 * unhealthy_size + 200, :],
    #           unhealthy_data=train_data_unhealthy[:200, :], unhealthy_label=train_label1_unhealthy[:200, :],isSave=True)
    getResult(newModel, healthy_data=train_data_healthy[healthy_size - 2 * unhealthy_size:, :],
              healthy_label=train_label1_healthy[healthy_size - 2 * unhealthy_size:, :],
              unhealthy_data=train_data_unhealthy, unhealthy_label=train_label1_unhealthy)
              unhealthy_data=train_data_unhealthy, unhealthy_label=train_label1_unhealthy,isSave=True)
    # mse, mean, max = get_MSE(train_data_healthy[healthy_size - 2 * unhealthy_size:, :],
    #                          train_label1_healthy[healthy_size - 2 * unhealthy_size:, :], new_model=newModel)
    pass

@@ -23,6 +23,7 @@ from model.CommonFunction.CommonFunction import *
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import load_model, save_model
from keras.callbacks import EarlyStopping
import random

'''hyperparameter settings'''
time_stamp = 120

@@ -36,7 +37,7 @@ K = 18
namuda = 0.01
'''save names'''

save_name = "./model/{0}_timestamp{1}_feature{2}.h5".format(model_name,
save_name = "./model/{0}_timestamp{1}_feature{2}_epoch1_loss0.005.h5".format(model_name,
                                                            time_stamp,
                                                            feature_num,
                                                            batch_size,
@@ -46,6 +47,17 @@ save_step_two_name = "../hard_model/two_weight/{0}_timestamp{1}_feature{2}_weigh
                                                                                   feature_num,
                                                                                   batch_size,
                                                                                   EPOCH)
save_mse_name = "./mse/{0}/{0}_timestamp{1}_feature{2}_mse.csv".format(model_name,
                                                                       time_stamp,
                                                                       feature_num,
                                                                       batch_size,
                                                                       EPOCH)
save_max_name = "./mse/{0}/{0}_timestamp{1}_feature{2}_max.csv".format(model_name,
                                                                       time_stamp,
                                                                       feature_num,
                                                                       batch_size,
                                                                       EPOCH)


# save_name = "../model/joint/{0}_timestamp{1}_feature{2}.h5".format(model_name,
#                                                                    time_stamp,
@@ -182,46 +194,6 @@ def EWMA(data, K=K, namuda=namuda):
    pass


def get_MSE(data, label, new_model, isStandard: bool = True, isPlot: bool = True):
    predicted_data = new_model.predict(data)

    temp = np.abs(predicted_data - label)
    temp1 = (temp - np.broadcast_to(np.mean(temp, axis=0), shape=predicted_data.shape))
    temp2 = np.broadcast_to(np.sqrt(np.var(temp, axis=0)), shape=predicted_data.shape)
    temp3 = temp1 / temp2
    mse = np.sum((temp1 / temp2) ** 2, axis=1)
    print("z:", mse)
    print(mse.shape)

    # mse=np.mean((predicted_data-label)**2,axis=1)
    print("mse", mse)
    if isStandard:
        dims, = mse.shape
        mean = np.mean(mse)
        std = np.sqrt(np.var(mse))
        max = mean + 3 * std
        print("max:", max)
        # min = mean-3*std
        max = np.broadcast_to(max, shape=[dims, ])
        # min = np.broadcast_to(min,shape=[dims,])
        mean = np.broadcast_to(mean, shape=[dims, ])
        if isPlot:
            plt.plot(max)
            plt.plot(mse)
            plt.plot(mean)
            # plt.plot(min)
            plt.show()
    else:
        if isPlot:
            plt.plot(mse)
            # plt.plot(min)
            plt.show()
        return mse

    return mse, mean, max
    # pass


def condition_monitoring_model():
    input = tf.keras.Input(shape=[time_stamp, feature_num])
    conv1 = tf.keras.layers.Conv1D(filters=256, kernel_size=1)(input)
@@ -354,9 +326,52 @@ def GRU_Model():
    pass


def get_MSE(data, label, new_model, isStandard: bool = True, isPlot: bool = True, predictI: int = 1):
    predicted_data = new_model.predict(data)

    # standardize the per-feature absolute errors, then sum the squared z-scores per sample
    temp = np.abs(predicted_data - label)
    temp1 = (temp - np.broadcast_to(np.mean(temp, axis=0), shape=predicted_data.shape))
    temp2 = np.broadcast_to(np.sqrt(np.var(temp, axis=0)), shape=predicted_data.shape)
    temp3 = temp1 / temp2
    mse = np.sum((temp1 / temp2) ** 2, axis=1)
    print("z:", mse)
    print(mse.shape)

    # mse=np.mean((predicted_data-label)**2,axis=1)
    print("mse", mse)
    if isStandard:
        dims, = mse.shape
        mean = np.mean(mse)
        std = np.sqrt(np.var(mse))
        max = mean + 3 * std
        print("max:", max)
        # min = mean-3*std
        max = np.broadcast_to(max, shape=[dims, ])
        # min = np.broadcast_to(min,shape=[dims,])
        mean = np.broadcast_to(mean, shape=[dims, ])
        if isPlot:
            plt.figure(random.randint(1, 9))
            plt.plot(max)
            plt.plot(mse)
            plt.plot(mean)
            # plt.plot(min)
            plt.show()
    else:
        if isPlot:
            plt.figure(random.randint(1, 9))
            plt.plot(mse)
            # plt.plot(min)
            plt.show()
        return mse

    return mse, mean, max
    # pass


# healthy_data is the healthy data used to fix the threshold; all_data is the full data used to produce the model's results
def getResult(model: tf.keras.Model, healthy_data, healthy_label, unhealthy_data, unhealthy_label, isPlot: bool = False,
              isSave: bool = False):
              isSave: bool = True, predictI: int = 1):
    # TODO compute the MSE to determine the threshold

    mse, mean, max = get_MSE(healthy_data, healthy_label, model)
@@ -365,26 +380,48 @@ def getResult(model: tf.keras.Model, healthy_data, healthy_label, unhealthy_data
    total, = mse.shape
    faultNum = 0
    faultList = []
    for i in range(total):
        if (mse[i] > max[i]):
            faultNum += 1
            faultList.append(mse[i])
    faultNum = mse[mse[:] > max[0]].__len__()
    # for i in range(total):
    #     if (mse[i] > max[i]):
    #         faultNum += 1
    #         faultList.append(mse[i])

    fault_rate = faultNum / total
    print("False alarm rate:", fault_rate)

    # missed-alarm rate computation
    missNum = 0
    missList = []
    mse1 = get_MSE(unhealthy_data, unhealthy_label, model, isStandard=False)
    all, = mse1.shape
    for i in range(all):
        if (mse1[i] < max[0]):
            missNum += 1
            missList.append(mse1[i])
    mse1 = get_MSE(unhealthy_data, unhealthy_label, model, isStandard=False)

    total_mse = np.concatenate([mse, mse1], axis=0)
    total_max = np.broadcast_to(max[0], shape=[total_mse.shape[0], ])
    # min = np.broadcast_to(min,shape=[dims,])
    total_mean = np.broadcast_to(mean[0], shape=[total_mse.shape[0], ])
    if isSave:
        save_mse_name1 = save_mse_name
        save_max_name1 = save_max_name

        np.savetxt(save_mse_name1, total_mse, delimiter=',')
        np.savetxt(save_max_name1, total_max, delimiter=',')

    all, = mse1.shape

    missNum = mse1[mse1[:] < max[0]].__len__()

    print("all:", all)
    miss_rate = missNum / all
    print("Missed alarm rate:", miss_rate)

    plt.figure(random.randint(1, 100))
    plt.plot(total_max)
    plt.plot(total_mse)
    plt.plot(total_mean)
    # plt.plot(min)
    plt.show()
    pass
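In compact form, get_MSE builds a standardized-residual health index: each feature's absolute prediction error is z-scored over the evaluation window, the squared z-scores are summed per sample, and the alarm threshold is mean + 3*std of that index on healthy data. A minimal sketch with illustrative names (not the commit's code):

import numpy as np

def health_index(pred, label):
    err = np.abs(pred - label)                        # per-feature absolute error
    z = (err - err.mean(axis=0)) / err.std(axis=0)    # z-score each feature column
    return (z ** 2).sum(axis=1)                       # one index value per sample

# threshold on healthy data: idx.mean() + 3 * idx.std()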
@@ -404,16 +441,16 @@ if __name__ == '__main__':
    model.summary()
    early_stop = EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=5, mode='min', verbose=1)

    checkpoint = tf.keras.callbacks.ModelCheckpoint(
        filepath=save_name,
        monitor='val_loss',
        verbose=1,
        save_best_only=True,
        mode='min',
        period=1)
    # checkpoint = tf.keras.callbacks.ModelCheckpoint(
    #     filepath=save_name,
    #     monitor='val_loss',
    #     verbose=1,
    #     save_best_only=True,
    #     mode='min',
    #     period=1)

    history = model.fit(train_data_healthy[:30000,:,:], train_label1_healthy[:30000,:], epochs=10,
                        batch_size=32,validation_split=0.2,shuffle=False, verbose=1,callbacks=[checkpoint,early_stop])
    # history = model.fit(train_data_healthy[:30000,:,:], train_label1_healthy[:30000,:], epochs=10,
    #                     batch_size=32,validation_split=0.2,shuffle=False, verbose=1,callbacks=[checkpoint,early_stop])
    # model.save(save_name)

    ## TODO testing
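A side note on the checkpoint above: the period argument was deprecated in TF 2.x Keras in favour of save_freq. A hedged equivalent for newer versions (a sketch; it assumes the same save_name variable defined earlier in this file):

import tensorflow as tf

checkpoint = tf.keras.callbacks.ModelCheckpoint(
    filepath=save_name,     # assumption: save_name as defined above
    monitor='val_loss',
    verbose=1,
    save_best_only=True,
    mode='min',
    save_freq='epoch')      # replaces the deprecated period=1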
@@ -427,9 +464,19 @@ if __name__ == '__main__':
        total_data[healthy_size - 2 * unhealthy_size:unhealthy_date, :], is_Healthy=True)

    newModel = tf.keras.models.load_model(save_name)

    # single-shot test
    # getResult(newModel,
    #           healthy_data=train_data_healthy[healthy_size - 2 * unhealthy_size:healthy_size - 2 * unhealthy_size + 200,
    #                        :],
    #           healthy_label=train_label1_healthy[
    #                         healthy_size - 2 * unhealthy_size:healthy_size - 2 * unhealthy_size + 200, :],
    #           unhealthy_data=train_data_unhealthy[:200, :], unhealthy_label=train_label1_unhealthy[:200, :],isSave=True)
    getResult(newModel, healthy_data=train_data_healthy[healthy_size - 2 * unhealthy_size:, :],
              healthy_label=train_label1_healthy[healthy_size - 2 * unhealthy_size:, :],
              unhealthy_data=train_data_unhealthy, unhealthy_label=train_label1_unhealthy)
              unhealthy_data=train_data_unhealthy, unhealthy_label=train_label1_unhealthy,isSave=True)

    # mse, mean, max = get_MSE(train_data_healthy[healthy_size - 2 * unhealthy_size:, :],
    #                          train_label1_healthy[healthy_size - 2 * unhealthy_size:, :], new_model=newModel)

@@ -42,18 +42,18 @@ save_name = "./model/weight/{0}/weight".format(model_name,
                                               feature_num,
                                               batch_size,
                                               EPOCH)
save_step_two_name = "./model/two_weight/{0}_weight_epoch6_99899_9996/weight".format(model_name,
save_step_two_name = "./model/two_weight/{0}_weight_epoch0_9973_9994/weight".format(model_name,
                                                                                    time_stamp,
                                                                                    feature_num,
                                                                                    batch_size,
                                                                                    EPOCH)

save_mse_name = "./mse/RNet_3/{0}_timestamp{1}_feature{2}_result.csv".format(model_name,
save_mse_name = "./mse/{0}/{0}_timestamp{1}_feature{2}_result.csv".format(model_name,
                                                                          time_stamp,
                                                                          feature_num,
                                                                          batch_size,
                                                                          EPOCH)
save_max_name = "./mse/RNet_3/{0}_timestamp{1}_feature{2}_max.csv".format(model_name,
save_max_name = "./mse/{0}/{0}_timestamp{1}_feature{2}_max.csv".format(model_name,
                                                                       time_stamp,
                                                                       feature_num,
                                                                       batch_size,

@@ -472,10 +472,7 @@ def showResult(step_two_model: Joint_Monitoring, test_data, isPlot: bool = False
    total_result = np.reshape(total_result, [-1, ])

    # computation of false-alarm rate, missed-alarm rate, and accuracy

    if isSave:
        np.savetxt(save_mse_name, total_result, delimiter=',')
    if isPlot:
        plt.figure(1, figsize=(6.0, 2.68))

@@ -649,7 +646,8 @@ def getResult(model: tf.keras.Model, healthy_data, healthy_label, unhealthy_data

if __name__ == '__main__':
    total_data = loadData.execute(N=feature_num, file_name=file_name)
    # total_data = loadData.execute(N=feature_num, file_name=file_name)
    total_data = np.load(r"G:\data\SCADA数据\靖边8号处理后的数据\原始10SCADA数据/total_data.npy")
    total_data = normalization(data=total_data)
    train_data_healthy, train_label1_healthy, train_label2_healthy = get_training_data_overlapping(
        total_data[:healthy_date, :], is_Healthy=True)

@@ -673,14 +671,14 @@ if __name__ == '__main__':
    ### unhealthy_data.shape: (16594,10)
    healthy_size, _, _ = train_data_healthy.shape
    unhealthy_size, _, _ = train_data_unhealthy.shape
    train_data, train_label1, train_label2, test_data, test_label1, test_label2 = split_test_data(
        healthy_data=train_data_healthy[healthy_size - 2 * unhealthy_size:, :, :],
        healthy_label1=train_label1_healthy[healthy_size - 2 * unhealthy_size:, :],
        healthy_label2=train_label2_healthy[healthy_size - 2 * unhealthy_size:, ], unhealthy_data=train_data_unhealthy,
        unhealthy_label1=train_label1_unhealthy, unhealthy_label2=train_label2_unhealthy)
    train_step_two(step_one_model=step_one_model, step_two_model=step_two_model,
                   train_data=train_data,
                   train_label1=train_label1, train_label2=np.expand_dims(train_label2, axis=-1))
    # train_data, train_label1, train_label2, test_data, test_label1, test_label2 = split_test_data(
    #     healthy_data=train_data_healthy[healthy_size - 2 * unhealthy_size:, :, :],
    #     healthy_label1=train_label1_healthy[healthy_size - 2 * unhealthy_size:, :],
    #     healthy_label2=train_label2_healthy[healthy_size - 2 * unhealthy_size:, ], unhealthy_data=train_data_unhealthy,
    #     unhealthy_label1=train_label1_unhealthy, unhealthy_label2=train_label2_unhealthy)
    # train_step_two(step_one_model=step_one_model, step_two_model=step_two_model,
    #                train_data=train_data,
    #                train_label1=train_label1, train_label2=np.expand_dims(train_label2, axis=-1))

    healthy_size, _, _ = train_data_healthy.shape
    unhealthy_size, _, _ = train_data_unhealthy.shape

@@ -42,18 +42,18 @@ save_name = "./model/weight/{0}/weight".format(model_name,
                                               feature_num,
                                               batch_size,
                                               EPOCH)
save_step_two_name = "./model/two_weight/{0}_weight/weight".format(model_name,
save_step_two_name = "./model/two_weight/{0}_weight_epoch0_9965_9996_9989/weight".format(model_name,
                                                                                         time_stamp,
                                                                                         feature_num,
                                                                                         batch_size,
                                                                                         EPOCH)

save_mse_name = "./mse/RNet_34/{0}_timestamp{1}_feature{2}_result.csv".format(model_name,
save_mse_name = "./mse/{0}/{0}_timestamp{1}_feature{2}_result.csv".format(model_name,
                                                                          time_stamp,
                                                                          feature_num,
                                                                          batch_size,
                                                                          EPOCH)
save_max_name = "./mse/RNet_34/{0}_timestamp{1}_feature{2}_max.csv".format(model_name,
save_max_name = "./mse/{0}/{0}_timestamp{1}_feature{2}_max.csv".format(model_name,
                                                                       time_stamp,
                                                                       feature_num,
                                                                       batch_size,

@@ -649,7 +649,8 @@ def getResult(model: tf.keras.Model, healthy_data, healthy_label, unhealthy_data

if __name__ == '__main__':
    total_data = loadData.execute(N=feature_num, file_name=file_name)
    # total_data = loadData.execute(N=feature_num, file_name=file_name)
    total_data = np.load(r"G:\data\SCADA数据\靖边8号处理后的数据\原始10SCADA数据/total_data.npy")
    total_data = normalization(data=total_data)
    train_data_healthy, train_label1_healthy, train_label2_healthy = get_training_data_overlapping(
        total_data[:healthy_date, :], is_Healthy=True)

@@ -673,14 +674,14 @@ if __name__ == '__main__':
    ### unhealthy_data.shape: (16594,10)
    healthy_size, _, _ = train_data_healthy.shape
    unhealthy_size, _, _ = train_data_unhealthy.shape
    train_data, train_label1, train_label2, test_data, test_label1, test_label2 = split_test_data(
        healthy_data=train_data_healthy[healthy_size - 2 * unhealthy_size:, :, :],
        healthy_label1=train_label1_healthy[healthy_size - 2 * unhealthy_size:, :],
        healthy_label2=train_label2_healthy[healthy_size - 2 * unhealthy_size:, ], unhealthy_data=train_data_unhealthy,
        unhealthy_label1=train_label1_unhealthy, unhealthy_label2=train_label2_unhealthy)
    train_step_two(step_one_model=step_one_model, step_two_model=step_two_model,
                   train_data=train_data,
                   train_label1=train_label1, train_label2=np.expand_dims(train_label2, axis=-1))
    # train_data, train_label1, train_label2, test_data, test_label1, test_label2 = split_test_data(
    #     healthy_data=train_data_healthy[healthy_size - 2 * unhealthy_size:, :, :],
    #     healthy_label1=train_label1_healthy[healthy_size - 2 * unhealthy_size:, :],
    #     healthy_label2=train_label2_healthy[healthy_size - 2 * unhealthy_size:, ], unhealthy_data=train_data_unhealthy,
    #     unhealthy_label1=train_label1_unhealthy, unhealthy_label2=train_label2_unhealthy)
    # train_step_two(step_one_model=step_one_model, step_two_model=step_two_model,
    #                train_data=train_data,
    #                train_label1=train_label1, train_label2=np.expand_dims(train_label2, axis=-1))

    healthy_size, _, _ = train_data_healthy.shape
    unhealthy_size, _, _ = train_data_unhealthy.shape

@@ -42,18 +42,18 @@ save_name = "./model/weight/{0}/weight".format(model_name,
                                               feature_num,
                                               batch_size,
                                               EPOCH)
save_step_two_name = "./model/two_weight/{0}_weight/weight".format(model_name,
save_step_two_name = "./model/two_weight/{0}_weight_epoch0_9974_9996_9990/weight".format(model_name,
                                                                                         time_stamp,
                                                                                         feature_num,
                                                                                         batch_size,
                                                                                         EPOCH)

save_mse_name = "./mse/RNet_35/{0}_timestamp{1}_feature{2}_result.csv".format(model_name,
save_mse_name = "./mse/{0}/{0}_timestamp{1}_feature{2}_result.csv".format(model_name,
                                                                          time_stamp,
                                                                          feature_num,
                                                                          batch_size,
                                                                          EPOCH)
save_max_name = "./mse/RNet_35/{0}_timestamp{1}_feature{2}_max.csv".format(model_name,
save_max_name = "./mse/{0}/{0}_timestamp{1}_feature{2}_max.csv".format(model_name,
                                                                       time_stamp,
                                                                       feature_num,
                                                                       batch_size,

@@ -649,7 +649,8 @@ def getResult(model: tf.keras.Model, healthy_data, healthy_label, unhealthy_data

if __name__ == '__main__':
    total_data = loadData.execute(N=feature_num, file_name=file_name)
    # total_data = loadData.execute(N=feature_num, file_name=file_name)
    total_data = np.load(r"G:\data\SCADA数据\靖边8号处理后的数据\原始10SCADA数据/total_data.npy")
    total_data = normalization(data=total_data)
    train_data_healthy, train_label1_healthy, train_label2_healthy = get_training_data_overlapping(
        total_data[:healthy_date, :], is_Healthy=True)

@@ -673,14 +674,14 @@ if __name__ == '__main__':
    ### unhealthy_data.shape: (16594,10)
    healthy_size, _, _ = train_data_healthy.shape
    unhealthy_size, _, _ = train_data_unhealthy.shape
    train_data, train_label1, train_label2, test_data, test_label1, test_label2 = split_test_data(
        healthy_data=train_data_healthy[healthy_size - 2 * unhealthy_size:, :, :],
        healthy_label1=train_label1_healthy[healthy_size - 2 * unhealthy_size:, :],
        healthy_label2=train_label2_healthy[healthy_size - 2 * unhealthy_size:, ], unhealthy_data=train_data_unhealthy,
        unhealthy_label1=train_label1_unhealthy, unhealthy_label2=train_label2_unhealthy)
    train_step_two(step_one_model=step_one_model, step_two_model=step_two_model,
                   train_data=train_data,
                   train_label1=train_label1, train_label2=np.expand_dims(train_label2, axis=-1))
    # train_data, train_label1, train_label2, test_data, test_label1, test_label2 = split_test_data(
    #     healthy_data=train_data_healthy[healthy_size - 2 * unhealthy_size:, :, :],
    #     healthy_label1=train_label1_healthy[healthy_size - 2 * unhealthy_size:, :],
    #     healthy_label2=train_label2_healthy[healthy_size - 2 * unhealthy_size:, ], unhealthy_data=train_data_unhealthy,
    #     unhealthy_label1=train_label1_unhealthy, unhealthy_label2=train_label2_unhealthy)
    # train_step_two(step_one_model=step_one_model, step_two_model=step_two_model,
    #                train_data=train_data,
    #                train_label1=train_label1, train_label2=np.expand_dims(train_label2, axis=-1))

    healthy_size, _, _ = train_data_healthy.shape
    unhealthy_size, _, _ = train_data_unhealthy.shape

@@ -42,18 +42,18 @@ save_name = "./model/weight/{0}/weight".format(model_name,
                                               feature_num,
                                               batch_size,
                                               EPOCH)
save_step_two_name = "./model/two_weight/{0}_weight/weight".format(model_name,
save_step_two_name = "./model/two_weight/{0}_weight_epoch0_9973_9996_9982/weight".format(model_name,
                                                                                         time_stamp,
                                                                                         feature_num,
                                                                                         batch_size,
                                                                                         EPOCH)

save_mse_name = "./mse/RNet_C/{0}_timestamp{1}_feature{2}_result.csv".format(model_name,
save_mse_name = "./mse/{0}/{0}_timestamp{1}_feature{2}_result.csv".format(model_name,
                                                                          time_stamp,
                                                                          feature_num,
                                                                          batch_size,
                                                                          EPOCH)
save_max_name = "./mse/RNet_C/{0}_timestamp{1}_feature{2}_max.csv".format(model_name,
save_max_name = "./mse/{0}/{0}_timestamp{1}_feature{2}_max.csv".format(model_name,
                                                                       time_stamp,
                                                                       feature_num,
                                                                       batch_size,

@@ -649,7 +649,8 @@ def getResult(model: tf.keras.Model, healthy_data, healthy_label, unhealthy_data

if __name__ == '__main__':
    total_data = loadData.execute(N=feature_num, file_name=file_name)
    # total_data = loadData.execute(N=feature_num, file_name=file_name)
    total_data = np.load(r"G:\data\SCADA数据\靖边8号处理后的数据\原始10SCADA数据/total_data.npy")
    total_data = normalization(data=total_data)
    train_data_healthy, train_label1_healthy, train_label2_healthy = get_training_data_overlapping(
        total_data[:healthy_date, :], is_Healthy=True)

@@ -673,14 +674,14 @@ if __name__ == '__main__':
    ### unhealthy_data.shape: (16594,10)
    healthy_size, _, _ = train_data_healthy.shape
    unhealthy_size, _, _ = train_data_unhealthy.shape
    train_data, train_label1, train_label2, test_data, test_label1, test_label2 = split_test_data(
        healthy_data=train_data_healthy[healthy_size - 2 * unhealthy_size:, :, :],
        healthy_label1=train_label1_healthy[healthy_size - 2 * unhealthy_size:, :],
        healthy_label2=train_label2_healthy[healthy_size - 2 * unhealthy_size:, ], unhealthy_data=train_data_unhealthy,
        unhealthy_label1=train_label1_unhealthy, unhealthy_label2=train_label2_unhealthy)
    train_step_two(step_one_model=step_one_model, step_two_model=step_two_model,
                   train_data=train_data,
                   train_label1=train_label1, train_label2=np.expand_dims(train_label2, axis=-1))
    # train_data, train_label1, train_label2, test_data, test_label1, test_label2 = split_test_data(
    #     healthy_data=train_data_healthy[healthy_size - 2 * unhealthy_size:, :, :],
    #     healthy_label1=train_label1_healthy[healthy_size - 2 * unhealthy_size:, :],
    #     healthy_label2=train_label2_healthy[healthy_size - 2 * unhealthy_size:, ], unhealthy_data=train_data_unhealthy,
    #     unhealthy_label1=train_label1_unhealthy, unhealthy_label2=train_label2_unhealthy)
    # train_step_two(step_one_model=step_one_model, step_two_model=step_two_model,
    #                train_data=train_data,
    #                train_label1=train_label1, train_label2=np.expand_dims(train_label2, axis=-1))

    healthy_size, _, _ = train_data_healthy.shape
    unhealthy_size, _, _ = train_data_unhealthy.shape

@@ -42,18 +42,18 @@ save_name = "./model/weight/{0}/weight".format(model_name,
                                               feature_num,
                                               batch_size,
                                               EPOCH)
save_step_two_name = "./model/two_weight/{0}_weight/weight".format(model_name,
save_step_two_name = "./model/two_weight/{0}_weight_epoch0_9969_9992/weight".format(model_name,
                                                                                    time_stamp,
                                                                                    feature_num,
                                                                                    batch_size,
                                                                                    EPOCH)

save_mse_name = "./mse/RNet_45/{0}_timestamp{1}_feature{2}_result.csv".format(model_name,
save_mse_name = "./mse/{0}/{0}_timestamp{1}_feature{2}_result.csv".format(model_name,
                                                                          time_stamp,
                                                                          feature_num,
                                                                          batch_size,
                                                                          EPOCH)
save_max_name = "./mse/RNet_45/{0}_timestamp{1}_feature{2}_max.csv".format(model_name,
save_max_name = "./mse/{0}/{0}_timestamp{1}_feature{2}_max.csv".format(model_name,
                                                                       time_stamp,
                                                                       feature_num,
                                                                       batch_size,

@@ -470,12 +470,8 @@ def showResult(step_two_model: Joint_Monitoring, test_data, isPlot: bool = False
        total_result.append(output4)
    total_result = np.reshape(total_result, [total_result.__len__(), -1])
    total_result = np.reshape(total_result, [-1, ])

    # computation of false-alarm rate, missed-alarm rate, and accuracy
    if isSave:
        np.savetxt(save_mse_name, total_result, delimiter=',')
    if isPlot:
        plt.figure(1, figsize=(6.0, 2.68))

@@ -649,7 +645,9 @@ def getResult(model: tf.keras.Model, healthy_data, healthy_label, unhealthy_data

if __name__ == '__main__':
    total_data = loadData.execute(N=feature_num, file_name=file_name)
    # total_data = loadData.execute(N=feature_num, file_name=file_name)

    total_data = np.load(r"G:\data\SCADA数据\靖边8号处理后的数据\原始10SCADA数据/total_data.npy")
    total_data = normalization(data=total_data)
    train_data_healthy, train_label1_healthy, train_label2_healthy = get_training_data_overlapping(
        total_data[:healthy_date, :], is_Healthy=True)

@@ -673,14 +671,14 @@ if __name__ == '__main__':
    ### unhealthy_data.shape: (16594,10)
    healthy_size, _, _ = train_data_healthy.shape
    unhealthy_size, _, _ = train_data_unhealthy.shape
    train_data, train_label1, train_label2, test_data, test_label1, test_label2 = split_test_data(
        healthy_data=train_data_healthy[healthy_size - 2 * unhealthy_size:, :, :],
        healthy_label1=train_label1_healthy[healthy_size - 2 * unhealthy_size:, :],
        healthy_label2=train_label2_healthy[healthy_size - 2 * unhealthy_size:, ], unhealthy_data=train_data_unhealthy,
        unhealthy_label1=train_label1_unhealthy, unhealthy_label2=train_label2_unhealthy)
    train_step_two(step_one_model=step_one_model, step_two_model=step_two_model,
                   train_data=train_data,
                   train_label1=train_label1, train_label2=np.expand_dims(train_label2, axis=-1))
    # train_data, train_label1, train_label2, test_data, test_label1, test_label2 = split_test_data(
    #     healthy_data=train_data_healthy[healthy_size - 2 * unhealthy_size:, :, :],
    #     healthy_label1=train_label1_healthy[healthy_size - 2 * unhealthy_size:, :],
    #     healthy_label2=train_label2_healthy[healthy_size - 2 * unhealthy_size:, ], unhealthy_data=train_data_unhealthy,
    #     unhealthy_label1=train_label1_unhealthy, unhealthy_label2=train_label2_unhealthy)
    # train_step_two(step_one_model=step_one_model, step_two_model=step_two_model,
    #                train_data=train_data,
    #                train_label1=train_label1, train_label2=np.expand_dims(train_label2, axis=-1))

    healthy_size, _, _ = train_data_healthy.shape
    unhealthy_size, _, _ = train_data_unhealthy.shape

@@ -42,18 +42,18 @@ save_name = "./model/weight/{0}/weight".format(model_name,
                                               feature_num,
                                               batch_size,
                                               EPOCH)
save_step_two_name = "./model/two_weight/{0}_weight/weight".format(model_name,
save_step_two_name = "./model/two_weight/{0}_weight_epoch1_9990_9992/weight".format(model_name,
                                                                                    time_stamp,
                                                                                    feature_num,
                                                                                    batch_size,
                                                                                    EPOCH)

save_mse_name = "./mse/RNet_C/{0}_timestamp{1}_feature{2}_result.csv".format(model_name,
save_mse_name = "./mse/{0}/{0}_timestamp{1}_feature{2}_result.csv".format(model_name,
                                                                          time_stamp,
                                                                          feature_num,
                                                                          batch_size,
                                                                          EPOCH)
save_max_name = "./mse/RNet_C/{0}_timestamp{1}_feature{2}_max.csv".format(model_name,
save_max_name = "./mse/{0}/{0}_timestamp{1}_feature{2}_max.csv".format(model_name,
                                                                       time_stamp,
                                                                       feature_num,
                                                                       batch_size,

@@ -649,7 +649,8 @@ def getResult(model: tf.keras.Model, healthy_data, healthy_label, unhealthy_data

if __name__ == '__main__':
    total_data = loadData.execute(N=feature_num, file_name=file_name)
    total_data = np.load(r"G:\data\SCADA数据\靖边8号处理后的数据\原始10SCADA数据/total_data.npy")
    # total_data = loadData.execute(N=feature_num, file_name=file_name)
    total_data = normalization(data=total_data)
    train_data_healthy, train_label1_healthy, train_label2_healthy = get_training_data_overlapping(
        total_data[:healthy_date, :], is_Healthy=True)

@@ -673,14 +674,14 @@ if __name__ == '__main__':
    ### unhealthy_data.shape: (16594,10)
    healthy_size, _, _ = train_data_healthy.shape
    unhealthy_size, _, _ = train_data_unhealthy.shape
    train_data, train_label1, train_label2, test_data, test_label1, test_label2 = split_test_data(
        healthy_data=train_data_healthy[healthy_size - 2 * unhealthy_size:, :, :],
        healthy_label1=train_label1_healthy[healthy_size - 2 * unhealthy_size:, :],
        healthy_label2=train_label2_healthy[healthy_size - 2 * unhealthy_size:, ], unhealthy_data=train_data_unhealthy,
        unhealthy_label1=train_label1_unhealthy, unhealthy_label2=train_label2_unhealthy)
    train_step_two(step_one_model=step_one_model, step_two_model=step_two_model,
                   train_data=train_data,
                   train_label1=train_label1, train_label2=np.expand_dims(train_label2, axis=-1))
    # train_data, train_label1, train_label2, test_data, test_label1, test_label2 = split_test_data(
    #     healthy_data=train_data_healthy[healthy_size - 2 * unhealthy_size:, :, :],
    #     healthy_label1=train_label1_healthy[healthy_size - 2 * unhealthy_size:, :],
    #     healthy_label2=train_label2_healthy[healthy_size - 2 * unhealthy_size:, ], unhealthy_data=train_data_unhealthy,
    #     unhealthy_label1=train_label1_unhealthy, unhealthy_label2=train_label2_unhealthy)
    # train_step_two(step_one_model=step_one_model, step_two_model=step_two_model,
    #                train_data=train_data,
    #                train_label1=train_label1, train_label2=np.expand_dims(train_label2, axis=-1))

    healthy_size, _, _ = train_data_healthy.shape
    unhealthy_size, _, _ = train_data_unhealthy.shape

@@ -48,6 +48,17 @@ save_step_two_name = "./model/two_weight/{0}_timestamp{1}_feature{2}_weight/weig
                                                                                   batch_size,
                                                                                   EPOCH)

save_mse_name = "./mse/{0}/{0}_timestamp{1}_feature{2}_mse.csv".format(model_name,
                                                                       time_stamp,
                                                                       feature_num,
                                                                       batch_size,
                                                                       EPOCH)
save_max_name = "./mse/{0}/{0}_timestamp{1}_feature{2}_max.csv".format(model_name,
                                                                       time_stamp,
                                                                       feature_num,
                                                                       batch_size,
                                                                       EPOCH)

# save_name = "../model/joint/{0}_timestamp{1}_feature{2}.h5".format(model_name,
#                                                                    time_stamp,
#                                                                    feature_num,
@@ -557,11 +568,11 @@ def get_MSE(data, label, new_model, isStandard: bool = True, isPlot: bool = True
def getResult(model: tf.keras.Model, healthy_data, healthy_label, unhealthy_data, unhealthy_label, isPlot: bool = False,
              isSave: bool = False, predictI: int = 1):
    # TODO compute the MSE to determine the threshold

    # plt.ion()
    mseList, meanList, maxList = get_MSE(healthy_data, healthy_label, model)
    mse1List, _, _ = get_MSE(unhealthy_data, unhealthy_label, model, isStandard=False)

    for mse, mean, max, mse1 in zip(mseList, meanList, maxList, mse1List):
    for mse, mean, max, mse1, j in zip(mseList, meanList, maxList, mse1List, range(3)):

        # false-alarm rate computation
        total, = mse.shape
@@ -588,15 +599,22 @@ def getResult(model: tf.keras.Model, healthy_data, healthy_label, unhealthy_data
        print("Missed alarm rate:", miss_rate)

        # overall plot
        print("mse:", mse)
        print("mse1:", mse1)
        # print("mse:", mse)
        # print("mse1:", mse1)
        print("============================================")
        total_mse = np.concatenate([mse, mse1], axis=0)
        total_max = np.broadcast_to(max[0], shape=[total_mse.shape[0], ])
        # min = np.broadcast_to(min,shape=[dims,])
        total_mean = np.broadcast_to(mean[0], shape=[total_mse.shape[0], ])

        plt.figure(random.randint(1, 9))
        if isSave:
            save_mse_name1 = save_mse_name[:-4] + "_predict" + str(j + 1) + ".csv"
            save_max_name1 = save_max_name[:-4] + "_predict" + str(j + 1) + ".csv"

            np.savetxt(save_mse_name1, total_mse, delimiter=',')
            np.savetxt(save_max_name1, total_max, delimiter=',')

        plt.figure(random.randint(1, 100))
        plt.plot(total_max)
        plt.plot(total_mse)
        plt.plot(total_mean)
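The loop header above uses zip with range(3) both to enumerate the prediction heads (j feeds the _predictN filename suffix) and to cap the iteration at three items, since zip stops at the shortest iterable. A toy illustration:

heads = ['h1', 'h2', 'h3', 'h4']
for h, j in zip(heads, range(3)):          # stops after three items
    print(h, "_predict" + str(j + 1) + ".csv")
# visits h1, h2, h3 only; h4 is never reached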
@@ -606,7 +624,8 @@ def getResult(model: tf.keras.Model, healthy_data, healthy_label, unhealthy_data

if __name__ == '__main__':
    total_data = loadData.execute(N=feature_num, file_name=file_name)
    # total_data = loadData.execute(N=feature_num, file_name=file_name)
    total_data = np.load(r"G:\data\SCADA数据\靖边8号处理后的数据\原始10SCADA数据/total_data.npy")
    total_data = normalization(data=total_data)
    train_data_healthy, train_label1_healthy, train_label2_healthy = get_training_data_overlapping(
        total_data[:healthy_date, :], is_Healthy=True)
@@ -618,7 +637,7 @@ if __name__ == '__main__':
    # train_step_one(train_data=train_data_healthy[:256, :, :], train_label1=train_label1_healthy[:256, :],
    #                train_label2=train_label2_healthy[:256, ])
    #### model training
    train_step_one(train_data=train_data_healthy, train_label1=train_label1_healthy, train_label2=train_label2_healthy)
    # train_step_one(train_data=train_data_healthy, train_label1=train_label1_healthy, train_label2=train_label2_healthy)

    # load the model already trained in step one: one copy keeps training, the other only produces outputs
    step_one_model = Joint_Monitoring()
@@ -633,17 +652,18 @@ if __name__ == '__main__':
    all_data, _, _ = get_training_data_overlapping(
        total_data[healthy_size - 2 * unhealthy_size:unhealthy_date, :], is_Healthy=True)

    ## single-shot test of the results
    # getResult(step_one_model,
    #           healthy_data=train_data_healthy[healthy_size - 2 * unhealthy_size:healthy_size - 2 * unhealthy_size + 200,
    #                        :],
    #           healthy_label=train_label1_healthy[
    #                         healthy_size - 2 * unhealthy_size:healthy_size - 2 * unhealthy_size + 200, :],
    #           unhealthy_data=train_data_unhealthy[:200,:], unhealthy_label=train_label1_unhealthy[:200,:])
    #           unhealthy_data=train_data_unhealthy[:200, :], unhealthy_label=train_label1_unhealthy[:200, :],isSave=True)
    #
    getResult(step_one_model, healthy_data=train_data_healthy[healthy_size - 2 * unhealthy_size:, :],
              healthy_label=train_label1_healthy[healthy_size - 2 * unhealthy_size:, :],
              unhealthy_data=train_data_unhealthy, unhealthy_label=train_label1_unhealthy)
              unhealthy_data=train_data_unhealthy, unhealthy_label=train_label1_unhealthy, isSave=True)

    # ###TODO show the full results
    # all_data, _, _ = get_training_data_overlapping(

@@ -48,12 +48,12 @@ save_step_two_name = "./model/two_weight/{0}_timestamp{1}_feature{2}_weight/weig
                                                                                   EPOCH)


save_mse_name = "./mse/RNet_MSE/{0}_timestamp{1}_feature{2}_mse.csv".format(model_name,
save_mse_name = "./mse/{0}/{0}_timestamp{1}_feature{2}_mse.csv".format(model_name,
                                                                       time_stamp,
                                                                       feature_num,
                                                                       batch_size,
                                                                       EPOCH)
save_max_name = "./mse/RNet_MSE/{0}_timestamp{1}_feature{2}_max.csv".format(model_name,
save_max_name = "./mse/{0}/{0}_timestamp{1}_feature{2}_max.csv".format(model_name,
                                                                       time_stamp,
                                                                       feature_num,
                                                                       batch_size,
@@ -598,8 +598,8 @@ def getResult(model: tf.keras.Model, healthy_data, healthy_label, unhealthy_data
        print("Missed alarm rate:", miss_rate)

        # overall plot
        print("mse:", mse)
        print("mse1:", mse1)
        # print("mse:", mse)
        # print("mse1:", mse1)
        print("============================================")
        total_mse = np.concatenate([mse, mse1], axis=0)
        total_max = np.broadcast_to(max[0], shape=[total_mse.shape[0], ])
@@ -625,6 +625,7 @@ def getResult(model: tf.keras.Model, healthy_data, healthy_label, unhealthy_data

if __name__ == '__main__':
    total_data = loadData.execute(N=feature_num, file_name=file_name)
    np.save(r"G:\data\SCADA数据/total_data.npy", total_data)
    total_data = normalization(data=total_data)
    train_data_healthy, train_label1_healthy, train_label2_healthy = get_training_data_overlapping(
        total_data[:healthy_date, :], is_Healthy=True)
@@ -636,7 +637,7 @@ if __name__ == '__main__':
    # train_step_one(train_data=train_data_healthy[:256, :, :], train_label1=train_label1_healthy[:256, :],
    #                train_label2=train_label2_healthy[:256, ])
    #### model training
    train_step_one(train_data=train_data_healthy, train_label1=train_label1_healthy, train_label2=train_label2_healthy)
    # train_step_one(train_data=train_data_healthy, train_label1=train_label1_healthy, train_label2=train_label2_healthy)

    # load the model already trained in step one: one copy keeps training, the other only produces outputs
    step_one_model = Joint_Monitoring()
@@ -658,7 +659,7 @@ if __name__ == '__main__':
    #           healthy_label=train_label1_healthy[
    #                         healthy_size - 2 * unhealthy_size:healthy_size - 2 * unhealthy_size + 200, :],
    #           unhealthy_data=train_data_unhealthy[:200, :], unhealthy_label=train_label1_unhealthy[:200, :],isSave=True)

    #
    getResult(step_one_model, healthy_data=train_data_healthy[healthy_size - 2 * unhealthy_size:, :],
              healthy_label=train_label1_healthy[healthy_size - 2 * unhealthy_size:, :],
              unhealthy_data=train_data_unhealthy, unhealthy_label=train_label1_unhealthy, isSave=True)

@@ -36,7 +36,7 @@ K = 18
namuda = 0.01
'''save names'''
# save_name = "E:\self_example\TensorFlow_eaxmple\Model_train_test\condition_monitoring\hard_model\weight\joint_timestamp120_feature10_weight_epoch11_0.0077/weight"
save_name = "./model/weight/{0}/weight".format(model_name,
save_name = "./model/weight/{0}_epoch3_2.47_1.63/weight".format(model_name,
                                                                time_stamp,
                                                                feature_num,
                                                                batch_size,
@@ -47,12 +47,12 @@ save_step_two_name = "../hard_model/two_weight/{0}_timestamp{1}_feature{2}_weigh
                                                                                   batch_size,
                                                                                   EPOCH)

save_mse_name = "./mse/RNet_S/{0}_timestamp{1}_feature{2}_mse.csv".format(model_name,
save_mse_name = "./mse/{0}/{0}_timestamp{1}_feature{2}_mse.csv".format(model_name,
                                                                       time_stamp,
                                                                       feature_num,
                                                                       batch_size,
                                                                       EPOCH)
save_max_name = "./mse/RNet_S/{0}_timestamp{1}_feature{2}_max.csv".format(model_name,
save_max_name = "./mse/{0}/{0}_timestamp{1}_feature{2}_max.csv".format(model_name,
                                                                       time_stamp,
                                                                       feature_num,
                                                                       batch_size,
@@ -246,9 +246,6 @@ def EWMA(data, K=K, namuda=namuda):
    pass


def condition_monitoring_model():
    input = tf.keras.Input(shape=[time_stamp, feature_num])
    conv1 = tf.keras.layers.Conv1D(filters=256, kernel_size=1)(input)
@@ -586,7 +583,7 @@ def getResult(model: tf.keras.Model, healthy_data, healthy_label, unhealthy_data
     mseList, meanList, maxList = get_MSE(healthy_data, healthy_label, model)
     mse1List, _, _ = get_MSE(unhealthy_data, unhealthy_label, model, isStandard=False)

-    for mse, mean, max, mse1,j in zip(mseList, meanList, maxList, mse1List,range(3)):
+    for mse, mean, max, mse1, j in zip(mseList, meanList, maxList, mse1List, range(3)):

         # false-alarm rate computation
         total, = mse.shape
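A hedged sketch of the two rates this loop reports, under the usual thresholding convention (healthy errors above the limit count as false alarms, unhealthy errors at or below it count as misses); the function and variable names here are illustrative assumptions:

    import numpy as np

    def alarm_rates(healthy_mse, unhealthy_mse, threshold):
        false_alarm_rate = np.mean(healthy_mse > threshold)   # healthy flagged as faulty
        miss_rate = np.mean(unhealthy_mse <= threshold)       # faults left unflagged
        return false_alarm_rate, miss_rate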
@@ -613,8 +610,8 @@ def getResult(model: tf.keras.Model, healthy_data, healthy_label, unhealthy_data
         print("Miss rate:", miss_rate)

         # overall plot
-        print("mse:", mse)
-        print("mse1:", mse1)
+        # print("mse:", mse)
+        # print("mse1:", mse1)
         print("============================================")
         total_mse = np.concatenate([mse, mse1], axis=0)
         total_max = np.broadcast_to(max[0], shape=[total_mse.shape[0], ])

@@ -622,12 +619,11 @@ def getResult(model: tf.keras.Model, healthy_data, healthy_label, unhealthy_data
         total_mean = np.broadcast_to(mean[0], shape=[total_mse.shape[0], ])

         if isSave:
-            save_mse_name1=save_mse_name[:-4]+"_predict"+str(j+1)+".csv"
-            save_max_name1=save_max_name[:-4]+"_predict"+str(j+1)+".csv"
-
-            np.savetxt(save_mse_name1,total_mse, delimiter=',')
-            np.savetxt(save_max_name1,total_max, delimiter=',')
+            save_mse_name1 = save_mse_name[:-4] + "_predict" + str(j + 1) + ".csv"
+            save_max_name1 = save_max_name[:-4] + "_predict" + str(j + 1) + ".csv"
+
+            np.savetxt(save_mse_name1, total_mse, delimiter=',')
+            np.savetxt(save_max_name1, total_max, delimiter=',')

         plt.figure(random.randint(1, 100))
         plt.plot(total_max)
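The [:-4] slice in the rewritten lines strips the ".csv" extension before appending a per-output suffix; a quick illustration with an assumed base name:

    save_mse_name = "RNet_timestamp120_feature10_mse.csv"  # assumed example value
    j = 0
    save_mse_name1 = save_mse_name[:-4] + "_predict" + str(j + 1) + ".csv"
    print(save_mse_name1)  # RNet_timestamp120_feature10_mse_predict1.csv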
@@ -639,7 +635,8 @@ def getResult(model: tf.keras.Model, healthy_data, healthy_label, unhealthy_data


 if __name__ == '__main__':
-    total_data = loadData.execute(N=feature_num, file_name=file_name)
+    # total_data = loadData.execute(N=feature_num, file_name=file_name)
+    total_data=np.load("G:\data\SCADA数据\靖边8号处理后的数据\原始10SCADA数据/total_data.npy")
     total_data = normalization(data=total_data)
     train_data_healthy, train_label1_healthy, train_label2_healthy = get_training_data_overlapping(
         total_data[:healthy_date, :], is_Healthy=True)
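This change swaps the loadData.execute preprocessing for a cached array: save once with np.save, then reload cheaply with np.load on later runs. A sketch of that roundtrip with an illustrative path:

    import numpy as np

    arr = np.arange(12.0).reshape(3, 4)
    np.save("total_data.npy", arr)      # one-time cache write
    cached = np.load("total_data.npy")  # cheap reload on subsequent runs
    assert (arr == cached).all()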
@@ -649,8 +646,7 @@ if __name__ == '__main__':
     #### TODO step-one training
     # single-run test
     # train_step_one(train_data=train_data_healthy[:32, :, :], train_label1=train_label1_healthy[:32, :],train_label2=train_label2_healthy[:32, ])
-    train_step_one(train_data=train_data_healthy, train_label1=train_label1_healthy, train_label2=train_label2_healthy)

     # train_step_one(train_data=train_data_healthy, train_label1=train_label1_healthy, train_label2=train_label2_healthy)

     # load the model already trained in step one: one copy keeps training, the other only outputs results
     step_one_model = Joint_Monitoring()

@@ -674,7 +670,7 @@ if __name__ == '__main__':

     getResult(step_one_model, healthy_data=train_data_healthy[healthy_size - 2 * unhealthy_size:, :],
               healthy_label=train_label1_healthy[healthy_size - 2 * unhealthy_size:, :],
-              unhealthy_data=train_data_unhealthy, unhealthy_label=train_label1_unhealthy,isSave=True)
+              unhealthy_data=train_data_unhealthy, unhealthy_label=train_label1_unhealthy, isSave=True)

     ### TODO show all results
     # all_data, _, _ = get_training_data_overlapping(

@@ -685,3 +681,6 @@ if __name__ == '__main__':
     # showResult(step_two_model, test_data=all_data, isPlot=True)

     pass

@@ -42,7 +42,11 @@ save_max_name = "./mse/ResNet/{0}_timestamp{1}_feature{2}_max.csv".format(model_
                                                                          feature_num,
                                                                          batch_size,
                                                                          EPOCH)

+save_mse_name1 = "./mse/ResNet/{0}_timestamp{1}_feature{2}_totalresult.csv".format(model_name,
+                                                                                   time_stamp,
+                                                                                   feature_num,
+                                                                                   batch_size,
+                                                                                   EPOCH)
 '''file name'''
 file_name = "G:\data\SCADA数据\jb4q_8_delete_total_zero.csv"

@@ -232,13 +236,15 @@ def showResult(step_two_model: tf.keras.Model, test_data, isPlot: bool = False,

     size, length, dims = test_data.shape
     predict_label = step_two_model.predict(test_data)
-    total_result = np.reshape(total_result, [total_result.__len__(), -1])
-    total_result = np.reshape(total_result, [-1, ])
+    predict_label=predict_label.flatten()
+    # total_result = np.reshape(predict_label, [predict_label.__len__(), -1])
+    # total_data=np.flatten(total_result)
+    total_result = predict_label

     # computation of false-alarm rate, miss rate, and accuracy

     if isSave:
-        np.savetxt(save_mse_name, total_result, delimiter=',')
+        np.savetxt(save_mse_name1, total_result, delimiter=',')
     if isPlot:
         plt.figure(1, figsize=(6.0, 2.68))
         plt.subplots_adjust(left=0.1, right=0.94, bottom=0.2, top=0.9, wspace=None,
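The rewrite collapses the two np.reshape calls into a single flatten(); both yield the same 1-D result, as the small check below shows. (The commented-out np.flatten is not an existing NumPy function, which is presumably why that line stays disabled.)

    import numpy as np

    a = np.arange(6).reshape(2, 3)
    assert (a.flatten() == np.reshape(a, [-1])).all()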
@@ -351,7 +357,8 @@ def plot_confusion_matrix_accuracy(cls, true_labels, predict_labels):
 if __name__ == '__main__':
     # # data loading
     #
-    total_data = loadData.execute(N=feature_num, file_name=file_name)
+    # total_data = loadData.execute(N=feature_num, file_name=file_name)
+    total_data = np.load("G:\data\SCADA数据\靖边8号处理后的数据\原始10SCADA数据/total_data.npy")
     total_data = normalization(data=total_data)
     train_data_healthy, train_label1_healthy, train_label2_healthy = get_training_data_overlapping(
         total_data[:healthy_date, :], is_Healthy=True)

@@ -394,9 +401,9 @@ if __name__ == '__main__':
     healthy_size, _, _ = train_data_healthy.shape
     unhealthy_size, _, _ = train_data_unhealthy.shape
     all_data, _, _ = get_training_data_overlapping(
-        total_data[healthy_size - 2 * unhealthy_size:unhealthy_date, :], is_Healthy=True)
+        total_data[0:, :], is_Healthy=True)

-    showResult(step_two_model, test_data=all_data, isPlot=True)
+    showResult(model, test_data=all_data, isPlot=True)

     # trained_data = tf.keras.models.Model(inputs=model.input, outputs=model.get_layer('bn_last').output).predict(
     #     train_data)
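The new slice total_data[0:, :] runs from index 0 to the end, i.e. it selects the whole array, so showResult now evaluates every window rather than only the tail segment:

    import numpy as np

    x = np.arange(10).reshape(5, 2)
    assert (x[0:, :] == x).all()  # "0:" spans the entire first axis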

@@ -4,6 +4,9 @@
 import matplotlib.pyplot as plt
 import numpy as np
 import random
+import pandas as pd
+import seaborn as sns
+from condition_monitoring.data_deal.loadData import read_data

 '''
 @Author : dingjiawen

@@ -19,6 +22,7 @@ mse_file_name="E:\self_example\TensorFlow_eaxmple\Model_train_test\condition_mon

 max_file_name="E:\self_example\TensorFlow_eaxmple\Model_train_test\condition_monitoring\self_try\compare\mse\RNet_D\RNet_D_timestamp120_feature10_max_predict1.csv"

+source_path = "G:\data\SCADA数据\jb4q_8_delete_total_zero.csv"


 def plot_result(result_data):

@@ -126,6 +130,46 @@ def plot_MSE(total_MSE,total_max):
     pass


+def plot_Corr(data, label):
+    parameters = {
+        'figure.dpi': 600,
+        'figure.figsize': (2.8, 2),
+        'savefig.dpi': 600,
+        'xtick.direction': 'inout',
+        'ytick.direction': 'inout',
+        'xtick.labelsize': 3,
+        'ytick.labelsize': 3,
+        'legend.fontsize': 5,
+    }
+    plt.rcParams.update(parameters)
+    plt.figure()
+    plt.rc('font', family='Times New Roman')  # global font style
+    font1 = {'family': 'Times New Roman', 'weight': 'normal', 'size': 5}  # tick-label font size and family
+
+    print("Computing Pearson correlation coefficients")
+    pd_data = pd.DataFrame(data)
+    person = pd_data.corr()
+    print(person)
+    # draw the heatmap
+    cmap = sns.heatmap(person, annot=True, xticklabels=label, yticklabels=label,annot_kws={
+        'fontsize':2.6
+    })
+
+    plt.title("Heatmap of correlation coefficient matrix", size=7, fontdict=font1)
+    plt.tick_params(bottom=False, top=False, left=False, right=False,direction='inout',length=2,width=0.5)
+    plt.yticks(rotation=90)
+
+    # adjust the colorbar labels:
+    cbar = cmap.collections[0].colorbar
+    cbar.ax.tick_params(labelsize=4, labelcolor="black",length=2,width=0.5)
+    cbar.ax.set_ylabel(ylabel="color scale", color="black", loc="center", fontdict=font1)
+    # plt.axis('off')  # hide the axes
+
+    plt.savefig('./corr.png')
+
+    plt.show()
+    pass
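For reference, pd.DataFrame.corr() defaults to Pearson's r, i.e. the covariance normalized by both standard deviations. A minimal hand-rolled check (illustrative, not part of the commit):

    import numpy as np
    import pandas as pd

    def pearson_r(x, y):
        xc, yc = x - x.mean(), y - y.mean()
        return (xc * yc).sum() / np.sqrt((xc * xc).sum() * (yc * yc).sum())

    rng = np.random.default_rng(0)
    x, y = rng.normal(size=100), rng.normal(size=100)
    assert np.isclose(pearson_r(x, y),
                      pd.DataFrame({"x": x, "y": y}).corr().loc["x", "y"])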


 def test_result(file_name:str=result_file_name):
     # result_data = np.recfromcsv(file_name)
     result_data = np.loadtxt(file_name, delimiter=",")

@@ -148,9 +192,24 @@ def test_mse(mse_file_name:str=mse_file_name,max_file_name:str=max_file_name):
     plot_MSE(mse_data,max_data)


-if __name__ == '__main__':
-    # test_mse()
-    test_result()
+def test_corr(file_name=source_path,N=10):
+    needed_data, label = read_data(file_name=file_name, isNew=False)
+    print(needed_data)
+    print(needed_data.shape)
+    # plot_original_data(needed_data)
+    person = plot_Corr(needed_data, label)
+    person = np.array(person)
+    pass
+
+
+if __name__ == '__main__':
+    # test_mse()
+    # test_result()
+    test_corr()
+    pass

@@ -170,6 +170,7 @@ class Joint_Monitoring(keras.Model):
         # step three
         # classifier
         concat3 = tf.concat([output2, output3], axis=1)
+        # concat3 = tf.concat([output1, output2, output3], axis=1)
         # dropout = tf.keras.layers.Dropout(0.25)(concat3)
         d4 = self.d4(concat3)
         d5 = self.d5(d4)
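The classifier-head edit concatenates only two of the three branch outputs along the feature axis; a shape-level sketch with assumed dimensions:

    import tensorflow as tf

    output2 = tf.zeros([8, 16])  # assumed [batch, features]
    output3 = tf.zeros([8, 16])
    concat3 = tf.concat([output2, output3], axis=1)
    print(concat3.shape)  # (8, 32)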
@@ -288,7 +289,8 @@ class Joint_Monitoring(keras.Model):
         # f3 = tf.reshape(p3, shape=[B, -1])
         # step three
         # classifier
-        concat3 = tf.concat([output1, output2, output3], axis=1)
+        # concat3 = tf.concat([output1, output2, output3], axis=1)
+        concat3 = tf.concat([ output2, output3], axis=1)
         # dropout = tf.keras.layers.Dropout(0.25)(concat3)
         d4 = self.d4(concat3)
         d5 = self.d5(d4)