LeetCode update

This commit is contained in:
markilue 2022-10-28 17:00:14 +08:00
parent 4b279d5f1d
commit 3a2eadc687
6 changed files with 1473 additions and 98 deletions

View File

@ -0,0 +1,125 @@
package com.markilue.leecode.greedy;
import org.junit.Test;
import java.util.ArrayList;
import java.util.HashSet;
/**
* @BelongsProject: Leecode
* @BelongsPackage: com.markilue.leecode.greedy
* @Author: markilue
* @CreateTime: 2022-10-28 10:15
* @Description: TODO LeetCode problem 45: Jump Game II
* You are given a non-negative integer array nums and start at the first position of the array.
* Each element of the array represents the maximum jump length from that position.
* Your goal is to reach the last position of the array using the minimum number of jumps.
* @Version: 1.0
*/
public class Jump {
@Test
public void test() {
int[] nums = {2, 2, 2, 1, 4};
System.out.println(jump(nums));
}
@Test
public void test1() {
int[] nums = {2, 3, 1, 1, 4};
System.out.println(jump(nums));
}
@Test
public void test2() {
int[] nums = {1, 1, 1, 1, 1};
System.out.println(jump(nums));
}
@Test
public void test3() {
int[] nums = {1, 1};
System.out.println(jump(nums));
}
@Test
public void test4() {
int[] nums = {3, 2, 1};
System.out.println(jump(nums));
}
@Test
public void test5() {
int[] nums = {5, 4, 0, 1, 3, 6, 8, 0, 9, 4, 9, 1, 8, 7, 4, 8};
System.out.println(jump1(nums));
}
/**
* Greedy approach I: reach the last index with the fewest jumps, i.e. extend the covered range (cover) to the end as early as possible.
* Runtime beats 99.04% of submissions, memory beats 62.81%.
*
* @param nums
* @return
*/
public int jump(int[] nums) {
if (nums.length == 1) {
return 0;
}
int curDistance = 0; //farthest index covered so far
int step = 0; //number of steps taken
int nextDistance = 0; //farthest index covered by the next step
for (int i = 0; i < nums.length; i++) {
//update the farthest index reachable with the next step
nextDistance = Math.max(nums[i] + i, nextDistance);
//reached the farthest index covered so far
if (i == curDistance) {
if (curDistance != nums.length - 1) {
step++;//take one more step
curDistance = nextDistance;//update the farthest covered index
//the next step's coverage already includes the end, stop the loop
if (nextDistance >= nums.length - 1) break;
} else break;//the farthest covered index is already the end, no more steps are needed
}
}
return step;
}
/**
* Greedy approach II: while moving the index, add one step whenever the index reaches the farthest covered position, without special-casing whether that position is the end.
* Runtime beats 99.04% of submissions, memory beats 62.81%.
*
* @param nums
* @return
*/
public int jump1(int[] nums) {
int curDistance = 0; //farthest index covered so far
int step = 0; //number of steps taken
int nextDistance = 0; //farthest index covered by the next step
//note that the loop runs while i < nums.length - 1, which is the key point
for (int i = 0; i < nums.length - 1; i++) {
//update the farthest index reachable with the next step
nextDistance = Math.max(nums[i] + i, nextDistance);
//reached the farthest index covered so far
if (i == curDistance) {
curDistance = nextDistance;
step++;//take one more step
}
}
return step;
}
}

View File

@ -8,6 +8,13 @@ import random
import pandas as pd
import seaborn as sns
from condition_monitoring.data_deal.loadData import read_data
from model.Joint_Monitoring.Joint_Monitoring_banda import Joint_Monitoring
from model.Joint_Monitoring.compare.RNet_L import Joint_Monitoring as Joint_Monitoring_L
from model.Joint_Monitoring.compare.RNet_S import Joint_Monitoring as Joint_Monitoring_SE
import tensorflow as tf
import tensorflow.keras
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
'''
@Author : dingjiawen
@ -24,7 +31,12 @@ max_file_name = "E:\self_example\TensorFlow_eaxmple\Model_train_test\condition_m
source_path = "G:\data\SCADA数据\jb4q_8_delete_total_zero.csv"
list = [64.16, 65.26, 65.11, 66.6, 67.16, 66.28, 73.86, 75.24, 73.98, 76.7, 98.86, 99.45, 99.97]
model_name = "../hard_model/two_weight/banda_joint_epoch0_8887_9454/weight"
model_name1 = "../hard_model/two_weight/banda_joint_epoch14_9872_9863/weight"
file_name = "E:\跑模型\论文写作/DCAU.txt"
# list = [64.16, 65.26, 65.11, 66.6, 67.16, 66.28, 73.86, 75.24, 73.98, 76.7, 98.86, 99.45, 99.97]
def plot_result(result_data):
@ -39,10 +51,10 @@ def plot_result(result_data):
'legend.fontsize': 5,
}
plt.rcParams.update(parameters)
plt.figure()
plt.rc('font', family='Times New Roman') # global font style # draw the confusion matrix
fig,ax=plt.subplots(1, 1)
plt.rc('font', family='Times New Roman') # global font style
font1 = {'family': 'Times New Roman', 'weight': 'normal', 'size': 5} # font family and size for the axis labels
plt.scatter(list(range(result_data.shape[0])), result_data, c='black', s=0.5, label="predict")
plt.scatter(list(range(result_data.shape[0])), result_data, c='black', s=0.01, label="predict")
# draw the horizontal threshold line
plt.axhline(0.5, c='red', label='Failure threshold', lw=1)
# arrow pointing to the horizontal line above
@ -79,9 +91,49 @@ def plot_result(result_data):
plt.ylabel('Confidence', fontsize=5)
plt.xlabel('Time', fontsize=5)
plt.tight_layout()
# plt.legend(loc='best',edgecolor='black',fontsize=3)
plt.legend(loc='best', frameon=False, fontsize=3)
# plt.legend(loc='best', edgecolor='black', fontsize=4)
plt.legend(loc='best', frameon=False, fontsize=4)
# plt.grid()
# zoomed-in inset view
axins = inset_axes(ax, width="40%", height="30%", loc='lower left',
bbox_to_anchor=(0.1, 0.1, 1, 1),
bbox_transform=ax.transAxes)
axins.scatter(list(range(result_data.shape[0])), result_data, c='black', s=0.001, label="predict")
axins.axvline(result_data.shape[0] * 2 / 3, c='blue', ls='-.', lw=0.5, label='real fault')
plt.axhline(0.5, c='red', label='Failure threshold', lw=0.5)
# set the zoom interval
# set the zoom interval
zone_left = int(result_data.shape[0]*2/3-100)
zone_right = int(result_data.shape[0]*2/3)+100
x=list(range(result_data.shape[0]))
# axis expansion ratios (adjust according to the actual data)
x_ratio = 0.5 # expansion ratio of the x-axis display range
y_ratio = 0.5 # expansion ratio of the y-axis display range
mark_inset(ax, axins, loc1=4, loc2=2, fc="none", ec='k', lw=0.5)
# x-axis display range
xlim0 = x[zone_left] - (x[zone_right] - x[zone_left]) * x_ratio
xlim1 = x[zone_right] + (x[zone_right] - x[zone_left]) * x_ratio
# axins.tick_params(bottom=False)
axins.xaxis.set_visible(False)
# y-axis display range
# y = np.hstack((y_1[zone_left:zone_right], y_2[zone_left:zone_right], y_3[zone_left:zone_right]))
# ylim0 = np.min(y) - (np.max(y) - np.min(y)) * y_ratio
# ylim1 = np.max(y) + (np.max(y) - np.min(y)) * y_ratio
# adjust the display range of the inset axes
axins.set_xlim(xlim0, xlim1)
# axins.set_ylim(ylim0, ylim1)
plt.show()
pass
@ -133,31 +185,31 @@ def plot_MSE(total_MSE, total_max):
def plot_Corr(data, size: int = 1):
parameters = {
'figure.dpi': 600,
'figure.figsize': (2.8 * size, 2 * size),
'figure.figsize': (9, 7),
'savefig.dpi': 600,
'xtick.direction': 'inout',
'ytick.direction': 'inout',
'xtick.labelsize': 3 * size,
'ytick.labelsize': 3 * size,
'legend.fontsize': 5 * size,
'xtick.labelsize': 14,
'ytick.labelsize': 14,
'legend.fontsize': 12,
}
plt.rcParams.update(parameters)
plt.figure()
plt.rc('font', family='Times New Roman') # global font style # draw the confusion matrix
font1 = {'family': 'Times New Roman', 'weight': 'normal', 'size': 4 * size} # font family and size for the axis labels
# font1 = {'family': 'Times New Roman', 'weight': 'normal', 'size': 4 * size} # font family and size for the axis labels
print("计算皮尔逊相关系数")
pd_data = pd.DataFrame(data)
person = pd_data.corr()
print(person)
# draw the correlation heatmap
cmap = sns.heatmap(person, annot=True, annot_kws={
'fontsize': 2.6 * size
cmap = sns.heatmap(person, annot=True, cmap='Blues',annot_kws={
'fontsize': 11
})
classes = ['Gs', 'Gio', 'Gip', 'Gp', 'Gwt', 'En', 'Gft', 'Grt', 'Gwt', 'Et', 'Rs', 'Ap', 'Ws', 'Dw', 'Ges', 'Gt',
'Vx', 'Vy']
indices = range(len(person))
plt.title("Heatmap of correlation coefficient matrix", size=6 * size, fontdict=font1)
plt.title("Heatmap of correlation coefficient matrix", size=18)
# pad adjusts the gap between the tick labels and the axis
plt.tick_params(bottom=False, top=False, left=False, right=False, direction='inout', length=2, width=0.5, pad=1)
plt.xticks([index + 0.5 for index in indices], classes, rotation=0) # rotation sets the x tick label angle (rotation=45 tilts them by 45 degrees)
@ -165,11 +217,11 @@ def plot_Corr(data, size: int = 1):
# adjust the colorbar labels:
cbar = cmap.collections[0].colorbar
cbar.ax.tick_params(labelsize=4 * size, labelcolor="black", length=2, width=0.5, pad=1)
cbar.ax.set_ylabel(ylabel="color scale", color="black", loc="center", fontdict=font1)
cbar.ax.tick_params(labelsize=10, labelcolor="black", length=2, width=0.5, pad=1)
# cbar.ax.set_ylabel(ylabel="color scale", color="black", loc="center", size=12)
# plt.axis('off') # remove the axes
plt.savefig('./corr.png')
# plt.savefig('./corr.png')
plt.show()
pass
@ -220,6 +272,7 @@ def plot_bar(y_data):
plt.ylim([-0.01, 5])
plt.show()
def acc(y_data=list):
parameters = {
'figure.dpi': 600,
@ -227,9 +280,9 @@ def acc(y_data=list):
'savefig.dpi': 600,
'xtick.direction': 'in',
'ytick.direction': 'in',
'xtick.labelsize': 15,
'ytick.labelsize': 15,
'legend.fontsize': 12,
'xtick.labelsize': 18,
'ytick.labelsize': 20,
'legend.fontsize': 14,
}
plt.rcParams.update(parameters)
plt.figure()
@ -255,15 +308,16 @@ def acc(y_data=list):
# plt.tick_params(bottom=False, top=False, left=True, right=False, direction='in', pad=1)
plt.xticks([])
plt.ylabel('Accuracy(%)',fontsize=18)
# plt.xlabel('Time', fontsize=5)
plt.ylabel('Accuracy(%)', fontsize=20)
plt.xlabel('Methods', fontsize=20)
# plt.tight_layout()
num1, num2, num3, num4 = 0.08, 1, 3, 0
num1, num2, num3, num4 = 0, 1, 3, 0
plt.legend(bbox_to_anchor=(num1, num2), loc=num3, borderaxespad=num4, ncol=5, frameon=False, markerscale=0.5)
plt.ylim([60, 105])
plt.show()
def plot_FNR1(y_data):
parameters = {
'figure.dpi': 600,
@ -348,6 +402,84 @@ def plot_FNR2(y_data):
plt.show()
def plot_hot(data):
# draw the heatmap
plt.figure(1, figsize=(9, 12))
for i in range(8):
plt.subplot(8, 1, i + 1)
# cbar_kws configures the colorbar: fraction sets its size, pad sets the padding
cmap = sns.heatmap(np.expand_dims(data[i, :], axis=0), annot=False, cmap='Blues', linecolor='black',
linewidths=1,
cbar_kws={"orientation": "horizontal",
'fraction': 0.6,
'pad': 0.00
})
plt.title("Pred=0,Expc=0", size=18)
plt.axis('off') # remove the axes
# adjust the colorbar labels:
cbar = cmap.collections[0].colorbar
cbar.ax.tick_params(labelsize=15, labelcolor="black")
# cbar.ax.set_xlabel(xlabel="color scale", color="red", loc="center", fontdict=font2)
# plt.tight_layout()
plt.show()
pass
def plot_hot_one(data):
# draw the heatmap
# plt.figure(1, figsize=(9, 12))
plt.subplots(figsize=(14,2))
# cbar_kws configures the colorbar: fraction sets its size, pad sets the padding
cmap = sns.heatmap(data, annot=False, cmap='Blues', linecolor='black', linewidths=1,
cbar_kws={"orientation": "horizontal",
'fraction': 0.6,
'pad': 0.00
})
plt.title("Pred=0,Expc=0", size=18)
plt.axis('off') # remove the axes
# adjust the colorbar labels:
cbar = cmap.collections[0].colorbar
cbar.ax.tick_params(labelsize=15, labelcolor="black")
# cbar.ax.set_xlabel(xlabel="color scale", color="red", loc="center", fontdict=font2)
# plt.tight_layout()
plt.show()
pass
def plot_mse(file_name="../others_idea/mse"):
mse=np.loadtxt(file_name,delimiter=",")
mse= mse[2000:2338]
parameters = {
'figure.dpi': 600,
'figure.figsize': (7,5),
'savefig.dpi': 600,
'xtick.direction': 'in',
'ytick.direction': 'in',
'xtick.labelsize': 15,
'ytick.labelsize': 15,
'legend.fontsize': 10,
}
plt.rcParams.update(parameters)
plt.figure()
plt.rc('font', family='Times New Roman') # global font style # draw the confusion matrix
indices = [mse.shape[0] * i / 4 for i in range(5)]
classes = ['13/09/17', '14/09/17', '15/09/17', '16/09/17', '17/09/17']
# plt.xticks([index + 0.5 for index in indices], classes, rotation=25) # rotation sets the x tick label angle (rotation=45 tilts them by 45 degrees)
plt.ylabel('MSE', fontsize=15)
plt.xlabel('Time', fontsize=15)
plt.tight_layout()
plt.plot(mse)
plt.show()
def test_result(file_name: str = result_file_name):
# result_data = np.recfromcsv(file_name)
result_data = np.loadtxt(file_name, delimiter=",")
@ -365,8 +497,8 @@ def test_result(file_name: str = result_file_name):
print("漏报率", negative_rate)
# 画图
data = np.zeros([208, ])
result_data = np.concatenate([result_data, data], axis=0)
# data = np.zeros([208, ])
# result_data = np.concatenate([result_data, data], axis=0)
print(result_data)
print(result_data.shape)
plot_result(result_data)
@ -382,12 +514,15 @@ def test_mse(mse_file_name: str = mse_file_name, max_file_name: str = max_file_n
plot_MSE(mse_data, max_data)
def test_corr(file_name=source_path, N=10):
needed_data, label = read_data(file_name=file_name, isNew=False)
print(needed_data)
print(needed_data.shape)
# needed_data, label = read_data(file_name=file_name, isNew=False)
# print(needed_data)
# print(needed_data.shape)
# np.save("corr.npy",needed_data)
needed_data=np.load("corr.npy")
# plot_original_data(needed_data)
person = plot_Corr(needed_data, size=3)
person = plot_Corr(needed_data)
person = np.array(person)
pass
@ -396,6 +531,66 @@ def test_bar(y_data=list):
plot_bar(y_data)
def test_model_weight(model_name=model_name):
print("===============第一次===================")
model = Joint_Monitoring()
model.load_weights(model_name)
# model.build(input_shape=(32, 120, 10))
DCAU = model.get_layer('dynamic_channel_attention')
DCAU.build(input_shape=[16, 120, 20])
one = tf.ones(shape=[16, 120, 20])
weight = DCAU.call(one)[0, 0, :10]
print(weight)
weight = np.expand_dims(weight, axis=0)
plot_hot(weight)
pass
def test_model_weight_l(model_name=model_name):
print("===============第一次===================")
model = Joint_Monitoring_L()
model.load_weights(model_name)
# model.build(input_shape=(32, 120, 10))
# print(model.summary())
DCAU = model.get_layer('light_channel_attention')
DCAU.build(input_shape=[16, 120, 20])
one = tf.ones(shape=[16, 120, 20])
weight = DCAU.call(one)[0, 0, :10]
print(weight)
weight = np.expand_dims(weight, axis=0)
plot_hot_one(weight)
pass
def test_model_weight_s(model_name=model_name):
print("===============第一次===================")
model = Joint_Monitoring_SE()
model.load_weights(model_name)
# model.build(input_shape=(32, 120, 10))
# print(model.summary())
DCAU = model.get_layer('se_channel_attention')
DCAU.build(input_shape=[16, 120, 20])
one = tf.ones(shape=[16, 120, 20])
weight = DCAU.call(one)[0, 0, :10]
print(weight)
weight = np.expand_dims(weight, axis=0)
plot_hot_one(weight)
pass
def test_model_visualization(model_name=file_name):
with open(file_name, 'r', encoding='utf-8') as f:
data = np.loadtxt(f, str, delimiter=',')
needed_data = data[1:, 1:].astype(dtype=np.float)
print(needed_data.shape)
plot_hot(needed_data)
if __name__ == '__main__':
# test_mse()
# test_result(file_name='E:\self_example\TensorFlow_eaxmple\Model_train_test\condition_monitoring\self_try\compare\mse\RNet_C\RNet_C_timestamp120_feature10_result2.csv')
@ -407,5 +602,15 @@ if __name__ == '__main__':
# list=[99.99,98.95,99.95,96.1,95,99.65,76.25,72.64,75.87,68.74]
# plot_FNR1(list)
#
list=[3.43,1.99,1.92,2.17,1.63,1.81,1.78,1.8,0.6]
plot_FNR2(list)
# list=[3.43,1.99,1.92,2.17,1.63,1.81,1.78,1.8,0.6]
# plot_FNR2(list)
# inspect the weights of a particular layer of the network
# test_model_visualization()
file_name = "../self_try\compare\model\weight\RNet_L_banda_epoch17_0.0086_0.0092/weight"
test_model_weight_l(file_name)
# file_name = "../self_try\compare\model\weight\RNet_S_epoch3_2.47_1.63/weight"
# test_model_weight_l(file_name)
# standalone prediction plot
# plot_mse()

View File

@ -0,0 +1,579 @@
# -*- coding: utf-8 -*-
# coding: utf-8
import tensorflow as tf
import tensorflow.keras
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from model.DepthwiseCon1D.DepthwiseConv1D import DepthwiseConv1D
from model.Dynamic_channelAttention.Dynamic_channelAttention import DynamicChannelAttention
from condition_monitoring.data_deal import loadData_daban as loadData
from model.Joint_Monitoring.Joint_Monitoring_banda import Joint_Monitoring
from model.CommonFunction.CommonFunction import *
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import load_model, save_model
'''
@Author : dingjiawen
@Date : 2022/7/8 10:29
@Usage : try to combine prediction and classification for joint monitoring
@Desc : RepVGG + upsampling + GRU for reconstruction, followed by GDP (global dynamic pooling) + a classifier;
an MSE loss that decays with the epoch plus a cross-entropy loss that strengthens with the epoch
'''
'''Hyperparameter settings'''
time_stamp = 120
feature_num = 10
batch_size = 32
learning_rate = 0.001
EPOCH = 101
model_name = "banda_joint"
'''EWMA hyperparameters'''
K = 18
namuda = 0.01
'''Save paths'''
save_name = "../hard_model/weight/{0}_epoch16_0.0009_0.0014/weight".format(model_name,
time_stamp,
feature_num,
batch_size,
EPOCH)
save_step_two_name = "../hard_model/two_weight/{0}/weight".format(model_name,
time_stamp,
feature_num,
batch_size,
EPOCH)
# save_name = "../model/joint/{0}_timestamp{1}_feature{2}.h5".format(model_name,
# time_stamp,
# feature_num,
# batch_size,
# EPOCH)
# save_step_two_name = "../model/joint_two/{0}_timestamp{1}_feature{2}.h5".format(model_name,
# time_stamp,
# feature_num,
# batch_size,
# EPOCH)
'''File name'''
file_name = "G:\data\SCADA数据\SCADA_已处理_粤水电达坂城2020.1月-5月\风机15.csv"
'''
File notes: jb4q_8_delete_total_zero.csv is the file where only the all-zero columns were removed.
Rows 0:96748 are all normal values (2019/12/30 00:00:00 - 2020/3/11 05:58:00).
Rows 96748:107116 are all abnormal values (2020/3/11 05:58:01 - 2021/3/18 11:04:00).
'''
'''File parameters'''
# last normal time index
healthy_date = 96748
# last abnormal time index
unhealthy_date = 107116
# abnormality tolerance
unhealthy_patience = 5
def remove(data, time_stamp=time_stamp):
rows, cols = data.shape
print("remove_data.shape:", data.shape)
num = int(rows / time_stamp)
return data[:num * time_stamp, :]
pass
# non-overlapping sampling
def get_training_data(data, time_stamp: int = time_stamp):
removed_data = remove(data=data)
rows, cols = removed_data.shape
print("removed_data.shape:", data.shape)
print("removed_data:", removed_data)
train_data = np.reshape(removed_data, [-1, time_stamp, cols])
print("train_data:", train_data)
batchs, time_stamp, cols = train_data.shape
for i in range(1, batchs):
each_label = np.expand_dims(train_data[i, 0, :], axis=0)
if i == 1:
train_label = each_label
else:
train_label = np.concatenate([train_label, each_label], axis=0)
print("train_data.shape:", train_data.shape)
print("train_label.shape", train_label.shape)
return train_data[:-1, :], train_label
# overlapping sampling
def get_training_data_overlapping(data, time_stamp: int = time_stamp, is_Healthy: bool = True):
rows, cols = data.shape
train_data = np.empty(shape=[rows - time_stamp - 1, time_stamp, cols])
train_label = np.empty(shape=[rows - time_stamp - 1, cols])
for i in range(rows):
if i + time_stamp >= rows:
break
if i + time_stamp < rows - 1:
train_data[i] = data[i:i + time_stamp]
train_label[i] = data[i + time_stamp]
print("重叠采样以后:")
print("data:", train_data) # (300334,120,10)
print("label:", train_label) # (300334,10)
if is_Healthy:
train_label2 = np.ones(shape=[train_label.shape[0]])
else:
train_label2 = np.zeros(shape=[train_label.shape[0]])
print("label2:", train_label2)
return train_data, train_label, train_label2
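# A toy illustration (hypothetical array, not from the SCADA data) of the overlapping
# sampling above: with rows = 10, time_stamp = 4 and cols = 3 it yields windows of shape
# (rows - time_stamp - 1, time_stamp, cols) and one-step-ahead targets of shape
# (rows - time_stamp - 1, cols), plus a constant healthy/unhealthy flag per window.
# toy = np.arange(30, dtype=float).reshape(10, 3)
# windows, targets, flags = get_training_data_overlapping(toy, time_stamp=4, is_Healthy=True)
# print(windows.shape, targets.shape, flags.shape)  # (5, 4, 3) (5, 3) (5,)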
# RepConv: re-parameterized convolution
def RepConv(input_tensor, k=3):
_, _, output_dim = input_tensor.shape
conv1 = tf.keras.layers.Conv1D(filters=output_dim, kernel_size=k, strides=1, padding='SAME')(input_tensor)
b1 = tf.keras.layers.BatchNormalization()(conv1)
conv2 = tf.keras.layers.Conv1D(filters=output_dim, kernel_size=1, strides=1, padding='SAME')(input_tensor)
b2 = tf.keras.layers.BatchNormalization()(conv2)
b3 = tf.keras.layers.BatchNormalization()(input_tensor)
out = tf.keras.layers.Add()([b1, b2, b3])
out = tf.nn.relu(out)
return out
# RepBlock module
def RepBlock(input_tensor, num: int = 3):
for i in range(num):
input_tensor = RepConv(input_tensor)
return input_tensor
# GAP: global average pooling
def Global_avg_channelAttention(input_tensor):
_, length, channel = input_tensor.shape
DWC1 = DepthwiseConv1D(kernel_size=1, padding='SAME')(input_tensor)
GAP = tf.keras.layers.GlobalAvgPool1D()(DWC1)
c1 = tf.keras.layers.Conv1D(filters=channel, kernel_size=1, padding='SAME')(GAP)
s1 = tf.nn.sigmoid(c1)
output = tf.multiply(input_tensor, s1)
return output
# GDP: global dynamic pooling
def Global_Dynamic_channelAttention(input_tensor):
_, length, channel = input_tensor.shape
DWC1 = DepthwiseConv1D(kernel_size=1, padding='SAME')(input_tensor)
# GAP
GAP = tf.keras.layers.GlobalAvgPool1D()(DWC1)
c1 = tf.keras.layers.Conv1D(filters=channel, kernel_size=1, padding='SAME')(GAP)
s1 = tf.nn.sigmoid(c1)
# GMP
GMP = tf.keras.layers.GlobalMaxPool1D()(DWC1)
c2 = tf.keras.layers.Conv1D(filters=channel, kernel_size=1, padding='SAME')(GMP)
s3 = tf.nn.sigmoid(c2)
output = tf.multiply(input_tensor, s1)
return output
# min-max normalization
def normalization(data):
rows, cols = data.shape
print("归一化之前:", data)
print(data.shape)
print("======================")
# normalize
max = np.max(data, axis=0)
max = np.broadcast_to(max, [rows, cols])
min = np.min(data, axis=0)
min = np.broadcast_to(min, [rows, cols])
data = (data - min) / (max - min)
print("归一化之后:", data)
print(data.shape)
return data
# standardization (z-score)
def Regularization(data):
rows, cols = data.shape
print("正则化之前:", data)
print(data.shape)
print("======================")
# standardize
mean = np.mean(data, axis=0)
mean = np.broadcast_to(mean, shape=[rows, cols])
dst = np.sqrt(np.var(data, axis=0))
dst = np.broadcast_to(dst, shape=[rows, cols])
data = (data - mean) / dst
print("正则化之后:", data)
print(data.shape)
return data
pass
def EWMA(data, K=K, namuda=namuda):
# what t should be here is still unclear; t = 0 is used for now
t = 0
mid = np.mean(data, axis=0)
standard = np.sqrt(np.var(data, axis=0))
UCL = mid + K * standard * np.sqrt(namuda / (2 - namuda) * (1 - (1 - namuda) ** 2 * t))
LCL = mid - K * standard * np.sqrt(namuda / (2 - namuda) * (1 - (1 - namuda) ** 2 * t))
return mid, UCL, LCL
pass
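# A hedged per-sample variant of the EWMA limits above, assuming (as in a textbook EWMA
# control chart) that t is the sample index and that the decay term is (1 - namuda)**(2*t).
# Note that EWMA() above evaluates its expression at t = 0, which, with the precedence as
# written, reduces to the constant steady-state width K * std * sqrt(namuda / (2 - namuda)).
def EWMA_per_sample(data, K=K, namuda=namuda):
    mid = np.mean(data, axis=0)
    standard = np.sqrt(np.var(data, axis=0))
    t = np.arange(1, data.shape[0] + 1)[:, None]  # one pair of limits per time step
    width = K * standard * np.sqrt(namuda / (2 - namuda) * (1 - (1 - namuda) ** (2 * t)))
    return mid, mid + width, mid - width  # mid, UCL(t), LCL(t)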
def get_MSE(data, label, new_model):
predicted_data = new_model.predict(data)
temp = np.abs(predicted_data - label)
temp1 = (temp - np.broadcast_to(np.mean(temp, axis=0), shape=predicted_data.shape))
temp2 = np.broadcast_to(np.sqrt(np.var(temp, axis=0)), shape=predicted_data.shape)
temp3 = temp1 / temp2
mse = np.sum((temp1 / temp2) ** 2, axis=1)
print("z:", mse)
print(mse.shape)
# mse=np.mean((predicted_data-label)**2,axis=1)
print("mse", mse)
dims, = mse.shape
mean = np.mean(mse)
std = np.sqrt(np.var(mse))
max = mean + 3 * std
# min = mean-3*std
max = np.broadcast_to(max, shape=[dims, ])
# min = np.broadcast_to(min,shape=[dims,])
mean = np.broadcast_to(mean, shape=[dims, ])
# plt.plot(max)
# plt.plot(mse)
# plt.plot(mean)
# # plt.plot(min)
# plt.show()
#
#
return mse, mean, max
# pass
def condition_monitoring_model():
input = tf.keras.Input(shape=[time_stamp, feature_num])
conv1 = tf.keras.layers.Conv1D(filters=256, kernel_size=1)(input)
GRU1 = tf.keras.layers.GRU(128, return_sequences=False)(conv1)
d1 = tf.keras.layers.Dense(300)(GRU1)
output = tf.keras.layers.Dense(10)(d1)
model = tf.keras.Model(inputs=input, outputs=output)
return model
# train_data:(300455,120,10)
# train_label1:(300455,10)
# train_label2:(300455,)
def shuffle(train_data, train_label1, train_label2, is_split: bool = False, split_size: float = 0.2):
(train_data, test_data, train_label1, test_label1, train_label2, test_label2) = train_test_split(train_data,
train_label1,
train_label2,
test_size=split_size,
shuffle=True,
random_state=100)
if is_split:
return train_data, train_label1, train_label2, test_data, test_label1, test_label2
train_data = np.concatenate([train_data, test_data], axis=0)
train_label1 = np.concatenate([train_label1, test_label1], axis=0)
train_label2 = np.concatenate([train_label2, test_label2], axis=0)
# print(train_data.shape)
# print(train_label1.shape)
# print(train_label2.shape)
# print(train_data.shape)
return train_data, train_label1, train_label2
pass
def split_test_data(healthy_data, healthy_label1, healthy_label2, unhealthy_data, unhealthy_label1, unhealthy_label2,
split_size: float = 0.2, shuffle: bool = True):
data = np.concatenate([healthy_data, unhealthy_data], axis=0)
label1 = np.concatenate([healthy_label1, unhealthy_label1], axis=0)
label2 = np.concatenate([healthy_label2, unhealthy_label2], axis=0)
(train_data, test_data, train_label1, test_label1, train_label2, test_label2) = train_test_split(data,
label1,
label2,
test_size=split_size,
shuffle=shuffle,
random_state=100)
# print(train_data.shape)
# print(train_label1.shape)
# print(train_label2.shape)
# print(train_data.shape)
return train_data, train_label1, train_label2, test_data, test_label1, test_label2
pass
# train_data:(300455,120,10)
# train_label1:(300455,10)
# train_label2:(300455,)
def train_step_one(train_data, train_label1, train_label2):
model = Joint_Monitoring()
# # # # TODO the model has to be built/called once before model.summary() can be printed
# model.build(input_shape=(batch_size, filter_num, dims))
# model.summary()
history_loss = []
history_val_loss = []
learning_rate = 1e-3
for epoch in range(EPOCH):
print()
print("EPOCH:", epoch, "/", EPOCH, ":")
train_data, train_label1, train_label2 = shuffle(train_data, train_label1, train_label2)
if epoch == 0:
train_data, train_label1, train_label2, val_data, val_label1, val_label2 = shuffle(train_data, train_label1,
train_label2,
is_split=True)
# print()
# print("EPOCH:", epoch, "/", EPOCH, ":")
# lets train know which mini-batch of this epoch this is
z = 0
# accumulate batch_size samples before each training step
k = 1
for data_1, label_1, label_2 in zip(train_data, train_label1, train_label2):
size, _, _ = train_data.shape
data_1 = tf.expand_dims(data_1, axis=0)
label_1 = tf.expand_dims(label_1, axis=0)
label_2 = tf.expand_dims(label_2, axis=0)
if batch_size != 1:
if k % batch_size == 1:
data = data_1
label1 = label_1
label2 = label_2
else:
data = tf.concat([data, data_1], axis=0)
label1 = tf.concat([label1, label_1], axis=0)
label2 = tf.concat([label2, label_2], axis=0)
else:
data = data_1
label1 = label_1
label2 = label_2
if k % batch_size == 0:
# label = tf.expand_dims(label, axis=-1)
loss_value, accuracy_value = model.train(input_tensor=data, label1=label1, label2=label2,
learning_rate=learning_rate,
is_first_time=True)
print(z * batch_size, "/", size, ":===============>", "loss:", loss_value.numpy())
k = 0
z = z + 1
k = k + 1
val_loss, val_accuracy = model.get_val_loss(val_data=val_data, val_label1=val_label1, val_label2=val_label2,
is_first_time=True)
SaveBestModel(model=model, save_name=save_name, history_loss=history_val_loss, loss_value=val_loss.numpy())
# SaveBestH5Model(model=model, save_name=save_name, history_loss=history_val_loss, loss_value=val_loss.numpy())
history_val_loss.append(val_loss)
history_loss.append(loss_value.numpy())
print('Training loss is :', loss_value.numpy())
print('Validating loss is :', val_loss.numpy())
if IsStopTraining(history_loss=history_val_loss, patience=7):
break
if Is_Reduce_learning_rate(history_loss=history_val_loss, patience=3):
if learning_rate >= 1e-4:
learning_rate = learning_rate * 0.1
pass
def train_step_two(step_one_model, step_two_model, train_data, train_label1, train_label2):
# step_two_model = Joint_Monitoring()
# step_two_model.build(input_shape=(batch_size, time_stamp, feature_num))
# step_two_model.summary()
history_loss = []
history_val_loss = []
history_accuracy = []
learning_rate = 1e-3
for epoch in range(EPOCH):
print()
print("EPOCH:", epoch, "/", EPOCH, ":")
train_data, train_label1, train_label2 = shuffle(train_data, train_label1, train_label2)
if epoch == 0:
train_data, train_label1, train_label2, val_data, val_label1, val_label2 = shuffle(train_data, train_label1,
train_label2,
is_split=True)
# print()
# print("EPOCH:", epoch, "/", EPOCH, ":")
# lets train know which mini-batch of this epoch this is
z = 0
# accumulate batch_size samples before each training step
k = 1
accuracy_num = 0
for data_1, label_1, label_2 in zip(train_data, train_label1, train_label2):
size, _, _ = train_data.shape
data_1 = tf.expand_dims(data_1, axis=0)
label_1 = tf.expand_dims(label_1, axis=0)
label_2 = tf.expand_dims(label_2, axis=0)
if batch_size != 1:
if k % batch_size == 1:
data = data_1
label1 = label_1
label2 = label_2
else:
data = tf.concat([data, data_1], axis=0)
label1 = tf.concat([label1, label_1], axis=0)
label2 = tf.concat([label2, label_2], axis=0)
else:
data = data_1
label1 = label_1
label2 = label_2
if k % batch_size == 0:
# label = tf.expand_dims(label, axis=-1)
output1, output2, output3, _ = step_one_model.call(inputs=data, is_first_time=True)
loss_value, accuracy_value = step_two_model.train(input_tensor=data, label1=label1, label2=label2,
learning_rate=learning_rate,
is_first_time=False, pred_3=output1, pred_4=output2,
pred_5=output3,epoch=epoch)
accuracy_num += accuracy_value
print(z * batch_size, "/", size, ":===============>", "loss:", loss_value.numpy(), "| accuracy:",
accuracy_num / ((z + 1) * batch_size))
k = 0
z = z + 1
k = k + 1
val_loss, val_accuracy = step_two_model.get_val_loss(val_data=val_data, val_label1=val_label1,
val_label2=val_label2,
is_first_time=False, step_one_model=step_one_model,epoch=epoch)
SaveBestModelByAccuracy(model=step_two_model, save_name=save_step_two_name, history_accuracy=history_accuracy,
accuracy_value=val_accuracy)
history_val_loss.append(val_loss)
history_loss.append(loss_value.numpy())
history_accuracy.append(val_accuracy)
print('Training loss is : {0} | Training accuracy is : {1}'.format(loss_value.numpy(),
accuracy_num / ((z + 1) * batch_size)))
print('Validating loss is : {0} | Validating accuracy is : {1}'.format(val_loss.numpy(), val_accuracy))
if IsStopTraining(history_loss=history_val_loss, patience=7):
break
if Is_Reduce_learning_rate(history_loss=history_val_loss, patience=3):
if learning_rate >= 1e-4:
learning_rate = learning_rate * 0.1
else:
print("学习率不再下降")
pass
def test(step_one_model, step_two_model, test_data, test_label1, test_label2):
history_loss = []
history_val_loss = []
val_loss, val_accuracy = step_two_model.get_val_loss(val_data=test_data, val_label1=test_label1,
val_label2=test_label2,
is_first_time=False, step_one_model=step_one_model)
history_val_loss.append(val_loss)
print("val_accuracy:", val_accuracy)
print("val_loss:", val_loss)
def showResult(step_two_model: Joint_Monitoring, test_data, isPlot: bool = False):
# get the total number of model parameters
# step_two_model.count_params()
total_result = []
size, length, dims = test_data.shape
for epoch in range(0, size - batch_size + 1, batch_size):
each_test_data = test_data[epoch:epoch + batch_size, :, :]
_, _, _, output4 = step_two_model.call(each_test_data, is_first_time=False)
total_result.append(output4)
total_result = np.reshape(total_result, [total_result.__len__(), -1])
total_result = np.reshape(total_result, [-1, ])
if isPlot:
plt.scatter(list(range(total_result.shape[0])), total_result, c='black', s=10)
# draw the horizontal threshold line
plt.axhline(0.5, c='red', label='Failure threshold')
# arrow pointing to the horizontal line above
# plt.arrow(35000, 0.9, 33000, 0.75, head_width=0.02, head_length=0.1, shape="full", fc='red', ec='red',
# alpha=0.9, overhang=0.5)
# plt.text(35000, 0.9, "Truth Fault", fontsize=10, color='black', verticalalignment='top')
plt.axvline(test_data.shape[0] * 2 / 3, c='blue', ls='-.')
plt.xlabel("time")
plt.ylabel("confience")
plt.text(total_result.shape[0] * 4 / 5, 0.6, "Fault", fontsize=10, color='black', verticalalignment='top',
horizontalalignment='center',
bbox={'facecolor': 'grey',
'pad': 10})
plt.text(total_result.shape[0] * 1 / 3, 0.4, "Norm", fontsize=10, color='black', verticalalignment='top',
horizontalalignment='center',
bbox={'facecolor': 'grey',
'pad': 10})
plt.grid()
# plt.ylim(0, 1)
# plt.xlim(-50, 1300)
# plt.legend("", loc='upper left')
plt.show()
return total_result
if __name__ == '__main__':
total_data = loadData.execute(N=feature_num, file_name=file_name)
total_data = normalization(data=total_data)
train_data_healthy, train_label1_healthy, train_label2_healthy = get_training_data_overlapping(
total_data[:healthy_date, :], is_Healthy=True)
train_data_unhealthy, train_label1_unhealthy, train_label2_unhealthy = get_training_data_overlapping(
total_data[healthy_date - time_stamp + unhealthy_patience:unhealthy_date, :],
is_Healthy=False)
#### TODO step-one training
# single-run smoke test
# train_step_one(train_data=train_data_healthy[:128, :, :], train_label1=train_label1_healthy[:128, :],train_label2=train_label2_healthy[:128, ])
# train_step_one(train_data=train_data_healthy, train_label1=train_label1_healthy, train_label2=train_label2_healthy)
# load the model already trained in step one: one copy keeps training, the other only produces outputs
step_one_model = Joint_Monitoring()
step_one_model.load_weights(save_name)
step_two_model = Joint_Monitoring()
step_two_model.load_weights(save_name)
#### TODO step-two training
### healthy_data.shape: (300333,120,10)
### unhealthy_data.shape: (16594,10)
healthy_size, _, _ = train_data_healthy.shape
unhealthy_size, _, _ = train_data_unhealthy.shape
train_data, train_label1, train_label2, test_data, test_label1, test_label2 = split_test_data(
healthy_data=train_data_healthy[healthy_size - 2 * unhealthy_size:, :, :],
healthy_label1=train_label1_healthy[healthy_size - 2 * unhealthy_size:, :],
healthy_label2=train_label2_healthy[healthy_size - 2 * unhealthy_size:, ], unhealthy_data=train_data_unhealthy,
unhealthy_label1=train_label1_unhealthy, unhealthy_label2=train_label2_unhealthy)
train_step_two(step_one_model=step_one_model, step_two_model=step_two_model,
train_data=train_data,
train_label1=train_label1, train_label2=np.expand_dims(train_label2, axis=-1))
### TODO evaluate on the test set
step_one_model = Joint_Monitoring()
step_one_model.load_weights(save_name)
step_two_model = Joint_Monitoring()
step_two_model.load_weights(save_step_two_name)
# test(step_one_model=step_one_model, step_two_model=step_two_model, test_data=test_data, test_label1=test_label1,
# test_label2=np.expand_dims(test_label2, axis=-1))
### TODO show the results on all of the data
all_data, _, _ = get_training_data_overlapping(
total_data[healthy_size - 2 * unhealthy_size:unhealthy_date, :], is_Healthy=True)
# all_data = np.concatenate([])
# single-run smoke test
# showResult(step_two_model, test_data=all_data[:32], isPlot=True)
showResult(step_two_model, test_data=all_data, isPlot=True)
pass

View File

@ -17,7 +17,8 @@ import pandas as pd
import matplotlib.pyplot as plt
from model.DepthwiseCon1D.DepthwiseConv1D import DepthwiseConv1D
from model.Dynamic_channelAttention.Dynamic_channelAttention import DynamicChannelAttention
from condition_monitoring.data_deal import loadData
from condition_monitoring.data_deal import loadData_daban as loadData
from model.Joint_Monitoring.compare.RNet_L import Joint_Monitoring
from model.CommonFunction.CommonFunction import *
@ -28,16 +29,16 @@ import random
'''Hyperparameter settings'''
time_stamp = 120
feature_num = 10
batch_size = 16
batch_size = 32
learning_rate = 0.001
EPOCH = 101
model_name = "RNet_L"
model_name = "RNet_L_banda"
'''EWMA hyperparameters'''
K = 18
namuda = 0.01
'''Save paths'''
save_name = "./model/weight/{0}_timestamp{1}_feature{2}_weight/weight".format(model_name,
save_name = "./model/weight/{0}/weight".format(model_name,
time_stamp,
feature_num,
batch_size,
@ -70,7 +71,7 @@ save_max_name = "./mse/{0}/{0}_timestamp{1}_feature{2}_max.csv".format(model_nam
# batch_size,
# EPOCH)
'''File name'''
file_name = "G:\data\SCADA数据\jb4q_8_delete_total_zero.csv"
file_name = "G:\data\SCADA数据\SCADA_已处理_粤水电达坂城2020.1月-5月\风机15.csv"
'''
File notes: jb4q_8_delete_total_zero.csv is the file where only the all-zero columns were removed.
@ -79,9 +80,9 @@ file_name = "G:\data\SCADA数据\jb4q_8_delete_total_zero.csv"
'''
'''File parameters'''
# last normal time index
healthy_date = 415548
healthy_date = 96748
# last abnormal time index
unhealthy_date = 432153
unhealthy_date = 107116
# abnormality tolerance
unhealthy_patience = 5
@ -360,6 +361,8 @@ def train_step_one(train_data, train_label1, train_label2):
if Is_Reduce_learning_rate(history_loss=history_val_loss, patience=3):
if learning_rate >= 1e-4:
learning_rate = learning_rate * 0.1
else:
print("学习率不再下降")
pass
@ -435,6 +438,8 @@ def train_step_two(step_one_model, step_two_model, train_data, train_label1, tra
if Is_Reduce_learning_rate(history_loss=history_val_loss, patience=3):
if learning_rate >= 1e-4:
learning_rate = learning_rate * 0.1
else:
print("学习率不再下降")
pass
@ -624,8 +629,8 @@ def getResult(model: tf.keras.Model, healthy_data, healthy_label, unhealthy_data
if __name__ == '__main__':
# total_data = loadData.execute(N=feature_num, file_name=file_name)
total_data=np.load("G:\data\SCADA数据\靖边8号处理后的数据\原始10SCADA数据/total_data.npy")
total_data = loadData.execute(N=feature_num, file_name=file_name)
# total_data=np.load("G:\data\SCADA数据\靖边8号处理后的数据\原始10SCADA数据/total_data.npy")
total_data = normalization(data=total_data)
train_data_healthy, train_label1_healthy, train_label2_healthy = get_training_data_overlapping(
total_data[:healthy_date, :], is_Healthy=True)
@ -637,7 +642,7 @@ if __name__ == '__main__':
# train_step_one(train_data=train_data_healthy[:256, :, :], train_label1=train_label1_healthy[:256, :],
# train_label2=train_label2_healthy[:256, ])
#### model training
# train_step_one(train_data=train_data_healthy, train_label1=train_label1_healthy, train_label2=train_label2_healthy)
train_step_one(train_data=train_data_healthy, train_label1=train_label1_healthy, train_label2=train_label2_healthy)
# load the model already trained in step one: one copy keeps training, the other only produces outputs
step_one_model = Joint_Monitoring()

View File

@ -27,16 +27,16 @@ import random
'''Hyperparameter settings'''
time_stamp = 120
feature_num = 10
batch_size = 16
batch_size = 32
learning_rate = 0.001
EPOCH = 101
model_name = "RNet"
model_name = "RNet_banda"
'''EWMA hyperparameters'''
K = 18
namuda = 0.01
'''Save paths'''
save_name = "./model/weight/{0}_timestamp{1}_feature{2}_weight_epoch2_loss0.007/weight".format(model_name,
save_name = "./model/weight/{0}/weight".format(model_name,
time_stamp,
feature_num,
batch_size,
@ -58,7 +58,8 @@ save_step_two_name = "./model/two_weight/{0}_timestamp{1}_feature{2}_weight/weig
# batch_size,
# EPOCH)
'''File name'''
file_name = "G:\data\SCADA数据\jb4q_8_delete_total_zero.csv"
'''File name'''
file_name = "G:\data\SCADA数据\SCADA_已处理_粤水电达坂城2020.1月-5月\风机15.csv"
'''
File notes: jb4q_8_delete_total_zero.csv is the file where only the all-zero columns were removed.
@ -67,9 +68,9 @@ file_name = "G:\data\SCADA数据\jb4q_8_delete_total_zero.csv"
'''
'''File parameters'''
# last normal time index
healthy_date = 415548
healthy_date = 96748
# last abnormal time index
unhealthy_date = 432153
unhealthy_date = 107116
# abnormality tolerance
unhealthy_patience = 5

View File

@ -0,0 +1,460 @@
# _*_ coding: UTF-8 _*_
'''
@Author : dingjiawen
@Date : 2022/7/14 9:40
@Usage : joint monitoring model
@Desc : feed the predicted values into the classifier; the classifier uses two dense layers of decreasing width
'''
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras import *
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from model.DepthwiseCon1D.DepthwiseConv1D import DepthwiseConv1D
from model.Dynamic_channelAttention.Dynamic_channelAttention import DynamicChannelAttention, DynamicPooling
from condition_monitoring.data_deal import loadData
from model.LossFunction.smooth_L1_Loss import SmoothL1Loss
import math
class Joint_Monitoring(keras.Model):
def __init__(self, conv_filter=20,epochs=50):
# call the parent __init__()
super(Joint_Monitoring, self).__init__()
# step one
self.RepDCBlock1 = RevConvBlock(num=3, kernel_size=5)
self.conv1 = tf.keras.layers.Conv1D(filters=conv_filter, kernel_size=1, strides=2, padding='SAME')
self.upsample1 = tf.keras.layers.UpSampling1D(size=2)
self.DACU2 = DynamicChannelAttention()
self.RepDCBlock2 = RevConvBlock(num=3, kernel_size=3)
self.conv2 = tf.keras.layers.Conv1D(filters=2 * conv_filter, kernel_size=1, strides=2, padding='SAME')
self.upsample2 = tf.keras.layers.UpSampling1D(size=2)
self.DACU3 = DynamicChannelAttention()
self.RepDCBlock3 = RevConvBlock(num=3, kernel_size=3)
self.p1 = DynamicPooling(pool_size=2)
self.conv3 = tf.keras.layers.Conv1D(filters=2 * conv_filter, kernel_size=3, strides=2, padding='SAME')
self.DACU4 = DynamicChannelAttention()
self.RepDCBlock4 = RevConvBlock(num=3, kernel_size=3)
self.p2 = DynamicPooling(pool_size=4)
self.conv4 = tf.keras.layers.Conv1D(filters=conv_filter, kernel_size=3, strides=2, padding='SAME')
self.RepDCBlock5 = RevConvBlock(num=3, kernel_size=3)
self.p3 = DynamicPooling(pool_size=2)
# step two
# reconstruct the original data
self.GRU1 = tf.keras.layers.GRU(128, return_sequences=False)
self.d1 = tf.keras.layers.Dense(300, activation=tf.nn.leaky_relu)
self.output1 = tf.keras.layers.Dense(10, activation=tf.nn.leaky_relu)
self.GRU2 = tf.keras.layers.GRU(128, return_sequences=False)
self.d2 = tf.keras.layers.Dense(300, activation=tf.nn.leaky_relu)
self.output2 = tf.keras.layers.Dense(10, activation=tf.nn.leaky_relu)
self.GRU3 = tf.keras.layers.GRU(128, return_sequences=False)
self.d3 = tf.keras.layers.Dense(300, activation=tf.nn.leaky_relu)
self.output3 = tf.keras.layers.Dense(10, activation=tf.nn.leaky_relu)
# step three
# classifier
self.d4 = tf.keras.layers.Dense(300, activation=tf.nn.leaky_relu)
self.d5 = tf.keras.layers.Dense(10, activation=tf.nn.leaky_relu)
# tf.nn.softmax
self.output4 = tf.keras.layers.Dense(1, activation=tf.nn.sigmoid)
# loss
self.train_loss = []
self.epoch=0
self.epochs=epochs
def call(self, inputs, training=None, mask=None, is_first_time: bool = True):
# step one
RepDCBlock1 = self.RepDCBlock1(inputs)
RepDCBlock1 = tf.keras.layers.BatchNormalization()(RepDCBlock1)
conv1 = self.conv1(RepDCBlock1)
conv1 = tf.nn.leaky_relu(conv1)
conv1 = tf.keras.layers.BatchNormalization()(conv1)
upsample1 = self.upsample1(conv1)
DACU2 = self.DACU2(upsample1)
DACU2 = tf.keras.layers.BatchNormalization()(DACU2)
RepDCBlock2 = self.RepDCBlock2(DACU2)
RepDCBlock2 = tf.keras.layers.BatchNormalization()(RepDCBlock2)
conv2 = self.conv2(RepDCBlock2)
conv2 = tf.nn.leaky_relu(conv2)
conv2 = tf.keras.layers.BatchNormalization()(conv2)
upsample2 = self.upsample2(conv2)
DACU3 = self.DACU3(upsample2)
DACU3 = tf.keras.layers.BatchNormalization()(DACU3)
RepDCBlock3 = self.RepDCBlock3(DACU3)
RepDCBlock3 = tf.keras.layers.BatchNormalization()(RepDCBlock3)
conv3 = self.conv3(RepDCBlock3)
conv3 = tf.nn.leaky_relu(conv3)
conv3 = tf.keras.layers.BatchNormalization()(conv3)
concat1 = tf.concat([conv2, conv3], axis=1)
DACU4 = self.DACU4(concat1)
DACU4 = tf.keras.layers.BatchNormalization()(DACU4)
RepDCBlock4 = self.RepDCBlock4(DACU4)
RepDCBlock4 = tf.keras.layers.BatchNormalization()(RepDCBlock4)
conv4 = self.conv4(RepDCBlock4)
conv4 = tf.nn.leaky_relu(conv4)
conv4 = tf.keras.layers.BatchNormalization()(conv4)
concat2 = tf.concat([conv1, conv4], axis=1)
RepDCBlock5 = self.RepDCBlock5(concat2)
RepDCBlock5 = tf.keras.layers.BatchNormalization()(RepDCBlock5)
output1 = []
output2 = []
output3 = []
output4 = []
if is_first_time:
# step two
# reconstruct the original data
# fed from block3
GRU1 = self.GRU1(RepDCBlock3)
GRU1 = tf.keras.layers.BatchNormalization()(GRU1)
d1 = self.d1(GRU1)
# tf.nn.softmax
output1 = self.output1(d1)
# fed from block4
GRU2 = self.GRU2(RepDCBlock4)
GRU2 = tf.keras.layers.BatchNormalization()(GRU2)
d2 = self.d2(GRU2)
# tf.nn.softmax
output2 = self.output2(d2)
# fed from block5
GRU3 = self.GRU3(RepDCBlock5)
GRU3 = tf.keras.layers.BatchNormalization()(GRU3)
d3 = self.d3(GRU3)
# tf.nn.softmax
output3 = self.output3(d3)
else:
GRU1 = self.GRU1(RepDCBlock3)
GRU1 = tf.keras.layers.BatchNormalization()(GRU1)
d1 = self.d1(GRU1)
# tf.nn.softmax
output1 = self.output1(d1)
# fed from block4
GRU2 = self.GRU2(RepDCBlock4)
GRU2 = tf.keras.layers.BatchNormalization()(GRU2)
d2 = self.d2(GRU2)
# tf.nn.softmax
output2 = self.output2(d2)
# fed from block5
GRU3 = self.GRU3(RepDCBlock5)
GRU3 = tf.keras.layers.BatchNormalization()(GRU3)
d3 = self.d3(GRU3)
# tf.nn.softmax
output3 = self.output3(d3)
# multi-scale dynamic pooling
# p1 = self.p1(output1)
# B, _, _ = p1.shape
# f1 = tf.reshape(p1, shape=[B, -1])
# p2 = self.p2(output2)
# f2 = tf.reshape(p2, shape=[B, -1])
# p3 = self.p3(output3)
# f3 = tf.reshape(p3, shape=[B, -1])
# step three
# classifier
concat3 = tf.concat([output1, output2, output3], axis=1)
# dropout = tf.keras.layers.Dropout(0.25)(concat3)
d4 = self.d4(concat3)
d5 = self.d5(d4)
# d4 = tf.keras.layers.BatchNormalization()(d4)
output4 = self.output4(d5)
return output1, output2, output3, output4
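# Minimal usage sketch (shapes assumed from the training scripts: a batch of 32 windows,
# 120 time steps, 10 features): in step one (is_first_time=True) only the three GRU
# reconstruction heads are filled, in step two output4 additionally gives the health score.
# model = Joint_Monitoring()
# o1, o2, o3, o4 = model.call(tf.ones([32, 120, 10]), is_first_time=False)
# print(o1.shape, o2.shape, o3.shape, o4.shape)  # (32, 10) for o1-o3 and (32, 1) for o4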
def get_loss(self, inputs_tensor, label1=None, label2=None, is_first_time: bool = True, pred_3=None, pred_4=None,
pred_5=None):
# step one
RepDCBlock1 = self.RepDCBlock1(inputs_tensor)
RepDCBlock1 = tf.keras.layers.BatchNormalization()(RepDCBlock1)
conv1 = self.conv1(RepDCBlock1)
conv1 = tf.nn.leaky_relu(conv1)
conv1 = tf.keras.layers.BatchNormalization()(conv1)
upsample1 = self.upsample1(conv1)
DACU2 = self.DACU2(upsample1)
DACU2 = tf.keras.layers.BatchNormalization()(DACU2)
RepDCBlock2 = self.RepDCBlock2(DACU2)
RepDCBlock2 = tf.keras.layers.BatchNormalization()(RepDCBlock2)
conv2 = self.conv2(RepDCBlock2)
conv2 = tf.nn.leaky_relu(conv2)
conv2 = tf.keras.layers.BatchNormalization()(conv2)
upsample2 = self.upsample2(conv2)
DACU3 = self.DACU3(upsample2)
DACU3 = tf.keras.layers.BatchNormalization()(DACU3)
RepDCBlock3 = self.RepDCBlock3(DACU3)
RepDCBlock3 = tf.keras.layers.BatchNormalization()(RepDCBlock3)
conv3 = self.conv3(RepDCBlock3)
conv3 = tf.nn.leaky_relu(conv3)
conv3 = tf.keras.layers.BatchNormalization()(conv3)
concat1 = tf.concat([conv2, conv3], axis=1)
DACU4 = self.DACU4(concat1)
DACU4 = tf.keras.layers.BatchNormalization()(DACU4)
RepDCBlock4 = self.RepDCBlock4(DACU4)
RepDCBlock4 = tf.keras.layers.BatchNormalization()(RepDCBlock4)
conv4 = self.conv4(RepDCBlock4)
conv4 = tf.nn.leaky_relu(conv4)
conv4 = tf.keras.layers.BatchNormalization()(conv4)
concat2 = tf.concat([conv1, conv4], axis=1)
RepDCBlock5 = self.RepDCBlock5(concat2)
RepDCBlock5 = tf.keras.layers.BatchNormalization()(RepDCBlock5)
if is_first_time:
# step two
# reconstruct the original data
# fed from block3
GRU1 = self.GRU1(RepDCBlock3)
GRU1 = tf.keras.layers.BatchNormalization()(GRU1)
d1 = self.d1(GRU1)
# tf.nn.softmax
output1 = self.output1(d1)
# fed from block4
GRU2 = self.GRU2(RepDCBlock4)
GRU2 = tf.keras.layers.BatchNormalization()(GRU2)
d2 = self.d2(GRU2)
# tf.nn.softmax
output2 = self.output2(d2)
# fed from block5
GRU3 = self.GRU3(RepDCBlock5)
GRU3 = tf.keras.layers.BatchNormalization()(GRU3)
d3 = self.d3(GRU3)
# tf.nn.softmax
output3 = self.output3(d3)
# reduce_mean collapses the dimensions to compute the mean
MSE_loss1 = SmoothL1Loss()(y_true=label1, y_pred=output1)
MSE_loss2 = SmoothL1Loss()(y_true=label1, y_pred=output2)
MSE_loss3 = SmoothL1Loss()(y_true=label1, y_pred=output3)
# MSE_loss1 = tf.reduce_mean(tf.keras.losses.mse(y_true=label1, y_pred=output1))
# MSE_loss2 = tf.reduce_mean(tf.keras.losses.mse(y_true=label1, y_pred=output2))
# MSE_loss3 = tf.reduce_mean(tf.keras.losses.mse(y_true=label1, y_pred=output3))
print("MSE_loss1:", MSE_loss1.numpy())
print("MSE_loss2:", MSE_loss2.numpy())
print("MSE_loss3:", MSE_loss3.numpy())
loss = MSE_loss1 + MSE_loss2 + MSE_loss3
Accuracy_num = 0
else:
# step two
# reconstruct the original data
# fed from block3
GRU1 = self.GRU1(RepDCBlock3)
GRU1 = tf.keras.layers.BatchNormalization()(GRU1)
d1 = self.d1(GRU1)
# tf.nn.softmax
output1 = self.output1(d1)
# fed from block4
GRU2 = self.GRU2(RepDCBlock4)
GRU2 = tf.keras.layers.BatchNormalization()(GRU2)
d2 = self.d2(GRU2)
# tf.nn.softmax
output2 = self.output2(d2)
# fed from block5
GRU3 = self.GRU3(RepDCBlock5)
GRU3 = tf.keras.layers.BatchNormalization()(GRU3)
d3 = self.d3(GRU3)
# tf.nn.softmax
output3 = self.output3(d3)
# multi-scale dynamic pooling
# p1 = self.p1(output1)
# B, _, _ = p1.shape
# f1 = tf.reshape(p1, shape=[B, -1])
# p2 = self.p2(output2)
# f2 = tf.reshape(p2, shape=[B, -1])
# p3 = self.p3(output3)
# f3 = tf.reshape(p3, shape=[B, -1])
# step three
# classifier
concat3 = tf.concat([output1, output2, output3], axis=1)
# dropout = tf.keras.layers.Dropout(0.25)(concat3)
d4 = self.d4(concat3)
d5 = self.d5(d4)
# d4 = tf.keras.layers.BatchNormalization()(d4)
output4 = self.output4(d5)
# reduce_mean collapses the dimensions to compute the mean
a = 0.02
beta = 0.5 * math.cos(min(self.epoch * 2 / self.epochs, 1) * math.pi) + 0.5
MSE_loss = SmoothL1Loss()(y_true=pred_3, y_pred=output1)
MSE_loss += SmoothL1Loss()(y_true=pred_4, y_pred=output2)
MSE_loss += SmoothL1Loss()(y_true=pred_5, y_pred=output3)
Cross_Entropy_loss = tf.reduce_mean(
tf.losses.binary_crossentropy(y_true=label2, y_pred=output4, from_logits=True))
print("MSE_loss:", MSE_loss.numpy())
print("Cross_Entropy_loss:", Cross_Entropy_loss.numpy())
Accuracy_num = self.get_Accuracy(label=label2, output=output4)
loss = beta * MSE_loss + a * Cross_Entropy_loss
return loss, Accuracy_num
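# Sketch of the loss weighting used above (assuming the epoch/epochs attributes set in
# __init__): beta follows a half cosine from 1.0 at epoch 0 down to 0.0 at epoch = epochs / 2
# and stays there, so the smooth-L1 reconstruction terms fade out while the cross-entropy
# term keeps its fixed weight a = 0.02 and dominates later training.
# def beta_schedule(epoch, epochs=50):
#     return 0.5 * math.cos(min(epoch * 2 / epochs, 1) * math.pi) + 0.5
# for e in (0, 10, 25, 40):
#     print(e, round(beta_schedule(e), 3))  # beta = 1.0, 0.655, 0.0, 0.0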
def get_Accuracy(self, output, label):
predict_label = tf.round(output)
label = tf.cast(label, dtype=tf.float32)
t = np.array(label - predict_label)
b = t[t[:] == 0]
return b.__len__()
def get_grad(self, input_tensor, label1=None, label2=None, is_first_time: bool = True, pred_3=None, pred_4=None,
pred_5=None):
with tf.GradientTape() as tape:
# todo by default the tape only watches tf.Variables created with trainable=True
# tape.watch(self.variables)
L, Accuracy_num = self.get_loss(input_tensor, label1=label1, label2=label2, is_first_time=is_first_time,
pred_3=pred_3,
pred_4=pred_4, pred_5=pred_5)
# keep the loss so it can be logged
self.train_loss = L
g = tape.gradient(L, self.variables)
return g, Accuracy_num
def train(self, input_tensor, label1=None, label2=None, learning_rate=1e-3, is_first_time: bool = True, pred_3=None,
pred_4=None, pred_5=None,epoch=0):
self.epoch=epoch
g, Accuracy_num = self.get_grad(input_tensor, label1=label1, label2=label2, is_first_time=is_first_time,
pred_3=pred_3,
pred_4=pred_4, pred_5=pred_5)
optimizers.Adam(learning_rate).apply_gradients(zip(g, self.variables))
return self.train_loss, Accuracy_num
# for now this really only supports batch_size equal to 1; otherwise z would have to be passed in, which is awkward
def get_val_loss(self, val_data, val_label1, val_label2, batch_size=16, is_first_time: bool = True,
step_one_model=None,epoch=0):
val_loss = []
self.epoch=epoch
accuracy_num = 0
output1 = 0
output2 = 0
output3 = 0
z = 1
size, length, dims = val_data.shape
if batch_size == None:
batch_size = self.batch_size
for epoch in range(0, size - batch_size, batch_size):
each_val_data = val_data[epoch:epoch + batch_size, :, :]
each_val_label1 = val_label1[epoch:epoch + batch_size, :]
each_val_label2 = val_label2[epoch:epoch + batch_size, ]
# each_val_data = tf.expand_dims(each_val_data, axis=0)
# each_val_query = tf.expand_dims(each_val_query, axis=0)
# each_val_label = tf.expand_dims(each_val_label, axis=0)
if not is_first_time:
output1, output2, output3, _ = step_one_model.call(inputs=each_val_data, is_first_time=True)
each_loss, each_accuracy_num = self.get_loss(each_val_data, each_val_label1, each_val_label2,
is_first_time=is_first_time,
pred_3=output1, pred_4=output2, pred_5=output3)
accuracy_num += each_accuracy_num
val_loss.append(each_loss)
z += 1
val_accuracy = accuracy_num / ((z - 1) * batch_size)
val_total_loss = tf.reduce_mean(val_loss)
return val_total_loss, val_accuracy
class RevConv(keras.layers.Layer):
def __init__(self, kernel_size=3):
# call the parent __init__()
super(RevConv, self).__init__()
self.kernel_size = kernel_size
def get_config(self):
# attributes of this custom layer
config = (
{
'kernel_size': self.kernel_size
}
)
base_config = super(RevConv, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def build(self, input_shape):
# print(input_shape)
_, _, output_dim = input_shape[0], input_shape[1], input_shape[2]
self.conv1 = tf.keras.layers.Conv1D(filters=output_dim, kernel_size=self.kernel_size, strides=1,
padding='causal',
dilation_rate=4)
self.conv2 = tf.keras.layers.Conv1D(filters=output_dim, kernel_size=1, strides=1, padding='causal',
dilation_rate=4)
# self.b2 = tf.keras.layers.BatchNormalization()
# self.b3 = tf.keras.layers.BatchNormalization()
# out = tf.keras.layers.Add()([b1, b2, b3])
# out = tf.nn.relu(out)
def call(self, inputs, **kwargs):
conv1 = self.conv1(inputs)
b1 = tf.keras.layers.BatchNormalization()(conv1)
b1 = tf.nn.leaky_relu(b1)
# b1 = self.b1
conv2 = self.conv2(inputs)
b2 = tf.keras.layers.BatchNormalization()(conv2)
b2 = tf.nn.leaky_relu(b2)
b3 = tf.keras.layers.BatchNormalization()(inputs)
out = tf.keras.layers.Add()([b1, b2, b3])
out = tf.nn.relu(out)
return out
class RevConvBlock(keras.layers.Layer):
def __init__(self, num: int = 3, kernel_size=3):
# call the parent __init__()
super(RevConvBlock, self).__init__()
self.num = num
self.kernel_size = kernel_size
self.L = []
for i in range(num):
RepVGG = RevConv(kernel_size=kernel_size)
self.L.append(RepVGG)
def get_config(self):
# attributes of this custom layer
config = (
{
'kernel_size': self.kernel_size,
'num': self.num
}
)
base_config = super(RevConvBlock, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs, **kwargs):
for i in range(self.num):
inputs = self.L[i](inputs)
return inputs